blob_id (stringlengths 40-40) | directory_id (stringlengths 40-40) | path (stringlengths 3-616) | content_id (stringlengths 40-40) | detected_licenses (sequencelengths 0-112) | license_type (stringclasses, 2 values) | repo_name (stringlengths 5-115) | snapshot_id (stringlengths 40-40) | revision_id (stringlengths 40-40) | branch_name (stringclasses, 777 values) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k-681M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (stringclasses, 22 values) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (stringclasses, 149 values) | src_encoding (stringclasses, 26 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 3-10.2M) | extension (stringclasses, 188 values) | content (stringlengths 3-10.2M) | authors (sequencelengths 1-1) | author_id (stringlengths 1-132) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0dd0fb2d347482fcc39221d04b6a381dab6cd16f | d87acfc6fa8dcf71ac26eebbd6069a938222efc3 | /captum/attr/_core/lrp.py | b40829da9c388417e8b280a4ee94afa8983edfd2 | [
"BSD-3-Clause"
] | permissive | pytorch/captum | aedeec58d34c7611ae8928144e9f2314f820c1ca | 945c582cc0b08885c4e2bfecb020abdfac0122f3 | refs/heads/master | 2023-09-04T08:49:54.120380 | 2023-07-08T00:30:37 | 2023-07-08T00:30:37 | 204,734,444 | 4,230 | 491 | BSD-3-Clause | 2023-09-08T17:58:15 | 2019-08-27T15:34:41 | Python | UTF-8 | Python | false | false | 18,328 | py | #!/usr/bin/env python3
import typing
from collections import defaultdict
from typing import Any, cast, List, Tuple, Union
import torch.nn as nn
from captum._utils.common import (
_format_output,
_format_tensor_into_tuples,
_is_tuple,
_register_backward_hook,
_run_forward,
)
from captum._utils.gradient import (
apply_gradient_requirements,
undo_gradient_requirements,
)
from captum._utils.typing import Literal, TargetType, TensorOrTupleOfTensorsGeneric
from captum.attr._utils.attribution import GradientAttribution
from captum.attr._utils.common import _sum_rows
from captum.attr._utils.custom_modules import Addition_Module
from captum.attr._utils.lrp_rules import EpsilonRule, PropagationRule
from captum.log import log_usage
from torch import Tensor
from torch.nn import Module
from torch.utils.hooks import RemovableHandle
class LRP(GradientAttribution):
r"""
Layer-wise relevance propagation is based on a backward propagation
mechanism applied sequentially to all layers of the model. Here, the
model output score represents the initial relevance which is decomposed
into values for each neuron of the underlying layers. The decomposition
is defined by rules that are chosen for each layer, involving its weights
and activations. Details on the model can be found in the original paper
[https://doi.org/10.1371/journal.pone.0130140]. The implementation is
inspired by the tutorial of the same group
[https://doi.org/10.1016/j.dsp.2017.10.011] and the publication by
Ancona et al. [https://openreview.net/forum?id=Sy21R9JAW].
"""
def __init__(self, model: Module) -> None:
r"""
Args:
model (Module): The forward function of the model or any modification of
it. Custom rules for a given layer need to be defined as attribute
`module.rule` and need to be of type PropagationRule. If no rule is
specified for a layer, a pre-defined default rule for the module type
is used.
"""
GradientAttribution.__init__(self, model)
self.model = model
self._check_rules()
@property
def multiplies_by_inputs(self) -> bool:
return True
@typing.overload
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
target: TargetType = None,
additional_forward_args: Any = None,
return_convergence_delta: Literal[False] = False,
verbose: bool = False,
) -> TensorOrTupleOfTensorsGeneric:
...
@typing.overload
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
target: TargetType = None,
additional_forward_args: Any = None,
*,
return_convergence_delta: Literal[True],
verbose: bool = False,
) -> Tuple[TensorOrTupleOfTensorsGeneric, Tensor]:
...
@log_usage()
def attribute(
self,
inputs: TensorOrTupleOfTensorsGeneric,
target: TargetType = None,
additional_forward_args: Any = None,
return_convergence_delta: bool = False,
verbose: bool = False,
) -> Union[
TensorOrTupleOfTensorsGeneric, Tuple[TensorOrTupleOfTensorsGeneric, Tensor]
]:
r"""
Args:
inputs (Tensor or tuple[Tensor, ...]): Input for which relevance is
propagated. If model takes a single
tensor as input, a single input tensor should be provided.
If model takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
target (int, tuple, Tensor, or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (tuple, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a tuple
containing multiple additional arguments including tensors
or any arbitrary python types. These arguments are provided to
model in order, following the arguments in inputs.
Note that attributions are not computed with respect
to these arguments.
Default: None
return_convergence_delta (bool, optional): Indicates whether to return
convergence delta or not. If `return_convergence_delta`
is set to True convergence delta will be returned in
a tuple following attributions.
Default: False
verbose (bool, optional): Indicates whether information on application
of rules is printed during propagation.
Returns:
*Tensor* or *tuple[Tensor, ...]* of **attributions**
or 2-element tuple of **attributions**, **delta**:
- **attributions** (*Tensor* or *tuple[Tensor, ...]*):
The propagated relevance values with respect to each
input feature. The values are normalized by the output score
value (sum(relevance)=1). To obtain values comparable to other
methods or implementations these values need to be multiplied
by the output score. Attributions will always
be the same size as the provided inputs, with each value
providing the attribution of the corresponding input index.
If a single tensor is provided as inputs, a single tensor is
returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned. The sum of attributions
is one and does not correspond to the prediction score as in other
implementations.
- **delta** (*Tensor*, returned if return_convergence_delta=True):
Delta is calculated per example, meaning that the number of
elements in returned delta tensor is equal to the number of
examples in the inputs.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities. It has one
>>> # Conv2D and a ReLU layer.
>>> net = ImageClassifier()
>>> lrp = LRP(net)
>>> input = torch.randn(3, 3, 32, 32)
>>> # Attribution size matches input size: 3x3x32x32
>>> attribution = lrp.attribute(input, target=5)
"""
self.verbose = verbose
self._original_state_dict = self.model.state_dict()
self.layers: List[Module] = []
self._get_layers(self.model)
self._check_and_attach_rules()
self.backward_handles: List[RemovableHandle] = []
self.forward_handles: List[RemovableHandle] = []
is_inputs_tuple = _is_tuple(inputs)
inputs = _format_tensor_into_tuples(inputs)
gradient_mask = apply_gradient_requirements(inputs)
try:
# 1. Forward pass: Change weights of layers according to selected rules.
output = self._compute_output_and_change_weights(
inputs, target, additional_forward_args
)
# 2. Forward pass + backward pass: Register hooks to configure relevance
# propagation and execute back-propagation.
self._register_forward_hooks()
normalized_relevances = self.gradient_func(
self._forward_fn_wrapper, inputs, target, additional_forward_args
)
relevances = tuple(
normalized_relevance
* output.reshape((-1,) + (1,) * (normalized_relevance.dim() - 1))
for normalized_relevance in normalized_relevances
)
finally:
self._restore_model()
undo_gradient_requirements(inputs, gradient_mask)
if return_convergence_delta:
return (
_format_output(is_inputs_tuple, relevances),
self.compute_convergence_delta(relevances, output),
)
else:
return _format_output(is_inputs_tuple, relevances) # type: ignore
def has_convergence_delta(self) -> bool:
return True
def compute_convergence_delta(
self, attributions: Union[Tensor, Tuple[Tensor, ...]], output: Tensor
) -> Tensor:
"""
Here, we use the completeness property of LRP: The relevance is conserved
during the propagation through the models' layers. Therefore, the difference
between the sum of attribution (relevance) values and model output is taken as
the convergence delta. It should be zero for functional attribution. However,
when rules with an epsilon value are used for stability reasons, relevance is
absorbed during propagation and the convergence delta is non-zero.
Args:
attributions (Tensor or tuple[Tensor, ...]): Attribution scores that
are precomputed by an attribution algorithm.
Attributions can be provided in form of a single tensor
or a tuple of those. It is assumed that attribution
tensor's dimension 0 corresponds to the number of
examples, and if multiple input tensors are provided,
the examples must be aligned appropriately.
output (Tensor): The output value with respect to which
the attribution values are computed. This value corresponds to
the target score of a classification model. The given tensor
should only have a single element.
Returns:
*Tensor*:
- **delta** Difference of relevance in output layer and input layer.
"""
if isinstance(attributions, tuple):
summed_attr = cast(
Tensor, sum(_sum_rows(attr) for attr in attributions)
)
else:
summed_attr = _sum_rows(attributions)
return output.flatten() - summed_attr.flatten()
def _get_layers(self, model: Module) -> None:
for layer in model.children():
if len(list(layer.children())) == 0:
self.layers.append(layer)
else:
self._get_layers(layer)
def _check_and_attach_rules(self) -> None:
for layer in self.layers:
if hasattr(layer, "rule"):
layer.activations = {} # type: ignore
layer.rule.relevance_input = defaultdict(list) # type: ignore
layer.rule.relevance_output = {} # type: ignore
pass
elif type(layer) in SUPPORTED_LAYERS_WITH_RULES.keys():
layer.activations = {} # type: ignore
layer.rule = SUPPORTED_LAYERS_WITH_RULES[type(layer)]() # type: ignore
layer.rule.relevance_input = defaultdict(list) # type: ignore
layer.rule.relevance_output = {} # type: ignore
elif type(layer) in SUPPORTED_NON_LINEAR_LAYERS:
layer.rule = None # type: ignore
else:
raise TypeError(
(
f"Module of type {type(layer)} has no rule defined and no"
"default rule exists for this module type. Please, set a rule"
"explicitly for this module and assure that it is appropriate"
"for this type of layer."
)
)
def _check_rules(self) -> None:
for module in self.model.modules():
if hasattr(module, "rule"):
if (
not isinstance(module.rule, PropagationRule)
and module.rule is not None
):
raise TypeError(
(
f"Please select propagation rules inherited from class "
f"PropagationRule for module: {module}"
)
)
def _register_forward_hooks(self) -> None:
for layer in self.layers:
if type(layer) in SUPPORTED_NON_LINEAR_LAYERS:
backward_handles = _register_backward_hook(
layer, PropagationRule.backward_hook_activation, self
)
self.backward_handles.extend(backward_handles)
else:
forward_handle = layer.register_forward_hook(
layer.rule.forward_hook # type: ignore
)
self.forward_handles.append(forward_handle)
if self.verbose:
print(f"Applied {layer.rule} on layer {layer}")
def _register_weight_hooks(self) -> None:
for layer in self.layers:
if layer.rule is not None:
forward_handle = layer.register_forward_hook(
layer.rule.forward_hook_weights # type: ignore
)
self.forward_handles.append(forward_handle)
def _register_pre_hooks(self) -> None:
for layer in self.layers:
if layer.rule is not None:
forward_handle = layer.register_forward_pre_hook(
layer.rule.forward_pre_hook_activations # type: ignore
)
self.forward_handles.append(forward_handle)
def _compute_output_and_change_weights(
self,
inputs: Tuple[Tensor, ...],
target: TargetType,
additional_forward_args: Any,
) -> Tensor:
try:
self._register_weight_hooks()
output = _run_forward(self.model, inputs, target, additional_forward_args)
finally:
self._remove_forward_hooks()
# Register pre_hooks that pass the initial activations from before weight
# adjustments as inputs to the layers with adjusted weights. This procedure
# is important for graph generation in the 2nd forward pass.
self._register_pre_hooks()
return output
def _remove_forward_hooks(self) -> None:
for forward_handle in self.forward_handles:
forward_handle.remove()
def _remove_backward_hooks(self) -> None:
for backward_handle in self.backward_handles:
backward_handle.remove()
for layer in self.layers:
if hasattr(layer.rule, "_handle_input_hooks"):
for handle in layer.rule._handle_input_hooks: # type: ignore
handle.remove()
if hasattr(layer.rule, "_handle_output_hook"):
layer.rule._handle_output_hook.remove() # type: ignore
def _remove_rules(self) -> None:
for layer in self.layers:
if hasattr(layer, "rule"):
del layer.rule
def _clear_properties(self) -> None:
for layer in self.layers:
if hasattr(layer, "activation"):
del layer.activation
def _restore_state(self) -> None:
self.model.load_state_dict(self._original_state_dict) # type: ignore
def _restore_model(self) -> None:
self._restore_state()
self._remove_backward_hooks()
self._remove_forward_hooks()
self._remove_rules()
self._clear_properties()
def _forward_fn_wrapper(self, *inputs: Tensor) -> Tensor:
"""
Wraps a forward function with addition of zero as a workaround to
https://github.com/pytorch/pytorch/issues/35802 discussed in
https://github.com/pytorch/captum/issues/143#issuecomment-611750044
#TODO: Remove when bugs are fixed
"""
adjusted_inputs = tuple(
input + 0 if input is not None else input for input in inputs
)
return self.model(*adjusted_inputs)
SUPPORTED_LAYERS_WITH_RULES = {
nn.MaxPool1d: EpsilonRule,
nn.MaxPool2d: EpsilonRule,
nn.MaxPool3d: EpsilonRule,
nn.Conv2d: EpsilonRule,
nn.AvgPool2d: EpsilonRule,
nn.AdaptiveAvgPool2d: EpsilonRule,
nn.Linear: EpsilonRule,
nn.BatchNorm2d: EpsilonRule,
Addition_Module: EpsilonRule,
}
SUPPORTED_NON_LINEAR_LAYERS = [nn.ReLU, nn.Dropout, nn.Tanh]
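# Note on the tables above: SUPPORTED_LAYERS_WITH_RULES maps layer types to the
# default propagation rule (EpsilonRule) attached in _check_and_attach_rules,
# while layers listed in SUPPORTED_NON_LINEAR_LAYERS get no rule and are handled
# via PropagationRule.backward_hook_activation in _register_forward_hooks.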
| [
"[email protected]"
] | |
a5ca0dba0718aa09e17d7542056fc9af17a7eb38 | 49c2492d91789b3c2def7d654a7396e8c6ce6d9f | /ROS/catkin_ws/build/dyros_simulator/dataspeed_can_tools/catkin_generated/pkg.installspace.context.pc.py | aaf615e0d8cdbcc7a35dbfeacc60e39121b30380 | [] | no_license | DavidHan008/lockdpwn | edd571165f9188e0ee93da7222c0155abb427927 | 5078a1b08916b84c5c3723fc61a1964d7fb9ae20 | refs/heads/master | 2021-01-23T14:10:53.209406 | 2017-09-02T18:02:50 | 2017-09-02T18:02:50 | 102,670,531 | 0 | 2 | null | 2017-09-07T00:11:33 | 2017-09-07T00:11:33 | null | UTF-8 | Python | false | false | 453 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-ldataspeed_can_tools".split(';') if "-ldataspeed_can_tools" != "" else []
PROJECT_NAME = "dataspeed_can_tools"
PROJECT_SPACE_DIR = "/home/dyros-vehicle/gitrepo/lockdpwn/ROS/catkin_ws/install"
PROJECT_VERSION = "1.0.4"
| [
"[email protected]"
] | |
d5fc2e9c95367713fde53a9b10a7e522573cc1da | 4fe1dc7170d2d44e2c9988c71b08f66d469ee4b8 | /Appendices/E/ejE5.py | 77ce7a4cada9e676303b27e369f41adfd4fb3073 | [] | no_license | ftorresi/PythonLearning | 53c0689a6f3e7e219a6314a673a318b25cda82d1 | f2aeb5f81d9090a5a5aa69a8d1203688e9f01adf | refs/heads/master | 2023-01-12T00:40:05.806774 | 2020-11-13T14:33:08 | 2020-11-13T14:33:08 | 267,460,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,930 | py | """Now we solve the ODE problem u - 10u' = 0 u(0)= 0.2 in [0,20] using HEUN's method"""
import numpy as np
import matplotlib.pyplot as plt
#Exact solution
def exact_u(t):
return 0.2*np.exp(0.1*t)
#u'=f(u,t) as a class
class f:
def __init__(self):
pass
def __call__(self,u,t):
return 0.1*u
#Heun's method as a class
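# Heun's scheme is a predictor-corrector (improved Euler) step:
#   predictor: u_star = u[k] + dt*f(u[k], t[k])
#   corrector: u[k+1] = u[k] + dt/2*(f(u[k], t[k]) + f(u_star, t[k+1]))
# advance() below implements exactly this update.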
class Heun:
def __init__(self, f, U0, T, n):
if not callable(f):
raise TypeError('f is %s, not a function' % type(f))
self.f, self.U0, self.T, self.n = f, U0, T, n
self.dt = T/float(n)
self.u = np.zeros(n+1)
self.t = np.linspace(0,T,n+1)
def solve(self):
"""Compute solution for 0 <= t <= T."""
self.u[0] = float(self.U0)
for k in range(self.n):
self.k = k
self.u[k+1] = self.advance()
return self.u, self.t
def advance(self):
"""Advance the solution one time step."""
u, dt, f, k, t = self.u, self.dt, self.f, self.k, self.t
f_eval=f(u[k], t[k])
u_mid= u[k] + dt*f_eval
u_new = u[k] + 0.5*dt*(f_eval+f(u_mid, t[k+1]))
return u_new
#Parameters
T=20
U0=0.2
#Plot exact solution
tgrid=np.linspace(0,T,2001)
uexact=exact_u(tgrid)
plt.plot(tgrid, uexact, "r-", label="Exact Solution")
#Numerical calculations and plots
nlist=[4,40,400]
f_init=f()
for n in nlist:
solver=Heun(f=f_init, U0=U0, T=T, n=n)
sol, t = solver.solve()
plt.plot(t, sol, "--", label="dt=%g"%(t[1]-t[0]))
plt.legend()
plt.title("u-10u'=0, u(0)=0.2 with Heun's method")
plt.xlabel("t")
plt.ylabel("u(t)")
plt.savefig("ejE5.png")
#Save to file (only last solution)
with open("ejE5.out","w") as outfile:
outfile.write("Numerical Solution to u-10u'=0, u(0)=0.2 with Heun's method\n")
outfile.write(" t u(t)\n")
for i in range(len(t)):
outfile.write("%5.2f %7.4f\n"%(t[i], sol[i]))
| [
"[email protected]"
] | |
cd97fc37637ebbea191dfde0b5de158f4d957ec8 | 6189f34eff2831e3e727cd7c5e43bc5b591adffc | /WebMirror/management/rss_parser_funcs/feed_parse_extractNorthbladetlBlogspotCom.py | 6ae6636f041f4c6171da2a37228f9012d3e74405 | [
"BSD-3-Clause"
] | permissive | fake-name/ReadableWebProxy | 24603660b204a9e7965cfdd4a942ff62d7711e27 | ca2e086818433abc08c014dd06bfd22d4985ea2a | refs/heads/master | 2023-09-04T03:54:50.043051 | 2023-08-26T16:08:46 | 2023-08-26T16:08:46 | 39,611,770 | 207 | 20 | BSD-3-Clause | 2023-09-11T15:48:15 | 2015-07-24T04:30:43 | Python | UTF-8 | Python | false | false | 562 | py |
def extractNorthbladetlBlogspotCom(item):
'''
Parser for 'northbladetl.blogspot.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
| [
"[email protected]"
] | |
aeda73e4de7393ca198519384998e625a5a63d26 | 6f33381dcb19a042d916b4a452f9cb7438729798 | /jabba/graphs/legend.py | 6767d1bc8f8140580a6220c89e4327bd31cd22ab | [
"MIT"
] | permissive | puppetlabs/jabba | 8308adf5be9ba25efb414f384bf3568854be55e2 | 71c1d008ab497020fba6ffa12a600721eb3f5ef7 | refs/heads/master | 2023-06-13T09:17:49.274408 | 2017-06-30T11:02:27 | 2017-06-30T11:02:27 | 185,443,592 | 0 | 1 | null | 2019-05-07T16:54:03 | 2019-05-07T16:54:02 | null | UTF-8 | Python | false | false | 939 | py |
import graphviz as gv
class Legend(object):
"""
GraphViz graph for rendering legend
"""
def __init__(self):
# Legend is presented as map (name -> settings)
self.items = {}
def add_item(self, name, settings):
self.items[name] = settings
def render(self):
legend = gv.Digraph('cluster_legend')
legend.body.extend(['label="Legend"'])
for name, settings in self.items.items():
legend.node("{}-1".format(name), label="")
legend.node("{}-2".format(name), label="")
# format label so it doesn't overlap with edge
label = " {}".format(name)
legend.edge("{}-1".format(name), "{}-2".format(name), label=label, **settings)
legend_wrapper = gv.Digraph('cluster_legend_wrapper')
legend_wrapper.subgraph(legend)
legend_wrapper.body.extend(['style=invis'])
return legend_wrapper
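# Example usage (sketch; the item name and edge settings are illustrative,
# not part of this module's API):
# legend = Legend()
# legend.add_item("ssh dependency", {"style": "dashed"})
# graph = gv.Digraph("dependencies")
# graph.subgraph(legend.render())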
| [
"[email protected]"
] | |
61bc8a3a202bc70ca7a6d6c6a4c970e5e87ea59c | 06b25df867b9a4741b4ca803eceb254aa50758e9 | /editor_api/rest/lum.py | acdf87c0a73ffc1bdb30ca30bc06f1dcb3063474 | [
"MIT"
] | permissive | jphuart/swatplus-automatic-workflow | e5ceaa745096926176d9fc45042f836e628d0504 | dd2eeb7f882eb2d4ab7e1e5265c10b9beb93ddc4 | refs/heads/master | 2023-08-15T02:47:40.742352 | 2021-10-05T14:57:19 | 2021-10-05T14:57:19 | 282,170,706 | 0 | 0 | null | 2020-07-24T08:52:25 | 2020-07-24T08:52:24 | null | UTF-8 | Python | false | false | 19,887 | py | from flask_restful import Resource, reqparse, abort
from playhouse.shortcuts import model_to_dict
from peewee import *
from .base import BaseRestModel
from database.project import base
from database.project.setup import SetupProjectDatabase
from database.project.lum import Landuse_lum, Management_sch, Cntable_lum, Cons_prac_lum, Ovn_table_lum, Management_sch_auto, Management_sch_op
from database.project.structural import Tiledrain_str, Septic_str, Filterstrip_str, Grassedww_str, Bmpuser_str
from database.project.hru_parm_db import Urban_urb
from database.project.init import Plant_ini
from database.project.decision_table import D_table_dtl
from database.datasets.setup import SetupDatasetsDatabase
from database.datasets import lum as ds_lum
from database import lib
from helpers import utils
invalid_name_msg = 'Invalid name {name}. Please ensure the value exists in your database.'
def get_landuse_args(get_selected_ids=False):
parser = reqparse.RequestParser()
if get_selected_ids:
parser.add_argument('selected_ids', type=int, action='append', required=False, location='json')
else:
parser.add_argument('id', type=int, required=False, location='json')
parser.add_argument('name', type=str, required=True, location='json')
parser.add_argument('description', type=str, required=False, location='json')
parser.add_argument('cal_group', type=str, required=False, location='json')
parser.add_argument('urb_ro', type=str, required=False, location='json')
parser.add_argument('plnt_com_name', type=str, required=False, location='json')
parser.add_argument('mgt_name', type=str, required=False, location='json')
parser.add_argument('cn2_name', type=str, required=False, location='json')
parser.add_argument('cons_prac_name', type=str, required=False, location='json')
parser.add_argument('urban_name', type=str, required=False, location='json')
parser.add_argument('ov_mann_name', type=str, required=False, location='json')
parser.add_argument('tile_name', type=str, required=False, location='json')
parser.add_argument('sep_name', type=str, required=False, location='json')
parser.add_argument('vfs_name', type=str, required=False, location='json')
parser.add_argument('grww_name', type=str, required=False, location='json')
parser.add_argument('bmp_name', type=str, required=False, location='json')
args = parser.parse_args(strict=True)
return args
def save_landuse_args(self, m, args):
m.name = args['name']
m.description = args['description']
m.cal_group = utils.remove_space(args['cal_group'])
m.urb_ro = args['urb_ro']
m.plnt_com_id = self.get_id_from_name(Plant_ini, args['plnt_com_name'])
m.mgt_id = self.get_id_from_name(Management_sch, args['mgt_name'])
m.cn2_id = self.get_id_from_name(Cntable_lum, args['cn2_name'])
m.cons_prac_id = self.get_id_from_name(Cons_prac_lum, args['cons_prac_name'])
m.urban_id = self.get_id_from_name(Urban_urb, args['urban_name'])
m.ov_mann_id = self.get_id_from_name(Ovn_table_lum, args['ov_mann_name'])
m.tile_id = self.get_id_from_name(Tiledrain_str, args['tile_name'])
m.sep_id = self.get_id_from_name(Septic_str, args['sep_name'])
m.vfs_id = self.get_id_from_name(Filterstrip_str, args['vfs_name'])
m.grww_id = self.get_id_from_name(Grassedww_str, args['grww_name'])
m.bmp_id = self.get_id_from_name(Bmpuser_str, args['bmp_name'])
return m.save()
class LanduseLumListApi(BaseRestModel):
def get(self, project_db, sort, reverse, page, items_per_page):
table = Landuse_lum
list_name = 'landuse'
return self.base_paged_list(project_db, sort, reverse, page, items_per_page, table, list_name, True)
class LanduseLumApi(BaseRestModel):
def get(self, project_db, id):
return self.base_get(project_db, id, Landuse_lum, 'Landuse', True)
def delete(self, project_db, id):
return self.base_delete(project_db, id, Landuse_lum, 'Landuse')
def put(self, project_db, id):
args = get_landuse_args()
try:
SetupProjectDatabase.init(project_db)
m = Landuse_lum.get(Landuse_lum.id == id)
result = save_landuse_args(self, m, args)
if result > 0:
return 200
abort(400, message='Unable to update land use properties {id}.'.format(id=id))
except IntegrityError as e:
abort(400, message='Land use name must be unique.')
except Landuse_lum.DoesNotExist:
abort(404, message='Land use properties {id} does not exist'.format(id=id))
except Plant_ini.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['plnt_com_name']))
except Management_sch.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['mgt_name']))
except Cntable_lum.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['cn2_name']))
except Cons_prac_lum.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['cons_prac_name']))
except Urban_urb.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['urban_name']))
except Ovn_table_lum.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['ov_mann_name']))
except Tiledrain_str.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['tile_name']))
except Septic_str.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['sep_name']))
except Filterstrip_str.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['vfs_name']))
except Grassedww_str.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['grww_name']))
except Bmpuser_str.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['bmp_name']))
except Exception as ex:
abort(400, message="Unexpected error {ex}".format(ex=ex))
class LanduseLumPostApi(BaseRestModel):
def post(self, project_db):
args = get_landuse_args()
try:
SetupProjectDatabase.init(project_db)
m = Landuse_lum()
result = save_landuse_args(self, m, args)
if result > 0:
return 200
abort(400, message='Unable to update channel properties {id}.'.format(id=id))
except IntegrityError as e:
abort(400, message='Channel properties name must be unique.')
except Plant_ini.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['plnt_com_name']))
except Management_sch.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['mgt_name']))
except Cntable_lum.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['cn2_name']))
except Cons_prac_lum.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['cons_prac_name']))
except Urban_urb.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['urban_name']))
except Ovn_table_lum.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['ov_mann_name']))
except Tiledrain_str.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['tile_name']))
except Septic_str.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['sep_name']))
except Filterstrip_str.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['vfs_name']))
except Grassedww_str.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['grww_name']))
except Bmpuser_str.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['bmp_name']))
except Exception as ex:
abort(400, message="Unexpected error {ex}".format(ex=ex))
class LanduseLumUpdateManyApi(BaseRestModel):
def get(self, project_db):
return self.base_name_id_list(project_db, Landuse_lum)
def put(self, project_db):
SetupProjectDatabase.init(project_db)
args = get_landuse_args(True)
try:
param_dict = {}
if args['cal_group'] is not None:
param_dict['cal_group'] = utils.remove_space(args['cal_group'])
if args['urb_ro'] is not None:
param_dict['urb_ro'] = args['urb_ro']
if args['plnt_com_name'] is not None:
param_dict['plnt_com_id'] = self.get_id_from_name(Plant_ini, args['plnt_com_name'])
if args['mgt_name'] is not None:
param_dict['mgt_id'] = self.get_id_from_name(Management_sch, args['mgt_name'])
if args['cn2_name'] is not None:
param_dict['cn2_id'] = self.get_id_from_name(Cntable_lum, args['cn2_name'])
if args['cons_prac_name'] is not None:
param_dict['cons_prac_id'] = self.get_id_from_name(Cons_prac_lum, args['cons_prac_name'])
if args['urban_name'] is not None:
param_dict['urban_id'] = self.get_id_from_name(Urban_urb, args['urban_name'])
if args['ov_mann_name'] is not None:
param_dict['ov_mann_id'] = self.get_id_from_name(Ovn_table_lum, args['ov_mann_name'])
if args['tile_name'] is not None:
param_dict['tile_id'] = self.get_id_from_name(Tiledrain_str, args['tile_name'])
if args['sep_name'] is not None:
param_dict['sep_id'] = self.get_id_from_name(Septic_str, args['sep_name'])
if args['vfs_name'] is not None:
param_dict['vfs_id'] = self.get_id_from_name(Filterstrip_str, args['vfs_name'])
if args['grww_name'] is not None:
param_dict['grww_id'] = self.get_id_from_name(Grassedww_str, args['grww_name'])
if args['bmp_name'] is not None:
param_dict['bmp_id'] = self.get_id_from_name(Bmpuser_str, args['bmp_name'])
query = Landuse_lum.update(param_dict).where(Landuse_lum.id.in_(args['selected_ids']))
result = query.execute()
if result > 0:
return 200
abort(400, message='Unable to update channel properties.')
except Plant_ini.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['plnt_com_name']))
except Management_sch.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['mgt_name']))
except Cntable_lum.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['cn2_name']))
except Cons_prac_lum.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['cons_prac_name']))
except Urban_urb.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['urban_name']))
except Ovn_table_lum.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['ov_mann_name']))
except Tiledrain_str.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['tile_name']))
except Septic_str.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['sep_name']))
except Filterstrip_str.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['vfs_name']))
except Grassedww_str.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['grww_name']))
except Bmpuser_str.DoesNotExist:
abort(400, message=invalid_name_msg.format(name=args['bmp_name']))
except Exception as ex:
abort(400, message="Unexpected error {ex}".format(ex=ex))
def save_cntable_lum(m, args):
m.name = args['name']
m.description = utils.remove_space(args['description'])
m.cn_a = args['cn_a']
m.cn_b = args['cn_b']
m.cn_c = args['cn_c']
m.cn_d = args['cn_d']
m.treat = utils.remove_space(args['treat'])
m.cond_cov = utils.remove_space(args['cond_cov'])
return m.save()
class CntableLumListApi(BaseRestModel):
def get(self, project_db, sort, reverse, page, items_per_page):
table = Cntable_lum
list_name = 'cntable'
return self.base_paged_list(project_db, sort, reverse, page, items_per_page, table, list_name)
class CntableLumApi(BaseRestModel):
def get(self, project_db, id):
return self.base_get(project_db, id, Cntable_lum, 'Curve Number')
def delete(self, project_db, id):
return self.base_delete(project_db, id, Cntable_lum, 'Curve Number')
def put(self, project_db, id):
try:
SetupProjectDatabase.init(project_db)
args = self.get_args('cntable_lum', project_db)
m = Cntable_lum.get(Cntable_lum.id == id)
result = save_cntable_lum(m, args)
if result > 0:
return 200
abort(400, message='Unable to update curve number table {id}.'.format(id=id))
except IntegrityError as e:
abort(400, message='Curve number table name must be unique.')
except Cntable_lum.DoesNotExist:
abort(404, message='Curve number table {id} does not exist'.format(id=id))
except Exception as ex:
abort(400, message="Unexpected error {ex}".format(ex=ex))
class CntableLumUpdateManyApi(BaseRestModel):
def get(self, project_db):
return self.base_name_id_list(project_db, Cntable_lum)
def put(self, project_db):
try:
SetupProjectDatabase.init(project_db)
args = self.get_args('cntable_lum', project_db, True)
remove_spaces = ['description', 'treat', 'cond_cov']
param_dict = {}
for key in args.keys():
if args[key] is not None and key != 'selected_ids':
param_dict[key] = utils.remove_space(args[key]) if key in remove_spaces else args[key]
query = Cntable_lum.update(param_dict).where(Cntable_lum.id.in_(args['selected_ids']))
result = query.execute()
if result > 0:
return 200
abort(400, message='Unable to update curve number tables.')
except Exception as ex:
abort(400, message="Unexpected error {ex}".format(ex=ex))
class CntableLumPostApi(BaseRestModel):
def post(self, project_db):
try:
SetupProjectDatabase.init(project_db)
args = self.get_args('cntable_lum', project_db)
m = Cntable_lum()
result = save_cntable_lum(m, args)
if result > 0:
return model_to_dict(m), 201
abort(400, message='Unable to update curve number table {id}.'.format(id=id))
except IntegrityError as e:
abort(400, message='Curve number table name must be unique.')
except Exception as ex:
abort(400, message="Unexpected error {ex}".format(ex=ex))
class CntableLumDatasetsApi(BaseRestModel):
def get(self, datasets_db, name):
return self.base_get_datasets_name(datasets_db, name, ds_lum.Cntable_lum, 'Curve number table')
class OvntableLumListApi(BaseRestModel):
def get(self, project_db, sort, reverse, page, items_per_page):
table = Ovn_table_lum
list_name = 'ovntable'
return self.base_paged_list(project_db, sort, reverse, page, items_per_page, table, list_name)
class OvntableLumApi(BaseRestModel):
def get(self, project_db, id):
return self.base_get(project_db, id, Ovn_table_lum, 'Mannings n')
def delete(self, project_db, id):
return self.base_delete(project_db, id, Ovn_table_lum, 'Mannings n')
def put(self, project_db, id):
return self.base_put(project_db, id, Ovn_table_lum, 'Mannings n')
class OvntableLumUpdateManyApi(BaseRestModel):
def get(self, project_db):
return self.base_name_id_list(project_db, Ovn_table_lum)
def put(self, project_db):
return self.base_put_many(project_db, Ovn_table_lum, 'Mannings n')
class OvntableLumPostApi(BaseRestModel):
def post(self, project_db):
return self.base_post(project_db, Ovn_table_lum, 'Mannings n')
class OvntableLumDatasetsApi(BaseRestModel):
def get(self, datasets_db, name):
return self.base_get_datasets_name(datasets_db, name, ds_lum.Ovn_table_lum, 'Mannings n table')
class ConsPracLumListApi(BaseRestModel):
def get(self, project_db, sort, reverse, page, items_per_page):
table = Cons_prac_lum
list_name = 'cons_prac'
return self.base_paged_list(project_db, sort, reverse, page, items_per_page, table, list_name)
class ConsPracLumApi(BaseRestModel):
def get(self, project_db, id):
return self.base_get(project_db, id, Cons_prac_lum, 'Conservation practice')
def delete(self, project_db, id):
return self.base_delete(project_db, id, Cons_prac_lum, 'Conservation practice')
def put(self, project_db, id):
return self.base_put(project_db, id, Cons_prac_lum, 'Conservation practice')
class ConsPracLumUpdateManyApi(BaseRestModel):
def get(self, project_db):
return self.base_name_id_list(project_db, Cons_prac_lum)
def put(self, project_db):
return self.base_put_many(project_db, Cons_prac_lum, 'Conservation practice')
class ConsPracLumPostApi(BaseRestModel):
def post(self, project_db):
return self.base_post(project_db, Cons_prac_lum, 'Conservation practice')
class ConsPracLumDatasetsApi(BaseRestModel):
def get(self, datasets_db, name):
return self.base_get_datasets_name(datasets_db, name, ds_lum.Cons_prac_lum, 'Conservation practices')
def get_mgt_args():
parser = reqparse.RequestParser()
parser.add_argument('id', type=int, required=False, location='json')
parser.add_argument('name', type=str, required=True, location='json')
parser.add_argument('auto_ops', type=list, required=False, location='json')
parser.add_argument('operations', type=list, required=False, location='json')
args = parser.parse_args(strict=True)
return args
class ManagementSchListApi(BaseRestModel):
def get(self, project_db, sort, reverse, page, items_per_page):
table = Management_sch
list_name = 'mgt_sch'
SetupProjectDatabase.init(project_db)
total = table.select().count()
sort_val = SQL(sort)
if reverse == 'true':
sort_val = SQL(sort).desc()
m = table.select().order_by(sort_val).paginate(int(page), int(items_per_page))
ml = [{'id': v.id, 'name': v.name, 'num_ops': len(v.operations), 'num_auto': len(v.auto_ops)} for v in m]
return {'total': total, list_name: ml}
class ManagementSchApi(BaseRestModel):
def get(self, project_db, id):
return self.base_get(project_db, id, Management_sch, 'Management schedule', back_refs=True, max_depth=2)
def delete(self, project_db, id):
return self.base_delete(project_db, id, Management_sch, 'Management schedule')
def put(self, project_db, id):
try:
SetupProjectDatabase.init(project_db)
args = get_mgt_args()
m = Management_sch.get(Management_sch.id == id)
m.name = args['name']
m.save()
new_auto = []
for a in args['auto_ops']:
try:
dt = D_table_dtl.get((D_table_dtl.file_name == 'lum.dtl') & (D_table_dtl.name == a))
new_auto.append({'management_sch_id': m.id, 'd_table_id': dt.id})
except D_table_dtl.DoesNotExist:
abort(404, message='Decision table {name} does not exist'.format(name=a))
new_ops = []
order = 1
for o in args['operations']:
new_ops.append({
'management_sch_id': m.id,
'op_typ': o['op_typ'],
'mon': o['mon'],
'day': o['day'],
'op_data1': o['op_data1'],
'op_data2': o['op_data2'],
'op_data3': o['op_data3'],
'order': o['order'],
'hu_sch': o['hu_sch']
})
order += 1
Management_sch_auto.delete().where(Management_sch_auto.management_sch_id == m.id).execute()
lib.bulk_insert(base.db, Management_sch_auto, new_auto)
Management_sch_op.delete().where(Management_sch_op.management_sch_id == m.id).execute()
lib.bulk_insert(base.db, Management_sch_op, new_ops)
return 200
except IntegrityError as e:
abort(400, message='Management schedule name must be unique.')
except Cons_prac_lum.DoesNotExist:
abort(404, message='Management schedule {id} does not exist'.format(id=id))
except Exception as ex:
abort(400, message="Unexpected error {ex}".format(ex=ex))
class ManagementSchPostApi(BaseRestModel):
def post(self, project_db):
try:
args = get_mgt_args()
m = Management_sch()
m.name = args['name']
m.save()
new_auto = []
for a in args['auto_ops']:
try:
dt = D_table_dtl.get((D_table_dtl.file_name == 'lum.dtl') & (D_table_dtl.name == a))
new_auto.append({'management_sch_id': m.id, 'd_table_id': dt.id})
except D_table_dtl.DoesNotExist:
abort(404, message='Decision table {name} does not exist'.format(name=a))
new_ops = []
order = 1
for o in args['operations']:
new_ops.append({
'management_sch_id': m.id,
'op_typ': o['op_typ'],
'mon': o['mon'],
'day': o['day'],
'op_data1': o['op_data1'],
'op_data2': o['op_data2'],
'op_data3': o['op_data3'],
'order': o['order'],
'hu_sch': o['hu_sch']
})
order += 1
lib.bulk_insert(base.db, Management_sch_auto, new_auto)
lib.bulk_insert(base.db, Management_sch_op, new_ops)
return 201
except IntegrityError as e:
abort(400, message='Management schedule name must be unique.')
except Exception as ex:
abort(400, message="Unexpected error {ex}".format(ex=ex))
| [
"[email protected]"
] | |
90254a2e8ba7c81e196fd637cbd4598c1fdaa717 | 3e1b5d7cb529be1529ae45fa062a423f8328d6d2 | /Edgar-new-codes/Getting-10-K- and-10-Q doc links.py | 54aab244b45e5b116a35eee0099a5ad3de1aba53 | [] | no_license | abhigupta4/Finrsch | 260687b3b53d3c94a03dc2b9e640952718033486 | ca0b7f1631fbbe109b81403b9ffc36c67c759d23 | refs/heads/master | 2021-01-19T10:54:19.705213 | 2017-07-06T12:22:37 | 2017-07-06T12:22:37 | 87,913,197 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,806 | py | import requests
from bs4 import BeautifulSoup
import urlparse
def get_file(cur,val):
base = 'https://www.sec.gov'
r = requests.get(cur)
document = BeautifulSoup(r.content,"lxml")
links = document.find_all('a')
for link in links:
if 'Archives' in link.get("href"):
print 'Document link'
print base+link.get("href")
break
def take_second_link(cur,cik,val):
begin = 'https://www.sec.gov'
r = requests.get(cur)
document = BeautifulSoup(r.content,"lxml")
links = document.find_all('a')
for link in links:
temp = link.get("href")
if 'index' in temp and 'headers' not in temp and cik in temp:
print
print 'Company link'
if val:
print "Type 10-K"
else:
print "Type 10-Q"
print begin+temp
get_file(begin + temp,val)
def find_link1(entire,val):
begin = 'https://www.sec.gov/Archives/edgar/data/'
for part in entire:
if 'data' in part:
temp = part.split('/')
last = ''
for ele in temp[-1]:
if ele.isdigit():
last += ele
new = begin + temp[-2] + '/' + last
take_second_link(new,temp[-2],val)
def inside_index(link1):
r = requests.get(main_link+link1)
document = BeautifulSoup(r.content,"lxml")
soup = document.get_text()
lines = soup.split("\n")
flag = 1
for line in lines:
temp = line.split(" ")
for i in xrange(len(temp)):
if temp[i] == '10-Q' and temp[i-1] == '' and temp[i+1] == '':
find_link1(temp,0)
break
if temp[i] == '10-K' and temp[i-1] == '' and temp[i+1] == '':
find_link1(temp,1)
break
main_link = 'https://www.sec.gov/Archives/edgar/daily-index/2017/QTR2/'
r = requests.get(main_link)
document = BeautifulSoup(r.content,"lxml")
links = document.find_all('a')
for link in links:
if 'company' in link.get("href") and '.idx' in link.get("href"):
inside_index(link.get("href"))
# break
| [
"[email protected]"
] | |
ec47bc066bc69f6cf12e1ef76fe29f8be677394c | 5667cc877342204b7d54b6c3cc5a9f4854f08829 | /.history/apppersona/views_20201101174230.py | 3ca271ad58977d9585b9c4096dc875f160abb1d5 | [] | no_license | Nyckhos/TestCommit | d62e3f6fefb04ab5647475cc7ead0d72cbd89efa | 9aa8e2e35280b7862960cc8a864e9c02ac7f4796 | refs/heads/main | 2023-01-05T05:57:59.223641 | 2020-11-02T02:08:18 | 2020-11-02T02:08:18 | 309,237,224 | 2 | 0 | null | 2020-11-02T02:30:43 | 2020-11-02T02:30:43 | null | UTF-8 | Python | false | false | 3,787 | py | from django.http import request
from django.shortcuts import redirect, render
from django.http import HttpResponse
from .models import *
from .forms import *
from django.contrib.auth.models import User
from django.contrib.auth import *
from django.urls import reverse
from django.contrib.auth import login
from django.contrib.auth.decorators import *
from django.contrib.admin.views.decorators import *
from django.shortcuts import render, redirect
from django.core.mail import send_mail, BadHeaderError
from django.http import HttpResponse
from django.contrib.auth.forms import PasswordResetForm
from django.contrib.auth.models import User
from django.template.loader import render_to_string
from django.db.models.query_utils import Q
from django.utils.http import urlsafe_base64_encode
from django.contrib.auth.tokens import default_token_generator
from django.utils.encoding import force_bytes
# Create your views here.
#@login_required
#def index(request):
# return render(request,'appersona/index.html')
def lista_personas(request):
lista = User.objects.all() # Todas las personas
return render(request, 'apppersona/lista_personas.html', {'lista': lista})
def lista_tarjetas(request):
tarjetas = TarjetaJunaeb.objects.all()
return render(request, 'apppersona/lista_tarjetas.html', {'listaTarjetas': tarjetas})
def tarjetas_con_plata(request):
tarjetas = TarjetaJunaeb.objects.filter(montoDisponible__gte=1)
return render(request, 'apppersona/lista_tarjetas.html', {'listaTarjetas': tarjetas})
def index(request):
return render(request, 'apppersona/index.html')
def contacto(request):
return render(request, 'apppersona/contacto.html')
def nosotros(request):
return render(request, 'apppersona/nosotros.html')
def register(request):
if request.method == "POST":
form = ExtendedUserCreationForm(request.POST)
profile_form = FormularioPersona(request.POST)
if form.is_valid() and profile_form.is_valid():
user = form.save()
profile = profile_form.save(commit=False)
profile.user = user
profile.save()
username = form.cleaned_data.get('username')
password = form.cleaned_data.get('password1')
user = authenticate(username=username, password=password)
login(request, user)
return redirect('index')
else:
form = ExtendedUserCreationForm()
profile_form = FormularioPersona()
context = {'form': form, 'profile_form': profile_form}
return render(request, "apppersona/registro.html", context)
def password_reset_request(request):
if request.method == "POST":
password_reset_form = PasswordResetForm(request.POST)
if password_reset_form.is_valid():
data = password_reset_form.cleaned_data['email']
associated_users = User.objects.filter(Q(email=data))
if associated_users.exists():
for user in associated_users:
subject = "Password Reset Requested"
email_template_name = "main/password/password_reset_email.txt"
c = {
"email":user.email,
'domain':'127.0.0.1:8000',
'site_name': 'Website',
"uid": urlsafe_base64_encode(force_bytes(user.pk)),
"user": user,
'token': default_token_generator.make_token(user),
'protocol': 'http',
}
email = render_to_string(email_template_name, c)
try:
send_mail(subject, email, '[email protected]' , [user.email], fail_silently=False)
except BadHeaderError:
return HttpResponse('Invalid header found.')
return redirect ("/password_reset/done/")
password_reset_form = PasswordResetForm()
return render(request=request, template_name="main/password/password_reset.html", context={"password_reset_form":password_reset_form})
| [
"[email protected]"
] | |
d3bdbc5461a26a5c7fe0183620159be6662d508c | a9a2f66671fadf765d7feb511a4a5d9b9f4ef362 | /test/agent/server/local_elastic_agent_test.py | d86d3dc297c43d5bceef33af548cc125f0b9dd84 | [
"BSD-3-Clause"
] | permissive | BobLiu20/elastic | 64885d164d485976ea8740672c454c212bab4ff8 | e371fe57672aea91d2466f5e04884028d8dca649 | refs/heads/master | 2022-11-11T10:22:20.181835 | 2020-07-01T23:30:44 | 2020-07-01T23:32:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,667 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import multiprocessing
import os
import time
import unittest
import uuid
from unittest.mock import patch
import torch
import torch.distributed as dist
import torch.distributed.rpc as rpc
import torchelastic.rendezvous.etcd_rendezvous # noqa: F401
from test_utils import is_tsan
from torch.distributed.rpc.backend_registry import BackendType
from torchelastic.agent.server.api import (
WorkerGroupFailureException,
WorkerSpec,
WorkerState,
)
from torchelastic.agent.server.local_elastic_agent import LocalElasticAgent
from torchelastic.rendezvous.etcd_server import EtcdServer
def _happy_function():
return
def _sad_function():
raise RuntimeError("sad because i throw")
def _bipolar_function():
rank = int(os.environ["RANK"])
if rank % 2 == 0:
_happy_function()
else:
_sad_function()
def _distributed_sum(wait):
rank = int(os.environ["RANK"])
world_size = int(os.environ["WORLD_SIZE"])
dist.init_process_group(backend="gloo")
t = torch.tensor(rank)
time.sleep(wait)
dist.all_reduce(t, op=dist.reduce_op.SUM)
expected_sum = sum(range(world_size))
actual = t.item()
if expected_sum != actual:
raise RuntimeError(f"Expected rank sum {expected_sum}, got {actual}")
def _simulate_work(wait):
time.sleep(wait)
rank = int(os.environ["RANK"])
return rank
def _check_rank_assignment():
group_rank = int(os.environ["GROUP_RANK"])
rank = int(os.environ["RANK"])
world_size = int(os.environ["WORLD_SIZE"])
role_rank = int(os.environ["ROLE_RANK"])
role_world_size = int(os.environ["ROLE_WORLD_SIZE"])
return (group_rank, rank, world_size, role_rank, role_world_size)
def echo(msg):
return msg
def _return_rank_times(a):
return int(os.environ["RANK"]) * a
def _check_env_function():
# just check these env vars exist, os.environ[...] will naturally throw
# if the variable does not exist
os.environ["RANK"]
os.environ["LOCAL_RANK"]
os.environ["ROLE_RANK"]
os.environ["GROUP_RANK"]
os.environ["LOCAL_WORLD_SIZE"]
os.environ["ROLE_WORLD_SIZE"]
os.environ["WORLD_SIZE"]
os.environ["MASTER_ADDR"]
os.environ["MASTER_PORT"]
os.environ["TORCHELASTIC_RESTART_COUNT"]
os.environ["TORCHELASTIC_MAX_RESTARTS"]
os.environ["TORCHELASTIC_RUN_ID"]
def _run_agent(
run_id,
etcd_host,
etcd_port,
min_size,
max_size,
func_to_run,
args,
local_world_size=8,
role="test_trainer",
output_dict=None,
agent_barrier_timeout=300,
):
rdzv_handler = dist.rendezvous(
f"etcd://{etcd_host}:{etcd_port}/{run_id}"
f"?min_workers={min_size}"
f"&max_workers={max_size}"
)
spec = WorkerSpec(
role=role,
local_world_size=local_world_size,
fn=func_to_run,
args=args,
rdzv_handler=rdzv_handler,
max_restarts=2,
monitor_interval=1,
)
agent = LocalElasticAgent(
spec, start_method="fork", exit_barrier_timeout=agent_barrier_timeout
)
res = agent.run()
if output_dict is not None:
key = str(uuid.uuid4().int)
output_dict[key] = (role, res)
class LocalElasticAgentTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# start a standalone, single process etcd server to use for all tests
cls._etcd_server = EtcdServer()
cls._etcd_server.start()
@classmethod
def tearDownClass(cls):
# stop the standalone etcd server
cls._etcd_server.stop()
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_run_happy_function(self):
spec = self._get_worker_spec(fn=_happy_function)
agent = LocalElasticAgent(spec, start_method="fork")
agent.run()
def _get_worker_spec(
self,
fn,
args=(),
max_restarts=1,
num_agents=1,
monitor_interval=0.1,
local_world_size=8,
):
run_id = str(uuid.uuid4().int)
rdzv_handler = dist.rendezvous(
f"etcd://{self._etcd_server.get_endpoint()}/{run_id}"
f"?min_workers={num_agents}"
f"&max_workers={num_agents}"
)
spec = WorkerSpec(
role="test_trainer",
local_world_size=local_world_size,
fn=fn,
args=args,
rdzv_handler=rdzv_handler,
max_restarts=max_restarts,
monitor_interval=monitor_interval,
)
return spec
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_run_distributed_sum(self):
spec = self._get_worker_spec(fn=_distributed_sum, args=(0,))
agent = LocalElasticAgent(spec, start_method="fork")
agent.run()
class RoleConfig:
__slots__ = ["role", "workers", "num_agents", "workers_num", "role_size"]
def __init__(
self, role: str, workers=None, num_agents: int = 0, workers_num: int = 0
):
self.role = role
self.workers = workers
if workers_num != 0 and num_agents != 0:
self.workers = [workers_num] * num_agents
self.role_size = sum(self.workers)
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_correct_rank_assignment_heterogeneous(self):
roles_config = [
self.RoleConfig("trainer", workers=[1, 2, 3, 4]),
self.RoleConfig("ps", workers=[5, 2]),
# split configuration to run the last one on the main process
self.RoleConfig("master", workers=[8]),
]
self.run_configuration(roles_config, 25)
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_correct_rank_assignment_homogeneous(self):
num_workers = 4
roles_config = [
self.RoleConfig("trainer", num_agents=4, workers_num=num_workers),
self.RoleConfig("ps", num_agents=2, workers_num=num_workers),
# split configuration to run the last one on the main process
self.RoleConfig("master", num_agents=1, workers_num=num_workers),
]
self.run_configuration(roles_config, 28)
def run_configuration(self, roles_config, expected_world_size):
host = self._etcd_server.get_host()
port = self._etcd_server.get_port()
nnodes = sum(len(cfg.workers) for cfg in roles_config)
run_id = str(uuid.uuid4().int)
procs = []
manager = multiprocessing.Manager()
return_dict = manager.dict()
default_args = (run_id, host, port, nnodes, nnodes, _check_rank_assignment, ())
for ind in range(len(roles_config) - 1):
config = roles_config[ind]
for num_workers in config.workers:
p = multiprocessing.Process(
target=_run_agent,
args=(*default_args, num_workers, config.role, return_dict),
)
procs.append(p)
p.start()
# run one on the main process for debugging
config = roles_config[len(roles_config) - 1]
_run_agent(*default_args, config.workers[0], config.role, return_dict)
for i in range(nnodes - 1):
p = procs[i]
p.join()
self.assertEqual(0, p.exitcode)
role_info_dict = {role_info.role: role_info for role_info in roles_config}
self.verify_rank_consistency(return_dict, role_info_dict, expected_world_size)
def verify_rank_consistency(self, return_dict, role_info_dict, expected_world_size):
role_ranks = {}
global_ranks = []
grouped_ranks = {}
for role, res in return_dict.values():
for (
group_rank,
rank,
world_size,
role_rank,
role_world_size,
) in res.values():
role_info_config = role_info_dict[role]
self.assertEqual(expected_world_size, world_size)
self.assertEqual(role_info_config.role_size, role_world_size)
if group_rank not in grouped_ranks:
grouped_ranks[group_rank] = []
grouped_ranks[group_rank].append((rank, role_rank))
global_ranks.append(rank)
if role not in role_ranks:
role_ranks[role] = []
role_ranks[role].append(role_rank)
global_ranks = sorted(global_ranks)
self.assertEqual(list(range(0, expected_world_size)), global_ranks)
for role, role_config_info in role_info_dict.items():
self.assertEqual(
list(range(0, role_config_info.role_size)), sorted(role_ranks[role])
)
# Make sure that each agent assignes consecutive ranks to workes
# The first argument is the global_rank and the second argument
# is role_rank
for ranks_lst in grouped_ranks.values():
self.verify_ranks_sequential(ranks_lst, 0)
self.verify_ranks_sequential(ranks_lst, 1)
def verify_ranks_sequential(self, ranks_pairs, rank_idx):
ranks = sorted(rank_pair[rank_idx] for rank_pair in ranks_pairs)
start_rank, end_rank = ranks[0], ranks[-1]
self.assertEqual(list(range(start_rank, end_rank + 1)), ranks)
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_run_distributed_sum_heterogenous(self):
host = self._etcd_server.get_host()
port = self._etcd_server.get_port()
nnodes = 4
run_id = str(uuid.uuid4().int)
procs = []
default_args = (run_id, host, port, nnodes, nnodes, _distributed_sum, (0,))
for ind in range(nnodes - 1):
p = multiprocessing.Process(
target=_run_agent, args=(*default_args, ind + 1)
)
procs.append(p)
p.start()
# run one on the main process for debugging
_run_agent(*default_args, 8)
for i in range(nnodes - 1):
p = procs[i]
p.join()
self.assertEqual(0, p.exitcode)
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_run_sad_function(self):
spec = self._get_worker_spec(fn=_sad_function, max_restarts=2)
agent = LocalElasticAgent(spec, start_method="fork")
with self.assertRaises(WorkerGroupFailureException) as cm:
agent.run()
excs = cm.exception.get_worker_exceptions()
for i in range(spec.local_world_size):
self.assertTrue(isinstance(excs[i], Exception))
self.assertEqual(WorkerState.FAILED, agent.get_worker_group().state)
self.assertEqual(0, agent._remaining_restarts)
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_run_bipolar_function(self):
spec = self._get_worker_spec(fn=_bipolar_function, max_restarts=2)
agent = LocalElasticAgent(spec, start_method="fork")
with self.assertRaises(Exception):
agent.run()
self.assertEqual(WorkerState.FAILED, agent.get_worker_group().state)
self.assertEqual(0, agent._remaining_restarts)
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_run_check_env_function(self):
spec = self._get_worker_spec(fn=_check_env_function, max_restarts=2)
agent = LocalElasticAgent(spec, start_method="fork")
agent.run()
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_run_check_run_id(self):
def return_run_id():
return os.environ["TORCHELASTIC_RUN_ID"]
spec = self._get_worker_spec(fn=return_run_id, max_restarts=0)
agent = LocalElasticAgent(spec, start_method="fork")
ret = agent.run()
for i in range(spec.local_world_size):
self.assertEqual(spec.rdzv_handler.get_run_id(), ret[i])
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_get_worker_return_values(self):
spec = self._get_worker_spec(fn=_return_rank_times, args=(2,))
agent = LocalElasticAgent(spec, start_method="fork")
ret_vals = agent.run()
self.assertEqual(spec.local_world_size, len(ret_vals))
for i in range(spec.local_world_size):
self.assertEqual(i * 2, ret_vals[i])
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_double_agent_happy(self):
host = self._etcd_server.get_host()
port = self._etcd_server.get_port()
nnodes = 2
run_id = str(uuid.uuid4().int)
procs = []
for _ in range(nnodes - 1):
p = multiprocessing.Process(
target=_run_agent,
args=(run_id, host, port, nnodes, nnodes, _distributed_sum, (0,)),
)
procs.append(p)
p.start()
# run one on the main process for debugging
_run_agent(run_id, host, port, nnodes, nnodes, _distributed_sum, (0,))
for i in range(nnodes - 1):
p = procs[i]
p.join()
self.assertEqual(0, p.exitcode)
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_double_agent_fault_tolerance(self):
host = self._etcd_server.get_host()
port = self._etcd_server.get_port()
nnodes = 2
run_id = str(uuid.uuid4().int)
procs = []
for _ in range(nnodes):
p = multiprocessing.Process(
target=_run_agent,
args=(run_id, host, port, nnodes, nnodes, _distributed_sum, (0,)),
)
procs.append(p)
p.start()
# restart odd agents
for i in range(nnodes):
if i % 2 != 0:
procs[i].kill()
p = multiprocessing.Process(
target=_run_agent,
args=(run_id, host, port, nnodes, nnodes, _distributed_sum, (0,)),
)
procs[i] = p
p.start()
for i in range(nnodes):
p = procs[i]
p.join()
self.assertEqual(0, p.exitcode)
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_double_agent_elastic(self):
host = self._etcd_server.get_host()
port = self._etcd_server.get_port()
min_size = 1
max_size = 2
run_id = str(uuid.uuid4().int)
procs = []
for _ in range(max_size):
p = multiprocessing.Process(
target=_run_agent,
args=(run_id, host, port, min_size, max_size, _distributed_sum, (0,)),
)
procs.append(p)
p.start()
# kill odd agents
for i in range(max_size):
if i % 2 != 0:
procs[i].kill()
for i in range(max_size):
if i % 2 == 0:
p = procs[i]
p.join()
self.assertEqual(0, p.exitcode)
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_torch_rpc(self):
"""
Simple torch rpc example with torchelastic.
Creates two agents (to simulate two node job),
each agent runs a single worker. worker0 calls an rpc_sync on
worker1.
"""
# TODO upstream this to torch.distributed.rpc so that users do not have
# to redundantly set rank as part of name (e.g. worker0) AND also pass
# it explicitly as an argument to rpc.init_rpc
def init_rpc(name_prefix, backend):
rank = int(os.environ["RANK"])
world_size = int(os.environ["WORLD_SIZE"])
rpc.init_rpc(
name=f"{name_prefix}{rank}",
backend=backend,
rank=rank,
world_size=world_size,
)
def worker_0(queue, msg):
init_rpc("worker", BackendType.PROCESS_GROUP)
ret = rpc.rpc_sync(to="worker1", func=echo, args=(msg,))
queue.put(ret)
rpc.shutdown()
def worker_1():
init_rpc("worker", BackendType.PROCESS_GROUP)
rpc.shutdown()
def run_agent(
run_id, etcd_host, etcd_port, start_method, worker_fn, worker_args=()
):
rdzv_handler = dist.rendezvous(
f"etcd://{etcd_host}:{etcd_port}/{run_id}"
f"?min_workers=2"
f"&max_workers=2"
)
spec = WorkerSpec(
role="test_trainer",
local_world_size=1,
fn=worker_fn,
args=worker_args,
rdzv_handler=rdzv_handler,
max_restarts=3,
monitor_interval=1,
)
agent = LocalElasticAgent(spec, start_method)
agent.run()
run_id = str(uuid.uuid4().int)
host = self._etcd_server.get_host()
port = self._etcd_server.get_port()
start_method = "fork"
msg = "hello world"
mp_queue = multiprocessing.get_context(start_method).Queue()
agent0 = multiprocessing.Process(
target=run_agent,
args=(run_id, host, port, start_method, worker_0, (mp_queue, msg)),
)
agent1 = multiprocessing.Process(
target=run_agent, args=(run_id, host, port, start_method, worker_1, ())
)
agent0.start()
agent1.start()
agent0.join()
agent1.join()
self.assertEqual(msg, mp_queue.get())
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_workers_drift_success(self):
host = self._etcd_server.get_host()
port = self._etcd_server.get_port()
nnodes = 2
run_id = str(uuid.uuid4().int)
procs = []
default_args = (run_id, host, port, nnodes, nnodes, _simulate_work)
for _ in range(nnodes - 1):
p = multiprocessing.Process(
target=_run_agent,
args=(*default_args, (10,), 2, "test_trainer", {}, 30),
)
procs.append(p)
p.start()
_run_agent(*default_args, (1,), 2, "test_trainer", {}, 30)
for i in range(nnodes - 1):
p = procs[i]
p.join()
self.assertEqual(0, p.exitcode)
@patch("torchelastic.utils.store.barrier")
@unittest.skipIf(is_tsan(), "test incompatible with tsan")
def test_workers_drift_fail(self, barrier_mock):
host = self._etcd_server.get_host()
port = self._etcd_server.get_port()
nnodes = 2
run_id = str(uuid.uuid4().int)
procs = []
default_args = (run_id, host, port, nnodes, nnodes, _simulate_work)
for _ in range(nnodes - 1):
p = multiprocessing.Process(
target=_run_agent,
args=(*default_args, (60,), 2, "test_trainer", {}, 10),
)
procs.append(p)
p.start()
_run_agent(*default_args, (1,), 2, "test_trainer", {}, 10)
barrier_mock.assert_called_once()
@patch("torchelastic.utils.store.barrier")
def test_barrier_failed(self, barrier_mock):
barrier_mock.side_effect = RuntimeError("test error")
spec = self._get_worker_spec(fn=_happy_function)
agent = LocalElasticAgent(spec, start_method="fork")
agent.run()
barrier_mock.assert_called_once()
| [
"[email protected]"
] | |
827e3bcf2fca26a7b7abc2fc74da531da077f856 | f6078890ba792d5734d289d7a0b1d429d945a03a | /extra/oconnorcollin_24162_1340359_Collin_O'Connor_1607318_ExtraCredit_week7.py | 768973e38dd5ce426badbd6677369e038d6aa08c | [] | no_license | huazhige/EART119_Lab | 1c3d0b986a0f59727ee4ce11ded1bc7a87f5b7c0 | 47931d6f6a2c7bc053cd15cef662eb2f2027712c | refs/heads/master | 2020-05-04T23:40:53.709217 | 2019-06-11T18:30:45 | 2019-06-11T18:30:45 | 179,552,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,750 | py | # -*- coding: utf-8 -*-
"""
Created on Mon May 13 08:26:50 2019
@author: collin O'Connor
"""
import numpy as np
import integrate_utils as int_utils
#==============================================================================
#Question 1
#==============================================================================
"""
Numerical integration of definite integrals:
ex: f(t) = 3*t**2 * exp(t**3)
    F(t) = exp(t**3)
between: a, b with F'(t) = f(t)
"""
#==============================================================================
#fn defs
#==============================================================================
def fct_f(t):
return 3*t**2 * np.exp(t**3)
def fct_F(t):
return np.exp(t**3)
###########integration fn##########
def trapezoidal(fct_x, x0, xn, N):
"""
composite trapezoidal method
    implementation of eq 3.17 pg 60 in Linge & Langtangen
params:
fct_x = compute integral of the fn.
x0, xn = integration bounds
N = number of trapezoids
return:
value of definite integral of fct_x
between x0 and xn
"""
dx = float(xn-x0)/N
    # write the sum as a for loop
f_Integ = 0.5*(fct_x(x0) + fct_x(xn))
for i in range(1, N):
f_Integ += fct_x(x0 + i*dx)
## write sum in vectorized form
#f_Integ = 0.5*(fct_x(x0) + fct_x(xn)) + (fct_x(x0 + dx*np.arange(1, N, 1))).sum()
return dx*f_Integ
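# Quick sanity check (illustrative): trapezoidal(fct_f, 0, 1, 1000) should agree with the
# exact value fct_F(1) - fct_F(0) = exp(1) - 1 ~= 1.71828 computed below, to several decimal places.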
def midpoint( fct_x, x0, xn, N):
"""
Composite Midpoint method, eq. 3.21 page 66 in Linge & Langtangen
:param fct_x: - function whose integral is in question
    :param x0: - lower integration bound
    :param xn: - upper integration bound
    :param N: - number of subintervals, choose a high number for high accuracy
:return: - integral of fct_x between x0 and xn
"""
dx = float( xn-x0)/N
a_xi = x0 + 0.5*dx + np.arange( N)*dx
f_int = dx*( fct_x(a_xi)).sum()
return f_int
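# Illustrative usage (using the np alias imported at the top of this file):
# midpoint(np.sin, 0, np.pi, 1000) evaluates np.sin on the vector of midpoints and
# returns a value very close to the exact integral 2.0.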
#==============================================================================
#parameters
#==============================================================================
xmin, xmax = 0, 1
N = 1000
#==============================================================================
#num integration and plotting
#==============================================================================
#exact solution
f_IntExact = fct_F(xmax) - fct_F(xmin)
#Trapazoidal method numerical approximation
f_IntNum = trapezoidal(fct_f, xmin, xmax, N)
#Midpoint method numerical approximation
f_mid=midpoint(fct_f, xmin, xmax, N)
#compare exact and Numerical
print "Question 1:"
print 'exact integral: ', f_IntExact
print 'Trapazoidal Method Numerical approx.: ', f_IntNum
print 'Midpoint Method Numerical approx.: ', f_mid
print
#==============================================================================
#Question 2
#==============================================================================
"""Compute mean value of fns
and compare to the definite integral:
f(x)=sin(x)
g(x)=2x*exp(x**2)
"""
def fx(x):
return np.sin(x)
def gx(x):
return 2*x * np.exp(x**2)
def mean_val(integral_fx, xmax, xmin):
return (1/(xmax - xmin)) * integral_fx
print "Question 2:"
print 'mean value of f(x): ', round(mean_val(trapezoidal(fx,0, np.pi, 1000), np.pi, 0), 3)
print 'integral of f(x): ', round(trapezoidal(fx,0, np.pi, 1000), 3)
print 'mean value of g(x): ', round(mean_val(trapezoidal(gx, 0, 1, 1000), 1, 0), 3)
print 'Integral of g(x): ', round(trapezoidal(gx, 0, 1, 1000), 3)
print
#==============================================================================
#Question 3
#==============================================================================
#================================================
# fct definition
#================================================
def fct2_xy( x, y):
return (x**2 + y**2)**0.5
def fct_xy( x, y):
return x*(y**2)
def fct_gxy( x, y):
"""
- rectangular domain
return: -1 for points outside
"""
f_retVal = -1
if x >= xmin and x <= xmax and y >= ymin and y <= ymax:
f_retVal = 1
return f_retVal
def fct_Fxy_exact(x, y):
return (0.5*(x**2))+ ((1./3)*(y**3))
def fct_exact(r, theta):
return theta*((r**3)/3.)
#================================================
# parameters
#================================================
xmin, xmax = 0, 2
ymin, ymax = 0, 1.5
rmin, rmax = 0, 2
theta_min, theta_max = 0, 2*np.pi
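# For reference, the "exact solution" prints below simply evaluate the helpers defined above:
#   part a: fct_exact(2, 2*pi) - fct_exact(0, 0) = 2*pi * 2**3 / 3 = 16*pi/3 ~= 16.755
#   part b: fct_Fxy_exact(2, 1.5) - fct_Fxy_exact(0, 0) = 0.5*2**2 + (1/3)*1.5**3 = 3.125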
#================================================
# compute integral
#================================================
#compute definite integral
print "Question 3:"
print ('exact solution part a: ', round(fct_exact(rmax, theta_max) - fct_exact(rmin, theta_min), 3))
print 'monte Carlo solution part a: '
for n in np.arange(100, 1200, 200):
gInt = int_utils.monteCarlo(fct2_xy, fct_gxy, rmin, rmax, theta_min, theta_max, n)
    # in int_utils the MonteCarlo method result was supposed to be squared, but it never was.
gInt = gInt**2
print 'no. random points', n, 'number integral', round(gInt, 3)
print
print('exac. sol part b: ', round(fct_Fxy_exact(xmax, ymax) - fct_Fxy_exact(xmin, ymin), 3))
print 'monte Carlo solution part b: '
for n in np.arange(100, 1200, 200):
fInt=int_utils.monteCarlo(fct_xy, fct_gxy, xmin+1, xmax+1, ymin, ymax, n )
    # in int_utils the MonteCarlo method result was supposed to be squared, but it never was.
fInt = (fInt**2)
print 'no. random points', n, 'number integral', round(fInt, 3)
| [
"[email protected]"
] | |
69fe2635469cacf0543c8bdc6588c35e1ff15509 | aa1972e6978d5f983c48578bdf3b51e311cb4396 | /nitro-python-1.0/nssrc/com/citrix/netscaler/nitro/resource/config/network/vrid6_interface_binding.py | 514e8bd208b3024680ba6c2b0c5d4530d8b2a8a3 | [
"Python-2.0",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | MayankTahil/nitro-ide | 3d7ddfd13ff6510d6709bdeaef37c187b9f22f38 | 50054929214a35a7bb19ed10c4905fffa37c3451 | refs/heads/master | 2020-12-03T02:27:03.672953 | 2017-07-05T18:09:09 | 2017-07-05T18:09:09 | 95,933,896 | 2 | 5 | null | 2017-07-05T16:51:29 | 2017-07-01T01:03:20 | HTML | UTF-8 | Python | false | false | 6,678 | py | #
# Copyright (c) 2008-2016 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class vrid6_interface_binding(base_resource) :
""" Binding class showing the interface that can be bound to vrid6.
"""
def __init__(self) :
self._ifnum = None
self._vlan = None
self._flags = None
self._id = None
self.___count = 0
@property
def id(self) :
r"""Integer value that uniquely identifies a VMAC6 address.<br/>Minimum value = 1<br/>Maximum value = 255.
"""
try :
return self._id
except Exception as e:
raise e
@id.setter
def id(self, id) :
r"""Integer value that uniquely identifies a VMAC6 address.<br/>Minimum value = 1<br/>Maximum value = 255
"""
try :
self._id = id
except Exception as e:
raise e
@property
def ifnum(self) :
r"""Interfaces to bind to the VMAC6, specified in (slot/port) notation (for example, 1/2).Use spaces to separate multiple entries.
"""
try :
return self._ifnum
except Exception as e:
raise e
@ifnum.setter
def ifnum(self, ifnum) :
r"""Interfaces to bind to the VMAC6, specified in (slot/port) notation (for example, 1/2).Use spaces to separate multiple entries.
"""
try :
self._ifnum = ifnum
except Exception as e:
raise e
@property
def flags(self) :
r"""Flags.
"""
try :
return self._flags
except Exception as e:
raise e
@property
def vlan(self) :
r"""The VLAN in which this VRID resides.
"""
try :
return self._vlan
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
r""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(vrid6_interface_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.vrid6_interface_binding
except Exception as e :
raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
if self.id is not None :
return str(self.id)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = vrid6_interface_binding()
updateresource.id = resource.id
updateresource.ifnum = resource.ifnum
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [vrid6_interface_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].id = resource[i].id
updateresources[i].ifnum = resource[i].ifnum
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = vrid6_interface_binding()
deleteresource.id = resource.id
deleteresource.ifnum = resource.ifnum
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [vrid6_interface_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].id = resource[i].id
deleteresources[i].ifnum = resource[i].ifnum
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, id="", option_="") :
r""" Use this API to fetch vrid6_interface_binding resources.
"""
try :
if not id :
obj = vrid6_interface_binding()
response = obj.get_resources(service, option_)
else :
obj = vrid6_interface_binding()
obj.id = id
response = obj.get_resources(service)
return response
except Exception as e:
raise e
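    # Illustrative usage (assuming `client` is an authenticated nitro_service session):
    #   all_bindings = vrid6_interface_binding.get(client)         # fetch all bindings
    #   one_binding  = vrid6_interface_binding.get(client, id="1") # fetch bindings for VRID 1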
@classmethod
def get_filtered(cls, service, id, filter_) :
r""" Use this API to fetch filtered set of vrid6_interface_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = vrid6_interface_binding()
obj.id = id
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, id) :
r""" Use this API to count vrid6_interface_binding resources configued on NetScaler.
"""
try :
obj = vrid6_interface_binding()
obj.id = id
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, id, filter_) :
r""" Use this API to count the filtered set of vrid6_interface_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = vrid6_interface_binding()
obj.id = id
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class vrid6_interface_binding_response(base_response) :
def __init__(self, length=1) :
self.vrid6_interface_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.vrid6_interface_binding = [vrid6_interface_binding() for _ in range(length)]
| [
"[email protected]"
] | |
b1480d429e722377d42d9397f4d3dd57384079cc | 0add471d82edab23894421dc17429da39b92cc73 | /heaviside/ast.py | db8d24717141b5929f91b3a0e6c2f4d5e1c6f583 | [
"Apache-2.0"
] | permissive | MHova/heaviside | fac09ae7608306665ee01a46baa2197dc81d649d | 9ee9e69343c58124b8c7a119888a195794978cd6 | refs/heads/master | 2020-05-30T10:08:11.110234 | 2019-05-30T20:16:52 | 2019-05-30T20:16:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,872 | py | # Copyright 2016 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from collections import OrderedDict
from funcparserlib.parser import (some, a, skip)
from .lexer import Token
from .exceptions import CompileError
from .utils import isstr
#################################################
#### Load AWS Service Integration Definitions####
#################################################
import os
import json
cur_dir = os.path.dirname(__file__)
definitions = os.path.join(cur_dir, 'aws_services.json')
with open(definitions, 'r') as fh:
AWS_SERVICES = json.load(fh)
#################################################
#################################################
# AST Objects
class ASTNode(object):
def __init__(self, token=None):
if token is not None:
self.token = token
else:
self.token = None
@property
def lineno(self):
return self.token.start[0] if self.token else 0
@property
def pos(self):
return self.token.start[1] if self.token else 0
@property
def line(self):
return self.token.line if self.token else ''
def raise_error(self, msg):
raise CompileError(self.lineno,
self.pos,
self.line,
msg)
class ASTValue(ASTNode):
def __init__(self, value, token):
super(ASTValue, self).__init__(token)
self.value = value
def __repr__(self):
return "ASTValue({!r})".format(self.value)
class ASTCompOp(ASTNode):
def __init__(self, var, op, val):
# Use the first token of the expression
super(ASTCompOp, self).__init__(var.token)
self.var = var
self.op = op
self.val = val
def __repr__(self):
return "ASTCompOp({!r} {!r} {!r})".format(self.var, self.op, self.val)
class ASTCompNot(ASTNode):
def __init__(self, not_, comp):
super(ASTCompNot, self).__init__(not_.token)
self.comp = comp
def __repr__(self):
return "ASTCompNot({!r})".format(self.comp)
class ASTCompAndOr(ASTNode):
op = None
def __init__(self, comp, comps):
super(ASTCompAndOr, self).__init__(comp.token)
self.comps = [comp]
for c in comps:
self.comps.append(c)
def __repr__(self):
return "ASTComp{}({!r})".format(self.op, self.comps)
class ASTCompAnd(ASTCompAndOr):
op = 'And'
class ASTCompOr(ASTCompAndOr):
op = 'Or'
class ASTModKV(ASTValue):
def __init__(self, key, value):
super(ASTModKV, self).__init__(value, key.token)
def __repr__(self):
return "<ASTModKV {}:{}>".format(self.name, self.value)
class ASTModNext(ASTModKV):
name = 'Next'
class ASTModTimeout(ASTModKV):
name = 'Timeout'
class ASTModHeartbeat(ASTModKV):
name = 'Heartbeat'
class ASTModInput(ASTModKV):
name = 'Input'
class ASTModResult(ASTModKV):
name = 'Result'
class ASTModOutput(ASTModKV):
name = 'Output'
class ASTModData(ASTModKV):
name = 'Data'
class ASTModParameters(OrderedDict, ASTNode):
name = 'Parameters'
# NOTE: kpv stands for (key, path marker, value)
    # where `path marker` is the token for the `$` that denotes that the
# value contains a JsonPath
def __init__(self, parameters, kpv, kpvs):
OrderedDict.__init__(self)
ASTNode.__init__(self, parameters.token)
self.add_parameter(kpv)
for kpv in kpvs:
self.add_parameter(kpv)
def add_parameter(self, kpv):
k,p,v = kpv
if p is not None:
k.value += '.$' # Parameters that use a JsonPath must have the key
# end with `.$`
self[k] = v
class ASTModRetry(ASTNode):
name = 'Retry'
def __init__(self, retry, errors, interval, max, backoff):
super(ASTModRetry, self).__init__(retry.token)
self.errors = errors
self.interval = interval
self.max = max
self.backoff = backoff
class ASTModCatch(ASTNode):
name = 'Catch'
def __init__(self, catch, errors, path, block):
super(ASTModCatch, self).__init__(catch.token)
self.errors = errors
self.path = path
self.block = block
class ASTModifiers(ASTNode): #??? Subclass dict as well?
def __init__(self, mod, mods):
super(ASTModifiers, self).__init__(mod.token)
self.mods = {}
self.add(mod)
for m in mods:
self.add(m)
def add(self, mod):
t = type(mod)
if t not in self.mods:
self.mods[t] = []
self.mods[t].append(mod)
def update(self, other):
for key in other.mods.keys():
if key not in self.mods:
self.mods[key] = []
self.mods[key].extend(other.mods[key])
def __repr__(self):
return "\n".join(repr(mod) for mod in self.mods.values())
class ASTState(ASTNode):
state_type = ''
valid_modifiers = []
multi_modifiers = [ASTModRetry, ASTModCatch]
def __init__(self, state, block):
super(ASTState, self).__init__(state.token)
self.next = None
self.end = False
if block:
comment, modifiers = block
else:
comment = None
modifiers = None
if comment:
tmp = comment.value.split('\n', 1)
if len(tmp) == 1:
self.name = tmp[0].strip()
self.comment = None
else:
name, comment = tmp
self.name = name.strip()
self.comment = '\n'.join([l.strip() for l in comment.split('\n')])
else:
self.name = 'Line{}'.format(self.lineno)
self.comment = None
def get(type_):
if modifiers is None:
return None
vals = modifiers.mods.get(type_)
if vals is None:
return None
del modifiers.mods[type_]
name = type_.name if hasattr(type_, 'name') else str(type_.__name__)
if type_ not in self.valid_modifiers:
vals[0].raise_error("{} state cannot contain a {} modifier".format(self.state_type, name))
if type_ not in self.multi_modifiers:
if len(vals) > 1:
vals[1].raise_error("{} state can only contain one {} modifier".format(self.state_type, name))
vals = vals[0]
return vals
self.next = get(ASTModNext)
self.timeout = get(ASTModTimeout)
self.heartbeat = get(ASTModHeartbeat)
self.input = get(ASTModInput)
self.result = get(ASTModResult)
self.output = get(ASTModOutput)
self.data = get(ASTModData)
self.parameters = get(ASTModParameters)
self.retry = get(ASTModRetry)
self.catch = get(ASTModCatch)
if modifiers is not None and len(modifiers.mods) > 0:
type_ = list(modifiers.mods.keys())[0]
modifiers.mods[type_][0].raise_error("Unknown state modifer '{}'".format(type_))
def __repr__(self):
return "<ASTState {}:{}>".format(self.state_type, self.name)
class ASTStatePass(ASTState):
state_type = 'Pass'
valid_modifiers = [ASTModInput, ASTModResult, ASTModOutput, ASTModData]
class ASTStateGoto(ASTStatePass):
"""Custom version of Pass that exposes / sets the 'next' modifier"""
valid_modifiers = [ASTModNext]
def __init__(self, state, label):
# Create the state_modifer block manually
block = (None,
ASTModifiers(
ASTModNext(label, label.value),
[]
)
)
super(ASTStateGoto, self).__init__(state, block)
class ASTStateSuccess(ASTState):
state_type = 'Succeed'
valid_modifiers = [ASTModInput, ASTModOutput]
class ASTStateFail(ASTState):
state_type = 'Fail'
def __init__(self, state, error, cause, block):
super(ASTStateFail, self).__init__(state, block)
self.error = error
self.cause = cause
class ASTStateTask(ASTState):
state_type = 'Task'
valid_modifiers = [ASTModTimeout,
ASTModHeartbeat,
ASTModInput,
ASTModResult,
ASTModOutput,
ASTModParameters,
ASTModRetry,
ASTModCatch]
valid_services = ['Arn',
'Lambda',
'Activity']
def __init__(self, service, function, name, block):
super(ASTStateTask, self).__init__(service, block)
if service.value not in self.valid_services and \
service.value not in AWS_SERVICES.keys():
service.raise_error('Invalid Task service')
if function is None:
if service.value in ('Lambda', 'Activity', 'Arn'):
if name is None:
service.raise_error('{} task requires a function name argument'.format(service.value))
function = name
name = None
else:
service.raise_error('{} task requires a function to call'.format(service.value))
else:
if service.value in ('Lambda', 'Activity', 'Arn'):
function.raise_error('Unexpected function name')
else:
try:
function.lookup = function.value # Save value for looking up when checking kwargs
function.value = AWS_SERVICES[service.value][function.value]['name']
except KeyError:
function.raise_error('Invalid Task function')
if name is not None:
name.raise_error('Unexpected argument')
if service.value == 'Arn' and not function.value.startswith('arn:aws:'):
function.raise_error("ARN must start with 'arn:aws:'")
if service.value in ('Lambda', 'Activity') and self.parameters is not None:
tuple(self.parameters.keys())[0].raise_error('Unexpected keyword argument')
if service.value not in ('Lambda', 'Activity', 'Arn'):
required = AWS_SERVICES[service.value][function.lookup]['required_keys']
required = copy.copy(required) # will be mutating to determine missing required arguments
optional = AWS_SERVICES[service.value][function.lookup]['optional_keys']
sync = AWS_SERVICES[service.value][function.lookup]['sync']
if self.parameters:
# self.parameters can be None either if no `parameters:` block is provided
# or if there is a syntax error in the `parameters:` block
for key in self.parameters.keys():
k = key.value
if k.endswith('.$'):
                        k = k[:-2] # remove the `.$`, which denotes that the key uses a JsonPath
if k == 'sync':
sync = self.parameters[key]
if type(sync) != bool:
key.raise_error("Synchronous value must be a boolean")
del self.parameters[key]
elif k in required:
required.remove(k)
elif k not in optional:
key.raise_error("Invalid keyword argument")
if sync == True:
function.value += '.sync'
if len(required) > 0:
missing = ", ".join(required)
function.raise_error("Missing required keyword arguments: {}".format(missing))
self.service = service
self.function = function
class ASTStateWait(ASTState):
state_type = 'Wait'
valid_modifiers = [ASTModInput, ASTModOutput]
def __init__(self, state, wait_type, wait_val, block):
super(ASTStateWait, self).__init__(state, block)
self.type = wait_type
self.val = wait_val
class ASTStateChoice(ASTState):
state_type = 'Choice'
valid_modifiers = [ASTModInput, ASTModOutput]
DEFAULT = None
def __init__(self, state, comment, transform):
super(ASTStateChoice, self).__init__(state, (comment, transform))
# Use an OrderedDict so that logic comparisons happen in the
# same order as in the source file
self.branches = OrderedDict()
# DP ???: Should ASTStateChoice subclasses override the state_type value?
class ASTStateWhile(ASTStateChoice):
def __init__(self, state, comp, block, transform):
comment, states = block
super(ASTStateWhile, self).__init__(state, comment, transform)
self.branches[comp] = states
class ASTStateIfElse(ASTStateChoice):
def __init__(self, state, comp, block, elif_, else_, transform):
comment, states = block
super(ASTStateIfElse, self).__init__(state, comment, transform)
self.branches[comp] = states
if elif_ is not None:
for comp_, states_ in elif_:
self.branches[comp_] = states_
if else_ is not None:
self.branches[ASTStateChoice.DEFAULT] = else_
class ASTStateSwitch(ASTStateChoice):
def __init__(self, state, var, comment, cases, default, transform):
super(ASTStateSwitch, self).__init__(state, comment, transform)
class EQToken(object):
def __init__(self):
self.value = '=='
eq = EQToken()
for case, val, states in cases:
comp = ASTCompOp(var, eq, val)
self.branches[comp] = states
if default is not None:
default, states = default
self.branches[ASTStateChoice.DEFAULT] = states
class ASTParallelBranch(ASTNode):
def __init__(self, parallel, states):
super(ASTParallelBranch, self).__init__(parallel.token)
self.states = states
class ASTStateParallel(ASTState):
state_type = 'Parallel'
valid_modifiers = [ASTModInput,
ASTModResult,
ASTModOutput,
ASTModRetry,
ASTModCatch]
def __init__(self, state, block, parallels, transform, error):
comment, states = block
if transform is not None and error is not None:
transform.update(error)
elif transform is None and error is not None:
transform = error
super(ASTStateParallel, self).__init__(state, (comment, transform))
self.branches = [ASTParallelBranch(state, states)]
if parallels is not None:
for parallel, states_ in parallels:
self.branches.append(ASTParallelBranch(parallel, states_))
class ASTModVersion(ASTModKV):
pass
class ASTStepFunction(ASTNode):
def __init__(self, comment, version, timeout, states):
super(ASTStepFunction, self).__init__() # ???: use the first states's token?
self.comment = comment
self.version = version
self.timeout = timeout
self.states = states
def __repr__(self):
return "\n".join(repr(state) for state in self.states)
##############################
# AST Modification Functions #
TERMINAL_STATES = (
ASTStateSuccess,
ASTStateFail
)
def link_branch(branch):
"""Helper method for link() that reassigns the results to the given branch"""
if hasattr(branch, 'states'):
branch.states = link(branch.states)
else:
branch.raise_error("Trying to link non-branch state")
return branch
def link(states, final=None):
"""AST Transform function that links together the states of a branch
Performs the following actions:
* Sets the next / end attributes for all encountered ASTState objects
* If the final state is a ASTStateChoice and there is no default state
one is created, as you cannot terminate on a Choice state
* Makes the ASTStateWhile into a full loop
* If there is a Catch modifier or the state is a Choice state the sub-states
      for each are recursively linked and pulled up to the current level
    * The branches for all Parallel states are recursively linked
Args:
states (list) : List of ASTState objects
final (String) : Name of the next state to link the final state to
or None to have the final state terminate
Returns:
list : List of ASTState objects with end/next set
Note: This is a different list than the input list
"""
linked = []
total = len(states)
for i in range(total):
state = states[i]
linked.append(state)
next_ = states[i+1].name if i+1 < total else final
if state.next is not None:
pass # State has already been linked
elif isinstance(state, TERMINAL_STATES):
pass
elif isinstance(state, ASTStateChoice):
if ASTStateChoice.DEFAULT not in state.branches:
next__ = next_ # prevent branches from using the new end state
if next__ is None:
# Choice cannot be terminal state, add a Success state to
# terminate on
next__ = ASTStateSuccess(state, None)
next__.name = state.name + "Default"
linked.append(next__)
next__ = next__.name
state.branches[ASTStateChoice.DEFAULT] = next__
# Point the last state of the loop to the conditional, completing the loop construct
if isinstance(state, ASTStateWhile):
key = list(state.branches.keys())[0]
state_ = state.branches[key][-1]
if not isinstance(state_, TERMINAL_STATES):
state_ = ASTStatePass(state, None)
state_.name = state.name + "Loop"
state_.next = state.name
state.branches[key].append(state_)
else:
state.end = next_ is None
state.next = next_
if state.catch is not None:
for catch in state.catch:
states_ = catch.block
linked_ = link(states_, final=next_)
catch.next = linked_[0].name
linked.extend(linked_)
# Different states use the branches variable in different ways
if isinstance(state, ASTStateChoice):
for key in state.branches:
states_ = state.branches[key]
if isstr(states_):
continue # already linked
linked_ = link(state.branches[key], final=next_)
# convert the branch from a list of states to the name of the next state
# this is done because the branch states are moved to the appropriate
# location for the step function
state.branches[key] = linked_[0].name
linked.extend(linked_)
elif isinstance(state, ASTStateParallel):
for branch in state.branches:
link_branch(branch)
return linked
MAX_NAME_LENGTH = 128
def check_names(branch):
"""Recursivly checks all state names to ensure they are valid
Checks performed:
* Name is not greater than 128 characters
* No duplicate state names
Args:
branch (list): List of ASTState objects
Raises:
CompileError : If any of the checks fail
"""
if not hasattr(branch, 'states'):
branch.raise_error("Trying to check names for non-branch state")
to_process = [branch.states]
while len(to_process) > 0:
states = to_process.pop(0)
names = set() # State names are unique to the branch
for state in states:
if len(state.name) > MAX_NAME_LENGTH:
state.raise_error("Name exceedes {} characters".format(MAX_NAME_LENGTH))
if state.name in names:
state.raise_error("Duplicate state name '{}'".format(state.name))
names.add(state.name)
if isinstance(state, ASTStateParallel):
for branch in state.branches:
to_process.append(branch.states)
def resolve_arns(branch, region = '', account_id = ''):
"""AST Transform that sets the `arn` attribute for ASTStateTasks
Args:
branch (list): List of ASTState objects
region (str): AWS Region where the Lambdas / Activities reside
account_id (str): AWS Account ID where the Lambdas / Activities reside
"""
if not hasattr(branch, 'states'):
branch.raise_error("Trying to resolve arns for non-branch state")
for state in branch.states:
if isinstance(state, ASTStateTask):
if state.service.value == 'Arn':
# ARN value already checked for 'arn:aws:' prefix in ASTStateTask constructor
state.arn = state.function.value
else:
# arn:partition:service:region:account:task_type:name
if state.service.value == 'Lambda':
service = 'lambda'
task_type = 'function'
else:
service = 'states'
task_type = state.service.value.lower()
if state.service.value not in ('Lambda', 'Activity'):
region = ''
account_id = ''
parts = ['arn', 'aws',
service,
region,
account_id,
task_type,
state.function.value]
state.arn = ":".join(parts)
elif isinstance(state, ASTStateParallel):
for branch in state.branches:
resolve_arns(branch, region, account_id)
def verify_goto_targets(branch):
"""Recursivly checks that all Goto states target valid state names
Valid state names are those states in the current branch. This means that
a Goto cannot target states in another parallel branch or from a parallel
branch to the main body of the Step Function
Args:
branch (list): List of ASTState objects
Raises:
CompileError : If a Goto state targets an invalid state
"""
if not hasattr(branch, 'states'):
branch.raise_error("Trying to check goto targets for non-branch state")
to_process = [branch.states]
while len(to_process) > 0:
states = to_process.pop(0)
names = set() # Need to know all of the valid state names for the branch
for state in states:
names.add(state.name)
if isinstance(state, ASTStateParallel):
for branch in state.branches:
to_process.append(branch.states)
for state in states:
if isinstance(state.next, ASTModNext):
if state.next.value not in names:
state.next.raise_error("Goto target '{}' doesn't exist".format(state.next.value))
class StateVisitor(object):
"""Generic base class for heaviside users to create a visitor that can modify
ASTStateTasks
"""
def dispatch(self, state):
"""Dispatch the given state to the approprate handler function
Args:
state (ASTState): State to dispatch
"""
if isinstance(state, ASTStateTask):
self.handle_task(state)
else:
raise ValueError('State type {} not supported'.format(type(state)))
def visit(self, branch):
"""Visit all states in all branches of the state machine and dispatch
them to be handled the subclass
Args:
branch (list): List of ASTState objects
"""
if not hasattr(branch, 'states'):
raise ValueError("Trying to visit non-branch state: {}".format(branch))
for state in branch.states:
self.dispatch(state)
if isinstance(state, ASTStateParallel):
for branch in state.branches:
self.visit(branch)
def handle_task(self, state):
"""ASTStateTask handler function placeholder
Args:
state (ASTStateTask): State to handle
"""
pass
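# Illustrative sketch (not part of the library): a user-defined visitor overrides handle_task(),
# for example:
#
#   class ArnPrinter(StateVisitor):
#       def handle_task(self, state):
#           print(state.arn)  # assumes resolve_arns() has already been run on the branch
#
# Note that dispatch() as written raises ValueError for any non-Task state, so visit() is only
# safe on branches whose states are all ASTStateTask (or if dispatch() is also overridden).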
| [
"[email protected]"
] | |
4e6f7728e1ccc0ee08f9eab26f26539c32f245f1 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02779/s326665097.py | af3b88d1cfb1a25941fa607ee97b983b715cf65f | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 221 | py | from collections import Counter
n = int(input())
a = list(map(int, input().split()))
c = Counter(a)
new = c.values()
for i in new:
if i != 1:
re = "NO"
break
else:
re = "YES"
print(re)
| [
"[email protected]"
] | |
2bc1c55aa465d41767f5a4a17e88f2902fa650a2 | 115b5356242176b8873ae7e43cd313e41cbd0ee6 | /webstuff/webscraper/tidytext.py | 057e7c3a5a8d77e0574b55b38fb0fe5b7a3b444a | [] | no_license | squeakus/bitsandbytes | b71ec737431bc46b7d93969a7b84bc4514fd365b | 218687d84db42c13bfd9296c476e54cf3d0b43d2 | refs/heads/master | 2023-08-26T19:37:15.190367 | 2023-07-18T21:41:58 | 2023-07-18T21:42:14 | 80,018,346 | 2 | 4 | null | 2022-06-22T04:08:35 | 2017-01-25T13:46:28 | C | UTF-8 | Python | false | false | 976 | py | from BeautifulSoup import BeautifulSoup
import re
page = open('out2.txt','r')
for idx,line in enumerate(page):
parts = line.split(';')
for part in parts:
#print part, '\n'
if part.startswith('var point = new GLatLng'):
print "\n", part.lstrip('var point = new GLatLng')
m = re.search('table(.+?)table', line)
if m:
found = m.group(1)
found = '<table' + found +'table>'
found = found.replace('\\','')
soup = BeautifulSoup(found)
info = soup.findAll('tr',{'class':'wind_row'})
name = soup.findAll('a')
print name[0].text
for data in info:
direction = str(data.find('img'))
direction = direction.rstrip('.png" />')
direction = direction.lstrip('<img src="images/wind/')
print direction
n = re.search('Wind:(.+?)km', str(data))
if n:
speed = n.group(1)
print speed
| [
"[email protected]"
] | |
d091376ea903c1328ac580659f780419ba14131f | 5f834f8aa0603f4f7adc56fdcd5e227538931f81 | /diab_logisReg.py | 2ce02d3f7c7d13896c4c7c0870cb4b25f1af7a59 | [] | no_license | Kamal-prog-code/HealthCare | d9a613bcb315a04b14feead97bb4367034f91606 | 2d2fe464a5d25c1373634663dc1eaf07a9064a30 | refs/heads/main | 2023-01-20T22:17:55.157525 | 2020-12-05T20:50:03 | 2020-12-05T20:50:03 | 318,627,358 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,462 | py | from pyspark.ml.feature import VectorAssembler
from pyspark.sql import SparkSession
from pyspark.ml.feature import StandardScaler
from pyspark.ml.classification import LogisticRegression
from pyspark.mllib.evaluation import BinaryClassificationMetrics
from pyspark.ml.classification import RandomForestClassifier
import pickle
import os
spark = SparkSession.builder.appName('HSP').getOrCreate()
df=spark.read.csv('hdfs://localhost:9000/user/BigDataProj/diab.csv',inferSchema=True,header=True)
from pyspark.sql.functions import col
from sklearn.linear_model import LogisticRegression
new_data = df.select(*(col(c).cast("float").alias(c) for c in df.columns))
from pyspark.sql.functions import col,count,isnan,when
from sklearn.preprocessing import StandardScaler
new_data.select([count(when(col(c).isNull(),c)).alias(c) for c in new_data.columns]).show()
cols=new_data.columns
cols.remove("Outcome")
assembler = VectorAssembler(inputCols=cols,outputCol="features")
data=assembler.transform(new_data)
# data.select("features",'Outcome').show(truncate=False)
train, test = df.randomSplit([0.7, 0.3])
x_col = new_data.columns
x_train = train.toPandas()[x_col[:-1]].values
y_train = train.toPandas()['Outcome'].values
sc = StandardScaler()
x_train = sc.fit_transform(x_train)
cls = LogisticRegression()
cls.fit(x_train,y_train)
save_path = 'prediction/'
completeName = os.path.join(save_path, "dblogR.pkl")
pickle.dump(cls, open(completeName, 'wb'))
| [
"[email protected]"
] | |
b25f51bd9909f386f89f3058a2323e1d1b8c133f | 6c2608bc87b522da77c792e20330989de17b3005 | /Chap-7/ex179.py | 43c10f227985eb4652d2196644fcc6bc8c504dfe | [] | no_license | AleByron/AleByron-The-Python-Workbook-second-edition | 8a0b408c1bbd90c82e6b837fc898ee10341ca8fa | 491b2fd394aa04e29a4b2dbe9a615c547e239028 | refs/heads/main | 2023-01-13T21:01:17.757669 | 2020-11-11T01:29:28 | 2020-11-11T01:29:28 | 306,487,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | def square(n,g):
    # Newton's (Heron's) method for sqrt(n): stop once g*g is within 1e-12 of n
    if abs(n-g**2)<10**-12:
return g
else:
g = square(n,(g+(n/g))/2)
return g
def main():
n = 32
g = 1
print(square(n,g))
main() | [
"[email protected]"
] | |
bfb90c8755e3b83e9062e88376453a3cfeeee7ec | 9c2edc273db48dcb6d31a937510476b7c0b0cc61 | /pyopengl_sample/tutorial1.py | 0fd92b98d815dd26d6457ba6f9ac33791867e7e0 | [] | no_license | miyamotok0105/python_sample | 4d397ac8a3a723c0789c4c3e568f3319dd754501 | 77101c981bf4f725acd20c9f4c4891b29fbaea61 | refs/heads/master | 2022-12-19T22:53:44.949782 | 2020-05-05T05:09:22 | 2020-05-05T05:09:22 | 81,720,469 | 1 | 0 | null | 2022-11-22T02:22:55 | 2017-02-12T11:15:08 | Jupyter Notebook | UTF-8 | Python | false | false | 391 | py | #!/usr/bin/python
from OpenGL.GL import *
from OpenGL.GLUT import *
import sys  # needed for sys.argv passed to glutInit below
def draw():
glClearColor(1.0, 0.0, 0.0, 0.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glFlush()
glutSwapBuffers()
glutInit(sys.argv)
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
glutInitWindowSize(320, 240)
glutCreateWindow("PyOpenGL 1")
glutDisplayFunc(draw)
glutMainLoop()
| [
"[email protected]"
] | |
0d918889f8d20d3a4695849eb65eab1ae2ad9c9d | edfd1db2b48d4d225bc58be32fbe372a43415112 | /team-task/airflow2.0/dags/efirmant/lesson3.challenge2.py | ea755ca01b0b78617310f0d87c4b0b0748206373 | [] | no_license | rwidjojo/airflow-training | ed83cb9e97ca85ef06de1426f2f41014881a1f22 | ac82040d8ddc3859df5576eee08d397e824016f1 | refs/heads/main | 2023-08-12T21:01:17.672059 | 2021-01-04T09:17:48 | 2021-01-04T09:17:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 915 | py | import logging
from datetime import timedelta
from airflow import DAG
from airflow.utils.dates import days_ago
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
from airflow.hooks.postgres_hook import PostgresHook
owner = 'efirmant' # Replace with your short name
default_args = {
'owner': owner,
'depends_on_past': False,
'start_date': days_ago(2),
}
dag = DAG(
f'{owner}.lesson3.challenge2',
default_args=default_args,
description='Read data from postgresql',
schedule_interval=None,
)
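# Note: with schedule_interval=None this DAG never runs on a schedule; trigger it manually from
# the UI or (assuming the Airflow 2 CLI) with: airflow dags trigger efirmant.lesson3.challenge2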
def read_data():
db_conn = PostgresHook(postgres_conn_id='efirmant_posgres2')
result = db_conn.get_records('SELECT order_date, count(order_id) from efirmant_orders GROUP BY order_date')
for row in result:
logging.info(row)
read_task = PythonOperator(
task_id="read",
python_callable=read_data,
dag=dag
) | [
"[email protected]"
] | |
3b97278167640c790740fbd6e9a435d1e87ce6e0 | baaa8c9486e02f4232f4926cf4e1a2eeee1199b4 | /accounts/admin.py | 2395fb1f93dfca90cba93acc7edf2da53b6c172c | [] | no_license | bondarenkoav/helpdesk | b2be867605d484c34aaea4d8bea876c633947f14 | 866ea2dc6ee5182d6310d800b301270b38490fd2 | refs/heads/master | 2023-01-08T09:44:15.852016 | 2022-12-28T10:53:39 | 2022-12-28T10:53:39 | 93,615,791 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,047 | py | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from accounts.models import Profile
class ProfileInline(admin.StackedInline):
model = Profile
can_delete = False
verbose_name = u'Профиль'
verbose_name_plural = u'Профиль'
fk_name = 'user'
class CustomUserAdmin(UserAdmin):
inlines = (ProfileInline, )
def get_inline_instances(self, request, obj=None):
if not obj:
return list()
return super(CustomUserAdmin, self).get_inline_instances(request, obj)
# class ProfileInline(admin.StackedInline):
# model = Profile
# can_delete = False
# verbose_name = u'Профиль'
# verbose_name_plural = u'Профиль'
# fk_name = 'user'
#
#
# # @admin.register(User)
# class CustomUserAdmin(UserAdmin):
# inlines = (ProfileInline, )
# list_display = ('username', 'last_name', 'first_name', 'is_active',
# 'get_phone', 'get_birthday', 'get_groups', 'get_location')
# list_filter = ('is_active', 'groups')
# search_fields = ('username', 'first_name', 'last_name')
#
# list_select_related = True
#
# def get_groups(self, instance):
# list_groups = ''
# for group in instance.groups.all():
# if list_groups == '':
# list_groups = group.name
# else:
# list_groups = list_groups + ', ' + group.name
# return list_groups
# get_groups.short_description = u'Группы'
#
# def get_location(self, instance):
# return instance.profile.location
# get_location.short_description = u'Город'
#
# def get_birthday(self, instance):
# return instance.profile.birthday
# get_birthday.short_description = u'Дата рождения'
#
# def get_phone(self, instance):
# return instance.profile.phone
# get_phone.short_description = u'Номер'
admin.site.unregister(User)
admin.site.register(User, CustomUserAdmin) | [
"[email protected]"
] | |
8e291920bc9258758fe57e54877cada173a13eef | 63bf6161532eefa72aa3be8b01cde601b08507dc | /python-mapping-example/fhir_model_generator/tests/model/slot_tests.py | ad3cec096349f05c2c4414e7b0a4ae6fc7aac7a8 | [
"Apache-2.0"
] | permissive | Healthedata1/mFHIR | 4ef370b87e03e973918e5683977d32fe262655bc | 1b4ea441cfa08b661416a3badedf7e90f2809163 | refs/heads/master | 2022-12-10T21:07:03.948406 | 2021-06-18T01:58:23 | 2021-06-18T01:58:23 | 129,964,251 | 9 | 5 | null | 2022-12-09T05:23:54 | 2018-04-17T20:57:15 | HTML | UTF-8 | Python | false | false | 6,767 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.1-9346c8cc45 on 2020-02-10.
# 2020, SMART Health IT.
import os
import io
import unittest
import json
from model import slot
from model.fhirdate import FHIRDate
class SlotTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or \
os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'fhir-parser', 'downloads'))
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("Slot", js["resourceType"])
return slot.Slot(js)
def testSlot1(self):
inst = self.instantiate_from("slot-example-busy.json")
self.assertIsNotNone(inst, "Must have instantiated a Slot instance")
self.implSlot1(inst)
js = inst.as_json()
self.assertEqual("Slot", js["resourceType"])
inst2 = slot.Slot(js)
self.implSlot1(inst2)
def implSlot1(self, inst):
self.assertEqual(inst.comment, "Assessments should be performed before requesting appointments in this slot.")
self.assertEqual(inst.end.date, FHIRDate("2013-12-25T09:15:00Z").date)
self.assertEqual(inst.end.as_json(), "2013-12-25T09:15:00Z")
self.assertEqual(inst.id, "1")
self.assertEqual(inst.identifier[0].system, "http://example.org/identifiers/slots")
self.assertEqual(inst.identifier[0].value, "123132")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertTrue(inst.overbooked)
self.assertEqual(inst.serviceCategory[0].coding[0].code, "17")
self.assertEqual(inst.serviceCategory[0].coding[0].display, "General Practice")
self.assertEqual(inst.start.date, FHIRDate("2013-12-25T09:00:00Z").date)
self.assertEqual(inst.start.as_json(), "2013-12-25T09:00:00Z")
self.assertEqual(inst.status, "busy")
self.assertEqual(inst.text.status, "generated")
def testSlot2(self):
inst = self.instantiate_from("slot-example.json")
self.assertIsNotNone(inst, "Must have instantiated a Slot instance")
self.implSlot2(inst)
js = inst.as_json()
self.assertEqual("Slot", js["resourceType"])
inst2 = slot.Slot(js)
self.implSlot2(inst2)
def implSlot2(self, inst):
self.assertEqual(inst.appointmentType.coding[0].code, "WALKIN")
self.assertEqual(inst.appointmentType.coding[0].display, "A previously unscheduled walk-in visit")
self.assertEqual(inst.appointmentType.coding[0].system, "http://terminology.hl7.org/CodeSystem/v2-0276")
self.assertEqual(inst.comment, "Assessments should be performed before requesting appointments in this slot.")
self.assertEqual(inst.end.date, FHIRDate("2013-12-25T09:30:00Z").date)
self.assertEqual(inst.end.as_json(), "2013-12-25T09:30:00Z")
self.assertEqual(inst.id, "example")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.serviceCategory[0].coding[0].code, "17")
self.assertEqual(inst.serviceCategory[0].coding[0].display, "General Practice")
self.assertEqual(inst.serviceType[0].coding[0].code, "57")
self.assertEqual(inst.serviceType[0].coding[0].display, "Immunization")
self.assertEqual(inst.specialty[0].coding[0].code, "408480009")
self.assertEqual(inst.specialty[0].coding[0].display, "Clinical immunology")
self.assertEqual(inst.start.date, FHIRDate("2013-12-25T09:15:00Z").date)
self.assertEqual(inst.start.as_json(), "2013-12-25T09:15:00Z")
self.assertEqual(inst.status, "free")
self.assertEqual(inst.text.status, "generated")
def testSlot3(self):
inst = self.instantiate_from("slot-example-unavailable.json")
self.assertIsNotNone(inst, "Must have instantiated a Slot instance")
self.implSlot3(inst)
js = inst.as_json()
self.assertEqual("Slot", js["resourceType"])
inst2 = slot.Slot(js)
self.implSlot3(inst2)
def implSlot3(self, inst):
self.assertEqual(inst.comment, "Dr Careful is out of the office")
self.assertEqual(inst.end.date, FHIRDate("2013-12-25T09:45:00Z").date)
self.assertEqual(inst.end.as_json(), "2013-12-25T09:45:00Z")
self.assertEqual(inst.id, "3")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.serviceCategory[0].coding[0].code, "17")
self.assertEqual(inst.serviceCategory[0].coding[0].display, "General Practice")
self.assertEqual(inst.start.date, FHIRDate("2013-12-25T09:30:00Z").date)
self.assertEqual(inst.start.as_json(), "2013-12-25T09:30:00Z")
self.assertEqual(inst.status, "busy-unavailable")
self.assertEqual(inst.text.status, "generated")
def testSlot4(self):
inst = self.instantiate_from("slot-example-tentative.json")
self.assertIsNotNone(inst, "Must have instantiated a Slot instance")
self.implSlot4(inst)
js = inst.as_json()
self.assertEqual("Slot", js["resourceType"])
inst2 = slot.Slot(js)
self.implSlot4(inst2)
def implSlot4(self, inst):
self.assertEqual(inst.comment, "Dr Careful is out of the office")
self.assertEqual(inst.end.date, FHIRDate("2013-12-25T10:00:00Z").date)
self.assertEqual(inst.end.as_json(), "2013-12-25T10:00:00Z")
self.assertEqual(inst.id, "2")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.serviceCategory[0].coding[0].code, "17")
self.assertEqual(inst.serviceCategory[0].coding[0].display, "General Practice")
self.assertEqual(inst.start.date, FHIRDate("2013-12-25T09:45:00Z").date)
self.assertEqual(inst.start.as_json(), "2013-12-25T09:45:00Z")
self.assertEqual(inst.status, "busy-tentative")
self.assertEqual(inst.text.status, "generated")
if __name__ == '__main__':
unittest.main() | [
"[email protected]"
] | |
63cf4e7fc790f00047e1d1b4a59e089134a6a4ce | 1113e8eec4ccbbcd00c6a9b5466c5239b6f0eb03 | /cpos/foundation/_callable/core.py | d5bcbc5bd5bde2ddfb68fa7a980ecfe3e94c65cb | [] | no_license | yizhong120110/CPOS | a05858c84e04ce4aa48b3bfb43ee49264ffc5270 | 68ddf3df6d2cd731e6634b09d27aff4c22debd8e | refs/heads/master | 2021-09-01T17:59:53.802095 | 2017-12-28T05:43:06 | 2017-12-28T05:43:06 | 106,247,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,495 | py | # -*- coding: utf-8 -*-
import textwrap
import sys
import os
from ..substrate.utils.logger import logger
from ..substrate.interfaces import Clarified
class Callable(object):
def __init__(self, py_code, fragment_name=''):
self.d = {'py_code':py_code,'fragment_name':fragment_name}
def run(self, np,np_local):
"""
# 在np中注册py_code
"""
try:
#logger.ods (str(self.d['py_code']) , lv = 'dev' , cat = 'foundation.callable')
exec(self.d['py_code'],np,np_local)
return True
except:
logger.oes("callable error:" , lv = 'error' , cat = 'foundation.callable')
return False
def get_name (self):
return self.d['fragment_name']
class DynamicRuntime(Clarified):
NAME = "DynamicRuntime"
    # np_init_code and upd_np should be deprecated; they are kept only for compatibility with old code.
    # Runtime and Callable should be used as the test cases below show.
def __init__ (self,np_init_code='',upd_np={}, np = {}, np_local = None):
self.np = np
self.np_local = np_local
self.np_init_code = np_init_code
self.prepare_environment()
self.np.update(upd_np)
def call(self,callable_object):
return callable_object.run(self.np,self.np_local)
def prepare_environment(self):
ca = Callable(textwrap.dedent(self.np_init_code.replace('\r', '')))
self.call(ca)
return True
def last_result (self):
# equel to the "_" variable in the py console.
if '_' in (self.np.keys()):
return self.np['_']
return None
def var (self,var_name):
# equel to the "_" variable in the py console.
if var_name in (self.np.keys()):
return self.np[var_name]
return None
def statement_dynamic_call (statement = '', runtime = None):
# args like this : p1=value,p2=value,p3=value , in string.
dr = runtime or DynamicRuntime()
if statement != '':
if not dr.call( Callable( statement ) ):
return None
return dr
def direct_dynamic_call (module_name = '',func_name = '',args = '', runtime = None):
# args like this : p1=value,p2=value,p3=value , in string.
dr = runtime or DynamicRuntime()
if dr.var('_') is None:
dr = statement_dynamic_call('_ = None',dr)
if module_name != '':
statement = 'from %s import %s' % (module_name,'*' if func_name == '' else func_name)
dr = statement_dynamic_call(statement,dr)
if func_name != '' and func_name != '*':
statement = '_ = %s(%s) or _'%(func_name, args)
dr = statement_dynamic_call(statement,dr)
if not dr:
return None
return dr
def direct_dynamic_call_pyfile (pyfile='' , root='' ,func_name = '',args = '', runtime = None):
# args like this : p1=value,p2=value,p3=value , in string.
dr = runtime or DynamicRuntime()
if dr.var('_') is None:
dr = statement_dynamic_call('_ = None',dr)
if pyfile != '':
root = os.path.abspath(root) + os.sep
pyfile = os.path.abspath(os.path.join(root, pyfile.strip('/\\')))
statement = open(pyfile,mode='rb').read()
dr = statement_dynamic_call(statement,dr)
if func_name != '':
statement = '_ = %s(%s) or _'%(func_name, args)
dr = statement_dynamic_call(statement,dr)
if not dr:
return None
return dr
scall = statement_dynamic_call
dcall = direct_dynamic_call
dcallpy = direct_dynamic_call_pyfile
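# Minimal usage sketch for the aliases above (illustrative only; 'math' and
# 'sqrt' are stand-ins for any importable module and callable):
#
#   dr = dcall('math', 'sqrt', '16')      # from math import sqrt; _ = sqrt(16) or _
#   print(dr.last_result())               # -> 4.0
#   dr = scall('x = 1 + 1', dr)           # run a raw statement in the same runtime
#   print(dr.var('x'))                    # -> 2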
#######################################################################
#TEST
a = 0
def __test_call ():
global a
a = 100
print ('__test_call')
return 0
def _test1 ():
    # Using globals() here would affect the current environment and break the normal use of the built-in open().
#dr = DynamicRuntime(np=globals())
dr = DynamicRuntime(np=locals())
dr = dcall('os',runtime = dr)
if dr:
dr = dcall(func_name = 'times',args = '',runtime = dr)
if dr:
dr = dcall(func_name = 'print',args = '_',runtime = dr)
if dr:
dr = dcall(func_name = 'times',args = '',runtime = dr)
if dr:
dr = dcall(func_name = 'print',args = '_',runtime = dr)
if dr:
dr = scall('print(\' Hello \')',runtime = dr)
if dr:
dr = scall('__test_call()',runtime = dr)
print(a)
def _test2 ():
b = 1
c = 1
dr = DynamicRuntime( np = locals())
scall('b = b + 1',dr)
print(dr)
print(b)
    ## Note: we have to obtain the result manually; the 'b = b + 1' call will not touch the 'b' in this scope.
    # Why? Refer to the Python documentation for exec():
#Note
#The default locals act as described for function locals() below:
# modifications to the default locals dictionary should not be attempted. Pass an explicit locals dictionary
# if you need to see effects of the code on locals after function exec() returns.
#
print (dr.var('b'))
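    # A minimal stand-alone illustration of the behaviour quoted above (nothing
    # beyond the standard library is assumed): exec() mutates the dict that is
    # passed in, not the caller's local variables.
    #
    #   ns = {'b': 1}
    #   exec('b = b + 1', ns)
    #   assert ns['b'] == 2    # visible through the dict, not through a local 'b'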
def _test3 ():
dr = scall('t3 = "this is t3" ')
print(dr.var('t3'))
dr = scall('t4 = t3 + " and t4" ',dr)
print(dr.var('t4'))
def _test4 ():
    # If the next line raises an error, the local environment has been corrupted, which is caused by passing np=globals().
#print("++++++++++==========",help(open))
dr = dcallpy(os.path.abspath( __file__ ),'_test4_print')
dr = dcallpy(func_name='_test4_print_2' ,args='1111' ,runtime=dr)
dr = dcallpy(func_name='_test4_print_3' ,args='1111,2222' ,runtime=dr)
def _test4_print():
print("===== my name is _test4_print")
def _test4_print_2(aaaa):
print("===== my name is _test4_print_2 %s"%(aaaa))
def _test4_print_3(aaaa,bbbbb):
print("===== my name is _test4_print_3 %s %s"%(aaaa,bbbbb))
def _test5 ():
dr = scall('')
dr.np['aaaaa'] = 'test is aaaaa'
dr = dcall(func_name = 'print',args = 'aaaaa',runtime = dr)
if __name__ == '__main__':
_test1()
print('==========================================================')
_test2()
print('==========================================================')
_test3()
print('==========================================================')
_test4()
print('==========================================================')
_test5()
| [
"[email protected]"
] | |
bd48767328d1904968baef929946d37d9b971dcd | 5e629210c351f369208155a11f395d47be9b837b | /conditionCompleteion/src/osd/objectService/unitTests/test_diskfile.py | 9dd7b4b3b27b4b0cd4619e36e37b004d5c54ebc0 | [] | no_license | confi-surya/pythonicPracto | 028f2a50bc595b90bee95b235ec9218da3e45fe5 | c366afd9ab54b8cacda767189f1a13efb5f961b2 | refs/heads/master | 2020-03-23T12:56:55.843408 | 2018-07-19T11:51:59 | 2018-07-19T14:37:13 | 141,572,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,853 | py | import cPickle as pickle
import os
import errno
import mock
import unittest
import email
import tempfile
import uuid
#import xattr
from shutil import rmtree
from time import time
from tempfile import mkdtemp
from hashlib import md5
from contextlib import closing, nested
from gzip import GzipFile
from eventlet import tpool
from osd.objectService.unitTests import FakeLogger, temptree
from osd.objectService.unitTests import mock as unit_mock
from osd.objectService import diskfile
from osd.common import utils
#from osd.common.utils import hash_path, mkdirs, normalize_timestamp
from osd.common.utils import mkdirs, normalize_timestamp
from osd.common.ring.ring import hash_path
from osd.common import ring
from osd.common.exceptions import DiskFileNotExist, DiskFileQuarantined, \
DiskFileDeviceUnavailable, DiskFileDeleted, DiskFileNotOpen, \
DiskFileError, ReplicationLockTimeout, PathNotDir, DiskFileCollision, \
DiskFileExpired, SwiftException, DiskFileNoSpace
import osd
class TestDiskFileManager(unittest.TestCase):
def setUp(self):
self.tmpdir = mkdtemp()
self.testdir = os.path.join(self.tmpdir,'test_object_server_disk_file_mgr')
mkdirs(os.path.join(self.testdir,"export", "fs1"))
mkdirs(os.path.join(self.testdir,"export", "fs1"))
self.filesystems = os.path.join(os.path.join(self.testdir,"export"))
self._orig_tpool_exc = tpool.execute
tpool.execute = lambda f, *args, **kwargs: f(*args, **kwargs)
self.conf = dict(filesystems=self.filesystems,mount_check='false',
keep_cache_size=2 * 1024)
self.df_mgr = diskfile.DiskFileManager(self.conf,FakeLogger())
def tearDown(self):
rmtree(self.tmpdir, ignore_errors=1)
def test_construct_filesystem_path(self):
res_path = self.df_mgr.construct_filesystem_path("abc")
self.assertEqual(os.path.join(self.filesystems,"abc"),res_path)
def test_get_filesystem_path(self):
res_path = self.df_mgr.get_filesystem_path("abc")
self.assertEqual(os.path.join(self.filesystems,"abc"),res_path)
class TestDiskFile(unittest.TestCase):
def setUp(self):
""" Setup the test"""
self.tmpdir = mkdtemp()
self.testdir = os.path.join(self.tmpdir,'test_object_server_disk_file')
self.filesystems = os.path.join(os.path.join(self.testdir,"export"))
self.filesystem = "fs1"
mkdirs(os.path.join(self.filesystems,self.filesystem))
self._orig_tpool_exc = tpool.execute
tpool.execute = lambda f, *args, **kwargs: f(*args, **kwargs)
self.conf = dict(filesystems=self.filesystems, mount_check='false',
keep_cache_size=2 * 1024, )
self.df_mgr = diskfile.DiskFileManager(self.conf, FakeLogger())
def tearDown(self):
"""tear down the test"""
rmtree(self.tmpdir, ignore_errors=1)
tpool.execute = self._orig_tpool_exc
def _create_ondisk_file(self, df, data, timestamp, metadata=None):
""" create the data amd meta data file"""
if timestamp is None:
timestamp = time()
timestamp = normalize_timestamp(timestamp)
if not metadata:
metadata = {}
if 'X-Timestamp' not in metadata:
metadata['X-Timestamp'] = normalize_timestamp(timestamp)
if 'ETag' not in metadata:
etag = md5()
etag.update(data)
metadata['ETag'] = etag.hexdigest()
if 'name' not in metadata:
metadata['name'] = '/a/c/o'
if 'Content-Length' not in metadata:
metadata['Content-Length'] = str(len(data))
hash_name = df._name_hash
mkdirs(df._datadir)
mkdirs(df._metadir)
data_file = os.path.join(df._datadir, df._name_hash + ".data")
meta_file = os.path.join(df._metadir, df._name_hash + ".meta")
with open(data_file, 'wb') as f:
f.write(data)
with open(meta_file,'wb') as f:
f.write(pickle.dumps(metadata, diskfile.PICKLE_PROTOCOL))
f.write("EOF")
def _simple_get_diskfile(self, acc_dir="a1", cont_dir='d1', obj_dir='o1', account='a', container='c', obj='o'):
"""create the DiskFile object"""
return self.df_mgr.get_diskfile(self.filesystem, acc_dir, cont_dir,
obj_dir, account, container, obj)
def _create_test_file(self, data, timestamp=None, metadata=None, account='a', container='c', obj='o'):
""" creates the test file and opens it"""
if metadata is None:
metadata = {}
metadata.setdefault('name', '/%s/%s/%s' % (account, container, obj))
df = self._simple_get_diskfile(account=account, container=container,
obj=obj)
self._create_ondisk_file(df, data, timestamp, metadata)
df = self._simple_get_diskfile(account=account, container=container,
obj=obj)
df.open()
return df
def test_open_not_exist(self):
""" Test for DiskFileNotExist Exception"""
df = self._simple_get_diskfile()
self.assertRaises(DiskFileNotExist, df.open)
def test_open_expired(self):
"""Test for DiskFileExpired Exception.
although it will not be used in Hydra
"""
self.assertRaises(DiskFileExpired,
self._create_test_file,
'1234567890', metadata={'X-Delete-At': '0'})
def test_open_not_expired(self):
""" DiskFileExpired exception should not be raised"""
try:
self._create_test_file(
'1234567890', metadata={'X-Delete-At': str(2 * int(time()))})
except SwiftException as err:
self.fail("Unexpected swift exception raised: %r" % err)
def test_get_metadata(self):
"""get metadata """
df = self._create_test_file('1234567890', timestamp=42)
md = df.get_metadata()
self.assertEqual(md['X-Timestamp'], normalize_timestamp(42))
def test_read_metadata(self):
""" read metadata """
self._create_test_file('1234567890', timestamp=42)
df = self._simple_get_diskfile()
md = df.read_metadata()
self.assertEqual(md['X-Timestamp'], normalize_timestamp(42))
def test_get_metadata_not_opened(self):
""" get metadata when the metadata field is not populated"""
df = self._simple_get_diskfile()
self.assertRaises(DiskFileNotOpen, df.get_metadata)
def test_not_opened(self):
""" test DiskFileNotOpen exception"""
df = self._simple_get_diskfile()
try:
with df:
pass
except DiskFileNotOpen:
pass
else:
self.fail("Expected DiskFileNotOpen exception")
def _get_open_disk_file(self, invalid_type=None, obj_name='o', fsize=1024,
csize=8, mark_deleted=False, prealloc=False,
ts=None, mount_check=False, extra_metadata=None):
'''returns a DiskFile'''
df = self._simple_get_diskfile(obj=obj_name)
data = '0' * fsize
etag = md5()
if ts is not None:
timestamp = ts
else:
timestamp = normalize_timestamp(time())
with df.create() as writer:
upload_size = writer.write(data)
etag.update(data)
etag = etag.hexdigest()
metadata = {
'ETag': etag,
'X-Timestamp': timestamp,
'Content-Length': str(upload_size),
}
metadata.update(extra_metadata or {})
writer.put(metadata)
if invalid_type == 'ETag':
etag = md5()
etag.update('1' + '0' * (fsize - 1))
etag = etag.hexdigest()
metadata['ETag'] = etag
diskfile.write_metadata(writer._fd_meta, metadata)
elif invalid_type == 'Content-Length':
metadata['Content-Length'] = fsize - 1
diskfile.write_metadata(writer._fd_meta, metadata)
elif invalid_type == 'Bad-Content-Length':
metadata['Content-Length'] = 'zero'
diskfile.write_metadata(writer._fd_meta, metadata)
elif invalid_type == 'Missing-Content-Length':
del metadata['Content-Length']
diskfile.write_metadata(writer._fd_meta, metadata)
elif invalid_type == 'Bad-X-Delete-At':
metadata['X-Delete-At'] = 'bad integer'
diskfile.write_metadata(writer._fd_meta, metadata)
if mark_deleted:
df.delete(timestamp)
data_file = [os.path.join(df._datadir, fname)
for fname in sorted(os.listdir(df._datadir),
reverse=True)
if fname.endswith('.data')]
meta_file = [os.path.join(df._metadir, fname)
for fname in sorted(os.listdir(df._metadir),
reverse=True)
if fname.endswith('.meta')]
''' if invalid_type == 'Corrupt-Xattrs':
# We have to go below read_metadata/write_metadata to get proper
# corruption.
meta_xattr = open(meta_file,'rb').read()
wrong_byte = 'X' if meta_xattr[0] != 'X' else 'Y'
xattr.setxattr(data_files[0], "user.osd.metadata",
wrong_byte + meta_xattr[1:])
elif invalid_type == 'Truncated-Xattrs':
meta_xattr = xattr.getxattr(data_files[0], "user.osd.metadata")
xattr.setxattr(data_files[0], "user.osd.metadata",
meta_xattr[:-1])
'''
if invalid_type == 'Missing-Name':
with open(meta_file,'r') as fd:
md = diskfile.read_metadata(fd)
del md['name']
fd = os.open(meta_file,os.O_WRONLY|os.O_TRUNC)
diskfile.write_metadata(fd, md)
elif invalid_type == 'Bad-Name':
with open(meta_file,'r') as fd:
md = diskfile.read_metadata(fd)
md['name'] = md['name'] + 'garbage'
fd = os.open(meta_file,os.O_WRONLY|os.O_TRUNC)
diskfile.write_metadata(fd, md)
self.conf['disk_chunk_size'] = csize
self.conf['mount_check'] = mount_check
self.df_mgr = diskfile.DiskFileManager(self.conf, FakeLogger())
df = self._simple_get_diskfile(obj=obj_name)
df.open()
if invalid_type == 'Zero-Byte':
fp = open(df._data_file, 'w')
fp.close()
df.unit_test_len = fsize
return df
@mock.patch('osd.objectService.diskfile.DiskFileWriter.write', return_value=0)
def test_delete(self, write):
df = self._get_open_disk_file()
ts = time()
df.delete('data')
data_file_name = df._name_hash + ".data"
meta_file_name = df._name_hash + ".meta"
dl = os.listdir(df._datadir)
ml = os.listdir(df._metadir)
self.assertTrue(data_file_name not in set(dl))
self.assertTrue(meta_file_name in set(ml))
df.delete('meta')
dl = os.listdir(df._datadir)
ml = os.listdir(df._metadir)
self.assertTrue(meta_file_name not in set(ml))
@mock.patch('osd.objectService.diskfile.DiskFileWriter.write', return_value=0)
def test_open_deleted(self, write):
df = self._get_open_disk_file()
df.delete('data')
df.delete('meta')
data_file_name = df._name_hash + ".data"
meta_file_name = df._name_hash + ".meta"
dl = os.listdir(df._datadir)
ml = os.listdir(df._metadir)
self.assertTrue(data_file_name not in set(dl))
self.assertTrue(meta_file_name not in set(ml))
df = self._simple_get_diskfile()
self.assertRaises(DiskFileNotExist, df.open)
def test_listdir_enoent(self):
oserror = OSError()
oserror.errno = errno.ENOENT
self.df_mgr.logger.error = mock.MagicMock()
with mock.patch('os.listdir', side_effect=oserror):
self.assertEqual(self.df_mgr._listdir('path'), [])
self.assertEqual(self.df_mgr.logger.error.mock_calls, [])
def test_listdir_other_oserror(self):
oserror = OSError()
self.df_mgr.logger.error = mock.MagicMock()
with mock.patch('os.listdir', side_effect=oserror):
self.assertEqual(self.df_mgr._listdir('path'), [])
self.df_mgr.logger.error.assert_called_once_with(
'ERROR: Skipping %r due to error with listdir attempt: %s',
'path', oserror)
def test_listdir(self):
self.df_mgr.logger.error = mock.MagicMock()
with mock.patch('os.listdir', return_value=['abc', 'def']):
self.assertEqual(self.df_mgr._listdir('path'), ['abc', 'def'])
self.assertEqual(self.df_mgr.logger.error.mock_calls, [])
def test_diskfile_names(self):
df = self._simple_get_diskfile()
self.assertEqual(df.account, 'a')
self.assertEqual(df.container, 'c')
self.assertEqual(df.obj, 'o')
@mock.patch('osd.objectService.diskfile.DiskFileWriter.write', return_value=0)
def test_diskfile_content_length_not_open(self, write):
df = self._simple_get_diskfile()
exc = None
try:
df.content_length
except DiskFileNotOpen as err:
exc = err
self.assertEqual(str(exc), '')
def test_diskfile_content_length(self):
self._get_open_disk_file()
df = self._simple_get_diskfile()
with df.open():
self.assertEqual(df.content_length, 1024)
@mock.patch('osd.objectService.diskfile.DiskFileWriter.write', return_value=0)
def test_diskfile_timestamp_not_open(self, write):
df = self._simple_get_diskfile()
exc = None
try:
df.timestamp
except DiskFileNotOpen as err:
exc = err
self.assertEqual(str(exc), '')
def test_diskfile_timestamp(self):
self._get_open_disk_file(ts='1383181759.12345')
df = self._simple_get_diskfile()
with df.open():
self.assertEqual(df.timestamp, '1383181759.12345')
    def test_write_metadata(self):
df = self._create_test_file('1234567890')
timestamp = normalize_timestamp(time())
metadata = {'X-Timestamp': timestamp, 'X-Object-Meta-test': 'data'}
metadata['name'] = '/a/c/o'
metadata['Content-Length'] = '10'
df.write_metadata(metadata)
metadata = df.read_metadata()
self.assertEquals(metadata['X-Object-Meta-test'],'data')
'''
def test_create_close_oserror(self):
df = self.df_mgr.get_diskfile(self.filesystem, '0', 'abc', '123',
'xyz')
with mock.patch("osd.obj.diskfile.os.close",
mock.MagicMock(side_effect=OSError(
errno.EACCES, os.strerror(errno.EACCES)))):
try:
with df.create():
pass
except Exception as err:
self.fail("Unexpected exception raised: %r" % err)
else:
pass
'''
def test_disk_file_mkstemp_creates_dir(self):
tmpdir = os.path.join(self.filesystems, self.filesystem)
os.rmdir(tmpdir)
df = self._simple_get_diskfile()
with df.create():
self.assert_(os.path.exists(tmpdir))
def test_disk_file_create_tmp_file(self):
tmpdir = os.path.join(self.filesystems, self.filesystem)
os.rmdir(tmpdir)
df = self._simple_get_diskfile()
file_name = '_'.join([hash_path(df.account), hash_path(df.account, df.container),
df._name_hash])
with df.create():
self.assert_(os.path.exists(df._tmpdir))
self.assert_(os.path.exists(os.path.join(df._tmpdir, file_name + ".data")))
self.assert_(os.path.exists(os.path.join(df._tmpdir, file_name + ".meta")))
def test_disk_file_finalize_put(self):
tmpdir = os.path.join(self.filesystems, self.filesystem)
os.rmdir(tmpdir)
df = self._simple_get_diskfile()
metadata = {'X-metadata-value':'data'}
file_name = '_'.join([hash_path(df.account), hash_path(df.account, df.container),
df._name_hash])
with df.create() as writer:
self.assertTrue(os.path.exists(df._tmpdir))
self.assertTrue(os.path.exists(os.path.join(df._tmpdir, file_name + ".data")))
self.assertTrue(os.path.exists(os.path.join(df._tmpdir, file_name + ".meta")))
writer.put(metadata)
self.assertTrue(os.path.exists(os.path.join(df._datadir, df._name_hash + ".data")))
self.assertTrue(os.path.exists(os.path.join(df._metadir, df._name_hash + ".meta")))
self.assertTrue(os.path.exists(df._tmpdir))
self.assertFalse(os.path.exists(os.path.join(df._tmpdir, df._name_hash + ".data")))
self.assertFalse(os.path.exists(os.path.join(df._tmpdir, df._name_hash + ".meta")))
def test_disk_file_reader_iter(self):
df = self._create_test_file('1234567890')
reader = df.reader()
self.assertEqual(''.join(reader), '1234567890')
def test_disk_file_reader_iter_w_quarantine(self):
df = self._create_test_file('1234567890')
reader = df.reader()
reader._obj_size += 1
self.assertRaises(DiskFileQuarantined, ''.join, reader)
def test_disk_file_app_iter_corners(self):
df = self._create_test_file('1234567890')
quarantine_msgs = []
reader = df.reader()
self.assertEquals(''.join(reader.app_iter_range(0, None)),
'1234567890')
df = self._simple_get_diskfile()
with df.open():
reader = df.reader()
self.assertEqual(''.join(reader.app_iter_range(5, None)), '67890')
def test_disk_file_app_iter_range_w_none(self):
df = self._create_test_file('1234567890')
reader = df.reader()
self.assertEqual(''.join(reader.app_iter_range(None, None)),
'1234567890')
def test_disk_file_app_iter_partial_closes(self):
df = self._create_test_file('1234567890')
reader = df.reader()
it = reader.app_iter_range(0, 5)
self.assertEqual(''.join(it), '12345')
self.assertTrue(reader._fp is None)
@mock.patch('osd.objectService.diskfile.DiskFileWriter.write', return_value=0)
def test_disk_file_app_iter_ranges(self, write):
df = self._create_test_file('012345678911234567892123456789')
reader = df.reader()
it = reader.app_iter_ranges([(0, 10), (10, 20), (20, 30)],
'plain/text',
'\r\n--someheader\r\n', 30)
value = ''.join(it)
self.assertTrue('0123456789' in value)
self.assertTrue('1123456789' in value)
self.assertTrue('2123456789' in value)
"""
def test_disk_file_app_iter_ranges_w_quarantine(self):
df = self._create_test_file('012345678911234567892123456789')
reader = df.reader()
reader._obj_size += 1
try:
it = reader.app_iter_ranges([(0, 30)],
'plain/text',
'\r\n--someheader\r\n', 30)
value = ''.join(it)
except DiskFileQuarantined as e:
err = e
#self.assertEqual('DiskFileQuarantined',str(err))
self.assertTrue('0123456789' in value)
self.assertTrue('1123456789' in value)
self.assertTrue('2123456789' in value)
"""
def test_disk_file_app_iter_ranges_w_no_etag_quarantine(self):
df = self._create_test_file('012345678911234567892123456789')
reader = df.reader()
it = reader.app_iter_ranges([(0, 10)],
'plain/text',
'\r\n--someheader\r\n', 30)
value = ''.join(it)
self.assertTrue('0123456789' in value)
def test_disk_file_app_iter_ranges_edges(self):
df = self._create_test_file('012345678911234567892123456789')
reader = df.reader()
it = reader.app_iter_ranges([(3, 10), (0, 2)], 'application/whatever',
'\r\n--someheader\r\n', 30)
value = ''.join(it)
self.assertTrue('3456789' in value)
self.assertTrue('01' in value)
def test_disk_file_large_app_iter_ranges(self):
        # This test case is to make sure that the disk file app_iter_ranges
        # method has all of its code paths exercised.
long_str = '01234567890' * 65536
target_strs = ['3456789', long_str[0:65590]]
df = self._create_test_file(long_str)
reader = df.reader()
it = reader.app_iter_ranges([(3, 10), (0, 65590)], 'plain/text',
'5e816ff8b8b8e9a5d355497e5d9e0301', 655360)
        # The produced string is actually missing the MIME headers; we need to
        # add these headers to make it a real MIME message. The body of the
        # message is produced by the app_iter_ranges method of the DiskFile object.
header = ''.join(['Content-Type: multipart/byteranges;',
'boundary=',
'5e816ff8b8b8e9a5d355497e5d9e0301\r\n'])
value = header + ''.join(it)
parts = map(lambda p: p.get_payload(decode=True),
email.message_from_string(value).walk())[1:3]
self.assertEqual(parts, target_strs)
def test_disk_file_app_iter_ranges_empty(self):
        # This test case covers empty values passed into app_iter_ranges:
        # when the ranges argument is either an empty list or None,
        # the method yields an empty string.
df = self._create_test_file('012345678911234567892123456789')
reader = df.reader()
it = reader.app_iter_ranges([], 'application/whatever',
'\r\n--someheader\r\n', 100)
self.assertEqual(''.join(it), '')
df = self._simple_get_diskfile()
with df.open():
reader = df.reader()
it = reader.app_iter_ranges(None, 'app/something',
'\r\n--someheader\r\n', 150)
self.assertEqual(''.join(it), '')
@mock.patch('osd.objectService.diskfile.DiskFileWriter.write', return_value=0)
def test_keep_cache(self, write):
df = self._get_open_disk_file(fsize=65)
with mock.patch("osd.objectService.diskfile.drop_buffer_cache") as foo:
for _ in df.reader():
pass
self.assertTrue(foo.called)
df = self._get_open_disk_file(fsize=65)
with mock.patch("osd.objectService.diskfile.drop_buffer_cache") as bar:
for _ in df.reader(keep_cache=False):
pass
self.assertTrue(bar.called)
df = self._get_open_disk_file(fsize=65)
with mock.patch("osd.objectService.diskfile.drop_buffer_cache") as boo:
for _ in df.reader(keep_cache=True):
pass
self.assertFalse(boo.called)
df = self._get_open_disk_file(fsize=5 * 1024, csize=256)
with mock.patch("osd.objectService.diskfile.drop_buffer_cache") as goo:
for _ in df.reader(keep_cache=True):
pass
self.assertTrue(goo.called)
if __name__ =="__main__":
unittest.main()
| [
"[email protected]"
] | |
37a08b96698b20dd1fea9d7b61d6b4b83fbb7d5e | 2672a2b664ed12f190b68deb51476b451a524561 | /portal/config.py | e45d5065a743935fa64b17b3a1a2a8ea6266d98c | [] | no_license | LCBRU/genvasc_portal_web | 9a2a27b4a2ba0fb2db402efc96eea8b2ed0a86e6 | 11eb562a5e92fd05fd5a902b7e062a2813e7b3f7 | refs/heads/master | 2023-01-09T09:59:07.301366 | 2023-01-07T14:44:07 | 2023-01-07T14:44:07 | 132,786,398 | 0 | 0 | null | 2022-01-11T13:17:30 | 2018-05-09T16:45:40 | Python | UTF-8 | Python | false | false | 2,527 | py | import os
from dotenv import load_dotenv
# Load environment variables from '.env' file.
load_dotenv()
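# A minimal '.env' sketch for local development (the values below are
# illustrative placeholders only; every variable read through os.environ in
# this module must be present in the real file):
#
#   GGPP_FLASK_SECRET_KEY=change-me
#   GGPP_FLASK_DEBUG=True
#   SQLALCHEMY_DATABASE_URI=sqlite:///portal.db
#   BROKER_URL=redis://localhost:6379/0
#   CELERY_RESULT_BACKEND=redis://localhost:6379/1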
class BaseConfig(object):
REMEMBER_COOKIE_NAME = 'GENVASC Remember Me'
REMEMBER_COOKIE_DURATION = 1
MAIL_SERVER = os.environ['MAIL_SERVER']
MAIL_DEBUG = os.environ['MAIL_DEBUG']
SECURITY_EMAIL_SENDER = os.environ['LCBRUIT_EMAIL_ADDRESS']
SECRET_KEY = os.environ['GGPP_FLASK_SECRET_KEY']
DEBUG = os.environ['GGPP_FLASK_DEBUG'] == 'True'
SQLALCHEMY_DATABASE_URI = os.environ['SQLALCHEMY_DATABASE_URI']
SQLALCHEMY_TRACK_MODIFICATIONS = (
os.environ['GGPP_SQLALCHEMY_TRACK_MODIFICATIONS'] == 'True'
)
SQLALCHEMY_ECHO = os.environ['GGPP_SQLALCHEMY_ECHO'] == 'True'
SECURITY_PASSWORD_HASH = os.environ['GGPP_SECURITY_PASSWORD_HASH']
SECURITY_PASSWORD_SALT = os.environ['GGPP_SECURITY_PASSWORD_SALT']
SECURITY_TRACKABLE = os.environ['GGPP_SECURITY_TRACKABLE'] == 'True'
SMTP_SERVER = 'localhost'
APPLICATION_EMAIL_ADDRESS = os.environ['LCBRUIT_EMAIL_ADDRESS']
ERROR_EMAIL_SUBJECT = 'GENVASC Portal Error'
SECURITY_CHANGEABLE = True
SECURITY_RECOVERABLE = True
MAIL_DEFAULT_SENDER = os.environ["LCBRUIT_EMAIL_ADDRESS"]
# Admin user
ADMIN_EMAIL_ADDRESS = os.environ['ADMIN_EMAIL_ADDRESS']
ADMIN_FIRST_NAME = os.environ['ADMIN_FIRST_NAME']
ADMIN_LAST_NAME = os.environ['ADMIN_LAST_NAME']
ADMIN_PASSWORD = os.environ['ADMIN_PASSWORD']
# Celery Settings
broker_url=os.environ["BROKER_URL"]
result_backend=os.environ["CELERY_RESULT_BACKEND"]
CELERY_RATE_LIMIT=os.environ["CELERY_RATE_LIMIT"]
CELERY_REDIRECT_STDOUTS_LEVEL=os.environ["CELERY_REDIRECT_STDOUTS_LEVEL"]
CELERY_DEFAULT_QUEUE=os.environ["CELERY_DEFAULT_QUEUE"]
# Celery Schedules
PRACTICE_ETL_SCHEDULE_MINUTE=os.environ["PRACTICE_ETL_SCHEDULE_MINUTE"]
PRACTICE_ETL_SCHEDULE_HOUR=os.environ["PRACTICE_ETL_SCHEDULE_HOUR"]
# Databases
PRACTICE_DATABASE_URI=os.environ["PRACTICE_DATABASE_URI"]
RECRUIT_DATABASE_URI=os.environ["RECRUIT_DATABASE_URI"]
IMPORT_DATABASE_URI=os.environ["IMPORT_DATABASE_URI"]
class TestConfig(BaseConfig):
"""Configuration for automated testing"""
TESTING = True
SQLALCHEMY_DATABASE_URI="sqlite://"
PRACTICE_DATABASE_URI="sqlite://"
RECRUIT_DATABASE_URI="sqlite://"
WTF_CSRF_ENABLED = False
SMTP_SERVER = None
SQLALCHEMY_ECHO = False
broker_url=os.environ["BROKER_URL"] + '/test'
class TestConfigCRSF(TestConfig):
WTF_CSRF_ENABLED = True
| [
"[email protected]"
] | |
3dd764efee547895b61b17074bef1e80ee82a562 | 9bb6795a12d6e042b962704dab9ec59d92d54e8f | /1_numpy/2_reshape.py | b5d60e8241460327f2b7b83d534050593e76005f | [] | no_license | kimsoosoo0928/Perfect_Guide | c5177037512cb06814f0bbfcb70a22d14c9ec1fb | 9b615d320957babb1a918fb38282062998a1e5c4 | refs/heads/main | 2023-07-18T12:29:03.353274 | 2021-08-29T00:31:28 | 2021-08-29T00:31:28 | 396,668,104 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,093 | py | import numpy as np
array1 = np.arange(10)
print('array1 : \n', array1)
array2 = array1.reshape(2,5)
print('array2 : \n', array2)
array3 = array1.reshape(5,2)
print('array3 : \n', array3)
'''
array1 :
[0 1 2 3 4 5 6 7 8 9]
array2 :
[[0 1 2 3 4]
[5 6 7 8 9]]
array3 :
[[0 1]
[2 3]
[4 5]
[6 7]
[8 9]]
'''
array1 = np.arange(10)
print(array1)
array2 = array1.reshape(-1,5)
print('array2 shape : ', array2.shape)
array3 = array1.reshape(5,-1)
print('array3 shape : ', array3.shape)
'''
[0 1 2 3 4 5 6 7 8 9]
array2 shape : (2, 5)
array3 shape : (5, 2)
'''
array1 = np.arange(8)
array3d = array1.reshape((2,2,2))
print('array3d : \n', array3d.tolist())
array5 = array1.reshape((-1,1))
print('array5 : \n', array5.tolist())
print('array5 shape : \n', array5.shape)
array6 = array1.reshape((-1,1))
print('array6 : \n', array6.tolist())
print('array6 shape : \n', array6.shape)
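# reshape can infer at most one dimension (the -1), and the remaining fixed
# dimensions must still divide the total number of elements; otherwise numpy
# raises a ValueError (illustrative):
#   np.arange(10).reshape(-1, 4)   # ValueError: size 10 is not divisible by 4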
'''
array3d :
[[[0, 1], [2, 3]], [[4, 5], [6, 7]]]
array5 :
[[0], [1], [2], [3], [4], [5], [6], [7]]
array5 shape :
(8, 1)
array6 :
[[0], [1], [2], [3], [4], [5], [6], [7]]
array6 shape :
(8, 1)
''' | [
"[email protected]"
] | |
8764d59d46a5444c80f139b86086313daedfab35 | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog/optimized_44595.py | 4b502c0083653180de1a265b86c37f599c2c98b6 | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,838 | py | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
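# Each block below either reuses or creates a named Chimera marker set for one
# COG-complex tag (GFP N-terminus, GFP C-terminus, anchor, or an internal bead)
# and places a single spherical marker at the optimized (x, y, z) position with
# the given RGB colour and radius.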
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((556.227, 579.162, 346.569), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_0" not in marker_sets:
s=new_marker_set('Cog2_0')
marker_sets["Cog2_0"]=s
s= marker_sets["Cog2_0"]
mark=s.place_marker((577.62, 567.506, 410.007), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_1" not in marker_sets:
s=new_marker_set('Cog2_1')
marker_sets["Cog2_1"]=s
s= marker_sets["Cog2_1"]
mark=s.place_marker((592.146, 544.952, 487.833), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((499.902, 472.089, 412.099), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((650.184, 533.797, 673.199), (0.89, 0.1, 0.1), 18.4716)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((561.491, 577.074, 392.605), (1, 1, 0), 18.4716)
if "Cog3_0" not in marker_sets:
s=new_marker_set('Cog3_0')
marker_sets["Cog3_0"]=s
s= marker_sets["Cog3_0"]
mark=s.place_marker((560.64, 577.701, 391.737), (1, 1, 0.2), 17.1475)
if "Cog3_1" not in marker_sets:
s=new_marker_set('Cog3_1')
marker_sets["Cog3_1"]=s
s= marker_sets["Cog3_1"]
mark=s.place_marker((548.961, 593.488, 372.017), (1, 1, 0.2), 17.1475)
if "Cog3_2" not in marker_sets:
s=new_marker_set('Cog3_2')
marker_sets["Cog3_2"]=s
s= marker_sets["Cog3_2"]
mark=s.place_marker((524.868, 604.785, 363.173), (1, 1, 0.2), 17.1475)
if "Cog3_3" not in marker_sets:
s=new_marker_set('Cog3_3')
marker_sets["Cog3_3"]=s
s= marker_sets["Cog3_3"]
mark=s.place_marker((520.518, 631.661, 369.594), (1, 1, 0.2), 17.1475)
if "Cog3_4" not in marker_sets:
s=new_marker_set('Cog3_4')
marker_sets["Cog3_4"]=s
s= marker_sets["Cog3_4"]
mark=s.place_marker((493.613, 634.177, 377.669), (1, 1, 0.2), 17.1475)
if "Cog3_5" not in marker_sets:
s=new_marker_set('Cog3_5')
marker_sets["Cog3_5"]=s
s= marker_sets["Cog3_5"]
mark=s.place_marker((484.765, 629.11, 404.082), (1, 1, 0.2), 17.1475)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((573.32, 581.782, 367.095), (1, 1, 0.4), 18.4716)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((401.368, 681.202, 444.553), (1, 1, 0.4), 18.4716)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((517.783, 644.929, 605.75), (0, 0, 0.8), 18.4716)
if "Cog4_0" not in marker_sets:
s=new_marker_set('Cog4_0')
marker_sets["Cog4_0"]=s
s= marker_sets["Cog4_0"]
mark=s.place_marker((517.783, 644.929, 605.75), (0, 0, 0.8), 17.1475)
if "Cog4_1" not in marker_sets:
s=new_marker_set('Cog4_1')
marker_sets["Cog4_1"]=s
s= marker_sets["Cog4_1"]
mark=s.place_marker((523.844, 630.733, 581.449), (0, 0, 0.8), 17.1475)
if "Cog4_2" not in marker_sets:
s=new_marker_set('Cog4_2')
marker_sets["Cog4_2"]=s
s= marker_sets["Cog4_2"]
mark=s.place_marker((527.23, 614.474, 557.945), (0, 0, 0.8), 17.1475)
if "Cog4_3" not in marker_sets:
s=new_marker_set('Cog4_3')
marker_sets["Cog4_3"]=s
s= marker_sets["Cog4_3"]
mark=s.place_marker((535.187, 601.124, 533.882), (0, 0, 0.8), 17.1475)
if "Cog4_4" not in marker_sets:
s=new_marker_set('Cog4_4')
marker_sets["Cog4_4"]=s
s= marker_sets["Cog4_4"]
mark=s.place_marker((545.343, 599.826, 507.38), (0, 0, 0.8), 17.1475)
if "Cog4_5" not in marker_sets:
s=new_marker_set('Cog4_5')
marker_sets["Cog4_5"]=s
s= marker_sets["Cog4_5"]
mark=s.place_marker((558.97, 603.517, 482.597), (0, 0, 0.8), 17.1475)
if "Cog4_6" not in marker_sets:
s=new_marker_set('Cog4_6')
marker_sets["Cog4_6"]=s
s= marker_sets["Cog4_6"]
mark=s.place_marker((569.17, 602.239, 455.675), (0, 0, 0.8), 17.1475)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((362.68, 662.516, 597.87), (0, 0, 0.8), 18.4716)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((769.647, 538.552, 302.63), (0, 0, 0.8), 18.4716)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((601.522, 590.816, 476.849), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_0" not in marker_sets:
s=new_marker_set('Cog5_0')
marker_sets["Cog5_0"]=s
s= marker_sets["Cog5_0"]
mark=s.place_marker((601.522, 590.816, 476.849), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_1" not in marker_sets:
s=new_marker_set('Cog5_1')
marker_sets["Cog5_1"]=s
s= marker_sets["Cog5_1"]
mark=s.place_marker((577.776, 574.478, 480.971), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_2" not in marker_sets:
s=new_marker_set('Cog5_2')
marker_sets["Cog5_2"]=s
s= marker_sets["Cog5_2"]
mark=s.place_marker((561.588, 550.307, 477.712), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_3" not in marker_sets:
s=new_marker_set('Cog5_3')
marker_sets["Cog5_3"]=s
s= marker_sets["Cog5_3"]
mark=s.place_marker((569.861, 522.146, 475.878), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((545.922, 522.059, 353.142), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((600.455, 513.149, 596.661), (0.3, 0.3, 0.3), 18.4716)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((563.384, 545.507, 397.988), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_0" not in marker_sets:
s=new_marker_set('Cog6_0')
marker_sets["Cog6_0"]=s
s= marker_sets["Cog6_0"]
mark=s.place_marker((563.305, 545.338, 397.921), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_1" not in marker_sets:
s=new_marker_set('Cog6_1')
marker_sets["Cog6_1"]=s
s= marker_sets["Cog6_1"]
mark=s.place_marker((537.502, 535.552, 393.737), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_2" not in marker_sets:
s=new_marker_set('Cog6_2')
marker_sets["Cog6_2"]=s
s= marker_sets["Cog6_2"]
mark=s.place_marker((523.419, 556.314, 405.96), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_3" not in marker_sets:
s=new_marker_set('Cog6_3')
marker_sets["Cog6_3"]=s
s= marker_sets["Cog6_3"]
mark=s.place_marker((510.924, 572.397, 425.288), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_4" not in marker_sets:
s=new_marker_set('Cog6_4')
marker_sets["Cog6_4"]=s
s= marker_sets["Cog6_4"]
mark=s.place_marker((510.416, 600.039, 430.598), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_5" not in marker_sets:
s=new_marker_set('Cog6_5')
marker_sets["Cog6_5"]=s
s= marker_sets["Cog6_5"]
mark=s.place_marker((515.45, 624.916, 418.683), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_6" not in marker_sets:
s=new_marker_set('Cog6_6')
marker_sets["Cog6_6"]=s
s= marker_sets["Cog6_6"]
mark=s.place_marker((510.866, 649.168, 405.149), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((595.256, 639.773, 414.377), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((423.904, 655.559, 395.424), (0.21, 0.49, 0.72), 18.4716)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((625.146, 604.894, 419.111), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_0" not in marker_sets:
s=new_marker_set('Cog7_0')
marker_sets["Cog7_0"]=s
s= marker_sets["Cog7_0"]
mark=s.place_marker((611.326, 582.726, 424.898), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_1" not in marker_sets:
s=new_marker_set('Cog7_1')
marker_sets["Cog7_1"]=s
s= marker_sets["Cog7_1"]
mark=s.place_marker((581.353, 535.47, 439.284), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_2" not in marker_sets:
s=new_marker_set('Cog7_2')
marker_sets["Cog7_2"]=s
s= marker_sets["Cog7_2"]
mark=s.place_marker((552.729, 488.233, 454.57), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((568.434, 467.24, 378.12), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((501.281, 441.109, 532.012), (0.7, 0.7, 0.7), 18.4716)
if "Cog8_0" not in marker_sets:
s=new_marker_set('Cog8_0')
marker_sets["Cog8_0"]=s
s= marker_sets["Cog8_0"]
mark=s.place_marker((533.796, 590.685, 408.018), (1, 0.5, 0), 17.1475)
if "Cog8_1" not in marker_sets:
s=new_marker_set('Cog8_1')
marker_sets["Cog8_1"]=s
s= marker_sets["Cog8_1"]
mark=s.place_marker((551.47, 584.314, 429.623), (1, 0.5, 0), 17.1475)
if "Cog8_2" not in marker_sets:
s=new_marker_set('Cog8_2')
marker_sets["Cog8_2"]=s
s= marker_sets["Cog8_2"]
mark=s.place_marker((571.799, 570.297, 444.162), (1, 0.5, 0), 17.1475)
if "Cog8_3" not in marker_sets:
s=new_marker_set('Cog8_3')
marker_sets["Cog8_3"]=s
s= marker_sets["Cog8_3"]
mark=s.place_marker((596.698, 560.943, 455.419), (1, 0.5, 0), 17.1475)
if "Cog8_4" not in marker_sets:
s=new_marker_set('Cog8_4')
marker_sets["Cog8_4"]=s
s= marker_sets["Cog8_4"]
mark=s.place_marker((617.449, 542.988, 465.281), (1, 0.5, 0), 17.1475)
if "Cog8_5" not in marker_sets:
s=new_marker_set('Cog8_5')
marker_sets["Cog8_5"]=s
s= marker_sets["Cog8_5"]
mark=s.place_marker((633.66, 525.076, 482.118), (1, 0.5, 0), 17.1475)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((602.35, 559.943, 417.847), (1, 0.6, 0.1), 18.4716)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((667.147, 487.057, 548.033), (1, 0.6, 0.1), 18.4716)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| [
"[email protected]"
] | |
7fa50c182bf54b2fbf51441eefa0f324279633e7 | 1431b07074b96c7baa6a43a99717da2a658424af | /test/utils/Test_Zip_Folder.py | d6ecc3e784eaacfbffe1988284d8bf95e88f557b | [
"Apache-2.0"
] | permissive | almeidam/pbx-gs-python-utils | 054a7334070627bc27f682ed78c2986230d1cfab | 3f8987dd2d1fc27d1d262385280d7303009f5453 | refs/heads/master | 2020-04-30T10:44:46.179729 | 2019-03-20T13:59:01 | 2019-03-20T13:59:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 564 | py | import json
from unittest import TestCase
from utils.Dev import Dev
from utils.Files import Files
from utils.Misc import Misc
from utils.Zip_Folder import Zip_Folder
class Test_Zip_Folder(TestCase):
def test__using_with__no_params(self):
with Zip_Folder() as (zip_file):
assert zip_file is None
def test__using_with_params(self):
target_folder = Files.current_folder()
with Zip_Folder(target_folder) as (zip_file):
assert Files.exists(zip_file) is True
assert Files.exists(zip_file) is False
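        # The two assertions above capture Zip_Folder's expected contract: the
        # archive exists only while the 'with' block is active and is cleaned
        # up once the context exits.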
| [
"[email protected]"
] | |
5e991e4e3dc2696c8cfb6c76836a9bc9521137d2 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/nlp/Bert-text-classification_for_PyTorch/transformers/src/transformers/models/marian/modeling_flax_marian.py | 9d8b44c5f9da84470baadab2b4eafaf3dfea6fd6 | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 63,778 | py | # coding=utf-8
# Copyright 2021 The Marian Team Authors and The Google Flax Team Authors And The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Flax Marian model."""
import math
import random
from functools import partial
from typing import Callable, Optional, Tuple
import numpy as np
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict, unfreeze
from flax.linen import combine_masks, make_causal_mask
from flax.linen.attention import dot_product_attention_weights
from jax import lax
from jax.random import PRNGKey
from ...file_utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...modeling_flax_outputs import (
FlaxBaseModelOutput,
FlaxBaseModelOutputWithPastAndCrossAttentions,
FlaxCausalLMOutputWithCrossAttentions,
FlaxSeq2SeqLMOutput,
FlaxSeq2SeqModelOutput,
)
from ...modeling_flax_utils import (
ACT2FN,
FlaxPreTrainedModel,
append_call_sample_docstring,
append_replace_return_docstrings,
overwrite_call_docstring,
)
from ...utils import logging
from .configuration_marian import MarianConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "Helsinki-NLP/opus-mt-en-de"
_CONFIG_FOR_DOC = "MarianConfig"
_TOKENIZER_FOR_DOC = "MarianTokenizer"
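# Typical end-to-end usage (a sketch; it assumes the FlaxMarianMTModel
# translation head provided by this module and the checkpoint named in
# _CHECKPOINT_FOR_DOC, with generation defaults taken from that checkpoint):
#
#   from transformers import MarianTokenizer, FlaxMarianMTModel
#   tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#   model = FlaxMarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#   inputs = tokenizer("Hello world", return_tensors="np")
#   sequences = model.generate(**inputs).sequences
#   print(tokenizer.batch_decode(sequences, skip_special_tokens=True))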
MARIAN_START_DOCSTRING = r"""
This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a Flax Linen
[flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.
Finally, this model supports inherent JAX features such as:
- [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
- [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
- [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
- [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
Parameters:
config ([`MarianConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
`jax.numpy.bfloat16` (on TPUs).
This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
specified all the computation will be performed with the given `dtype`.
**Note that this only specifies the dtype of the computation and does not influence the dtype of model
parameters.**
If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
[`~FlaxPreTrainedModel.to_bf16`].
"""
MARIAN_INPUTS_DOCSTRING = r"""
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`MarianTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`MarianTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
For translation and summarization training, `decoder_input_ids` should be provided. If no
`decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
for denoising pre-training following the paper.
decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
If you want to change padding behavior, you should modify to your needs. See diagram 1 in [the
paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.
position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
range `[0, config.max_position_embeddings - 1]`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
MARIAN_ENCODE_INPUTS_DOCSTRING = r"""
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`MarianTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
MARIAN_DECODE_INPUTS_DOCSTRING = r"""
Args:
decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`MarianTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
For translation and summarization training, `decoder_input_ids` should be provided. If no
`decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
for denoising pre-training following the paper.
encoder_outputs (`tuple(tuple(jnp.ndarray)`):
Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
`last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
encoder_attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
If you want to change padding behavior, you should modify to your needs. See diagram 1 in [the
paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.
decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
range `[0, config.max_position_embeddings - 1]`.
past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):
Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast
auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
def create_sinusoidal_positions(n_pos, dim):
position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)])
sentinel = dim // 2 + dim % 2
out = np.zeros_like(position_enc)
out[:, 0:sentinel] = np.sin(position_enc[:, 0::2])
out[:, sentinel:] = np.cos(position_enc[:, 1::2])
return jnp.array(out)
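# The table built above is the standard sinusoidal embedding
#   PE[pos, 2i]   = sin(pos / 10000**(2i / dim))
#   PE[pos, 2i+1] = cos(pos / 10000**(2i / dim))
# except that, as Marian expects, all sine components are packed into the first
# half of each row (columns before `sentinel`) and all cosine components into
# the second half.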
# Copied from transformers.models.bart.modeling_flax_bart.shift_tokens_right
def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
"""
Shift input ids one token to the right.
"""
shifted_input_ids = np.zeros_like(input_ids)
shifted_input_ids[:, 1:] = input_ids[:, :-1]
shifted_input_ids[:, 0] = decoder_start_token_id
shifted_input_ids = np.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
return shifted_input_ids
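# Worked example with hypothetical ids (pad_token_id=0, decoder_start_token_id=2):
#   input_ids         = [[5, 6, 7]]
#   shifted_input_ids = [[2, 5, 6]]
# Any label positions equal to -100 are additionally mapped back to pad_token_id
# so that the decoder only ever sees valid token ids.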
# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartAttention with Bart->Marian
class FlaxMarianAttention(nn.Module):
config: MarianConfig
embed_dim: int
num_heads: int
dropout: float = 0.0
causal: bool = False
bias: bool = True
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self) -> None:
self.head_dim = self.embed_dim // self.num_heads
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {self.num_heads})."
)
dense = partial(
nn.Dense,
self.embed_dim,
use_bias=self.bias,
dtype=self.dtype,
kernel_init=jax.nn.initializers.normal(self.config.init_std),
)
self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
self.out_proj = dense()
self.dropout_layer = nn.Dropout(rate=self.dropout)
if self.causal:
self.causal_mask = make_causal_mask(
jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool"
)
def _split_heads(self, hidden_states):
return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))
def _merge_heads(self, hidden_states):
return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
@nn.compact
def _concatenate_to_cache(self, key, value, query, attention_mask):
"""
This function takes projected key, value states from a single input token and concatenates the states to cached
states from previous steps. This function is slighly adapted from the official Flax repository:
https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
"""
# detect if we're initializing by absence of existing cache data.
is_initialized = self.has_variable("cache", "cached_key")
cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
if is_initialized:
*batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
# update key, value caches with our new 1d spatial slices
cur_index = cache_index.value
indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
key = lax.dynamic_update_slice(cached_key.value, key, indices)
value = lax.dynamic_update_slice(cached_value.value, value, indices)
cached_key.value = key
cached_value.value = value
num_updated_cache_vectors = query.shape[1]
cache_index.value = cache_index.value + num_updated_cache_vectors
# causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
pad_mask = jnp.broadcast_to(
jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
)
attention_mask = combine_masks(pad_mask, attention_mask)
return key, value, attention_mask
def __call__(
self,
hidden_states: jnp.ndarray,
key_value_states: Optional[jnp.ndarray] = None,
attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
batch_size = hidden_states.shape[0]
# get query proj
query_states = self.q_proj(hidden_states)
# get key, value proj
if is_cross_attention:
# cross_attentions
key_states = self.k_proj(key_value_states)
value_states = self.v_proj(key_value_states)
else:
# self_attention
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
query_states = self._split_heads(query_states)
key_states = self._split_heads(key_states)
value_states = self._split_heads(value_states)
# handle cache prepare causal attention mask
if self.causal:
query_length, key_length = query_states.shape[1], key_states.shape[1]
if self.has_variable("cache", "cached_key"):
mask_shift = self.variables["cache"]["cache_index"]
max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
causal_mask = lax.dynamic_slice(
self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
)
else:
causal_mask = self.causal_mask[:, :, :query_length, :key_length]
causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])
# combine masks if needed
if attention_mask is not None and self.causal:
attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
attention_mask = combine_masks(attention_mask, causal_mask)
elif self.causal:
attention_mask = causal_mask
elif attention_mask is not None:
attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
# During fast autoregressive decoding, we feed one position at a time,
# and cache the keys and values step by step.
if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
key_states, value_states, attention_mask = self._concatenate_to_cache(
key_states, value_states, query_states, attention_mask
)
# Convert the boolean attention mask to an attention bias.
if attention_mask is not None:
# attention mask in the form of attention bias
attention_bias = lax.select(
attention_mask > 0,
jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
jnp.full(attention_mask.shape, float("-inf")).astype(self.dtype),
)
else:
attention_bias = None
dropout_rng = None
if not deterministic and self.dropout > 0.0:
dropout_rng = self.make_rng("dropout")
attn_weights = dot_product_attention_weights(
query_states,
key_states,
bias=attention_bias,
dropout_rng=dropout_rng,
dropout_rate=self.dropout,
broadcast_dropout=True,
deterministic=deterministic,
dtype=self.dtype,
precision=None,
)
attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
attn_output = self._merge_heads(attn_output)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights
# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartEncoderLayer with Bart->Marian
class FlaxMarianEncoderLayer(nn.Module):
config: MarianConfig
dtype: jnp.dtype = jnp.float32
def setup(self) -> None:
self.embed_dim = self.config.d_model
self.self_attn = FlaxMarianAttention(
config=self.config,
embed_dim=self.embed_dim,
num_heads=self.config.encoder_attention_heads,
dropout=self.config.attention_dropout,
dtype=self.dtype,
)
self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
self.dropout_layer = nn.Dropout(rate=self.config.dropout)
self.activation_fn = ACT2FN[self.config.activation_function]
self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout)
self.fc1 = nn.Dense(
self.config.encoder_ffn_dim,
dtype=self.dtype,
kernel_init=jax.nn.initializers.normal(self.config.init_std),
)
self.fc2 = nn.Dense(
self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)
)
self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
residual = hidden_states
hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask)
hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic)
hidden_states = self.fc2(hidden_states)
hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartEncoderLayerCollection with Bart->Marian
class FlaxMarianEncoderLayerCollection(nn.Module):
config: MarianConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self):
self.layers = [
FlaxMarianEncoderLayer(self.config, name=str(i), dtype=self.dtype)
for i in range(self.config.encoder_layers)
]
self.layerdrop = self.config.encoder_layerdrop
def __call__(
self,
hidden_states,
attention_mask,
deterministic: bool = True,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
all_attentions = () if output_attentions else None
all_hidden_states = () if output_hidden_states else None
for encoder_layer in self.layers:
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = random.uniform(0, 1)
if not deterministic and (dropout_probability < self.layerdrop): # skip the layer
layer_outputs = (None, None)
else:
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
output_attentions,
deterministic,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states += (hidden_states,)
outputs = (hidden_states, all_hidden_states, all_attentions)
if not return_dict:
return tuple(v for v in outputs if v is not None)
return FlaxBaseModelOutput(
last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
)
# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderLayer with Bart->Marian
class FlaxMarianDecoderLayer(nn.Module):
config: MarianConfig
dtype: jnp.dtype = jnp.float32
def setup(self) -> None:
self.embed_dim = self.config.d_model
self.self_attn = FlaxMarianAttention(
config=self.config,
embed_dim=self.embed_dim,
num_heads=self.config.decoder_attention_heads,
dropout=self.config.attention_dropout,
causal=True,
dtype=self.dtype,
)
self.dropout_layer = nn.Dropout(rate=self.config.dropout)
self.activation_fn = ACT2FN[self.config.activation_function]
self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout)
self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
self.encoder_attn = FlaxMarianAttention(
config=self.config,
embed_dim=self.embed_dim,
num_heads=self.config.decoder_attention_heads,
dropout=self.config.attention_dropout,
dtype=self.dtype,
)
self.encoder_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
self.fc1 = nn.Dense(
self.config.encoder_ffn_dim,
dtype=self.dtype,
kernel_init=jax.nn.initializers.normal(self.config.init_std),
)
self.fc2 = nn.Dense(
self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)
)
self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
def __call__(
self,
hidden_states: jnp.ndarray,
attention_mask: jnp.ndarray,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
output_attentions: bool = True,
deterministic: bool = True,
) -> Tuple[jnp.ndarray]:
residual = hidden_states
# Self Attention
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states, attention_mask=attention_mask, init_cache=init_cache
)
hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# Cross-Attention Block
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
hidden_states, cross_attn_weights = self.encoder_attn(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
)
hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
hidden_states = residual + hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
# Fully Connected
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic)
hidden_states = self.fc2(hidden_states)
hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
return outputs
# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderLayerCollection with Bart->Marian
class FlaxMarianDecoderLayerCollection(nn.Module):
config: MarianConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self):
self.layers = [
FlaxMarianDecoderLayer(self.config, name=str(i), dtype=self.dtype)
for i in range(self.config.decoder_layers)
]
self.layerdrop = self.config.decoder_layerdrop
def __call__(
self,
hidden_states,
attention_mask,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
deterministic: bool = True,
init_cache: bool = False,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
for decoder_layer in self.layers:
if output_hidden_states:
all_hidden_states += (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = random.uniform(0, 1)
if not deterministic and (dropout_probability < self.layerdrop):
layer_outputs = (None, None, None)
else:
layer_outputs = decoder_layer(
hidden_states,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
init_cache=init_cache,
output_attentions=output_attentions,
deterministic=deterministic,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
outputs = [hidden_states, all_hidden_states, all_self_attns, all_cross_attentions]
if not return_dict:
return tuple(v for v in outputs if v is not None)
return FlaxBaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_cross_attentions,
)
class FlaxMarianEncoder(nn.Module):
config: MarianConfig
embed_tokens: nn.Embed
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self):
self.dropout_layer = nn.Dropout(rate=self.config.dropout)
embed_dim = self.config.d_model
self.max_source_positions = self.config.max_position_embeddings
self.embed_scale = math.sqrt(embed_dim) if self.config.scale_embedding else 1.0
self.embed_positions = create_sinusoidal_positions(self.config.max_position_embeddings, embed_dim)
self.layers = FlaxMarianEncoderLayerCollection(self.config, self.dtype)
def __call__(
self,
input_ids,
attention_mask,
position_ids,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
deterministic: bool = True,
):
input_shape = input_ids.shape
input_ids = input_ids.reshape(-1, input_shape[-1])
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
positions = jnp.take(self.embed_positions, position_ids, axis=0)
        # explicitly cast the positions here, since self.embed_positions are not registered as parameters
positions = positions.astype(inputs_embeds.dtype)
hidden_states = inputs_embeds + positions
hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
outputs = self.layers(
hidden_states,
attention_mask,
deterministic=deterministic,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if not return_dict:
return outputs
return FlaxBaseModelOutput(
last_hidden_state=outputs.last_hidden_state,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class FlaxMarianDecoder(nn.Module):
config: MarianConfig
embed_tokens: nn.Embed
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self):
self.dropout_layer = nn.Dropout(rate=self.config.dropout)
embed_dim = self.config.d_model
self.max_target_positions = self.config.max_position_embeddings
self.embed_scale = math.sqrt(self.config.d_model) if self.config.scale_embedding else 1.0
self.embed_positions = create_sinusoidal_positions(self.config.max_position_embeddings, embed_dim)
self.layers = FlaxMarianDecoderLayerCollection(self.config, self.dtype)
def __call__(
self,
input_ids,
attention_mask,
position_ids,
encoder_hidden_states: Optional[jnp.ndarray] = None,
encoder_attention_mask: Optional[jnp.ndarray] = None,
init_cache: bool = False,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
deterministic: bool = True,
):
input_shape = input_ids.shape
input_ids = input_ids.reshape(-1, input_shape[-1])
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
# embed positions
positions = jnp.take(self.embed_positions, position_ids, axis=0)
        # explicitly cast the positions here, since self.embed_positions are not registered as parameters
positions = positions.astype(inputs_embeds.dtype)
hidden_states = inputs_embeds + positions
hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
outputs = self.layers(
hidden_states,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
deterministic=deterministic,
init_cache=init_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if not return_dict:
return outputs
return FlaxBaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=outputs.last_hidden_state,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
class FlaxMarianModule(nn.Module):
config: MarianConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self):
self.shared = nn.Embed(
self.config.vocab_size,
self.config.d_model,
embedding_init=jax.nn.initializers.normal(self.config.init_std),
)
self.encoder = FlaxMarianEncoder(self.config, dtype=self.dtype, embed_tokens=self.shared)
self.decoder = FlaxMarianDecoder(self.config, dtype=self.dtype, embed_tokens=self.shared)
def _get_encoder_module(self):
return self.encoder
def _get_decoder_module(self):
return self.decoder
def __call__(
self,
input_ids,
attention_mask,
decoder_input_ids,
decoder_attention_mask,
position_ids,
decoder_position_ids,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
deterministic: bool = True,
):
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
deterministic=deterministic,
)
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
position_ids=decoder_position_ids,
encoder_hidden_states=encoder_outputs[0],
encoder_attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
deterministic=deterministic,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return FlaxSeq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
class FlaxMarianPreTrainedModel(FlaxPreTrainedModel):
config_class = MarianConfig
base_model_prefix: str = "model"
module_class: nn.Module = None
def __init__(
self,
config: MarianConfig,
input_shape: Tuple[int] = (1, 1),
seed: int = 0,
dtype: jnp.dtype = jnp.float32,
**kwargs
):
module = self.module_class(config=config, dtype=dtype, **kwargs)
super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype)
def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple) -> FrozenDict:
# init input tensors
input_ids = jnp.zeros(input_shape, dtype="i4")
# make sure initialization pass will work for FlaxMarianForSequenceClassificationModule
input_ids = input_ids.at[(..., -1)].set(self.config.eos_token_id)
attention_mask = jnp.ones_like(input_ids)
decoder_input_ids = input_ids
decoder_attention_mask = jnp.ones_like(input_ids)
batch_size, sequence_length = input_ids.shape
position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
params_rng, dropout_rng = jax.random.split(rng)
rngs = {"params": params_rng, "dropout": dropout_rng}
return self.module.init(
rngs,
input_ids,
attention_mask,
decoder_input_ids,
decoder_attention_mask,
position_ids,
decoder_position_ids,
)["params"]
def init_cache(self, batch_size, max_length, encoder_outputs):
r"""
Args:
batch_size (`int`):
batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
max_length (`int`):
maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
cache.
encoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray)]`):
`encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*:
`attentions`). `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*)
is a sequence of hidden-states at the output of the last layer of the encoder. Used in the
cross-attention of the decoder.
"""
# init input variables to retrieve cache
decoder_input_ids = jnp.ones((batch_size, max_length), dtype="i4")
decoder_attention_mask = jnp.ones_like(decoder_input_ids)
decoder_position_ids = jnp.broadcast_to(
jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape
)
def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
decoder_module = module._get_decoder_module()
return decoder_module(decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs)
init_variables = self.module.init(
jax.random.PRNGKey(0),
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
decoder_position_ids=decoder_position_ids,
encoder_hidden_states=encoder_outputs[0],
init_cache=True,
method=_decoder_forward, # we only need to call the decoder to init the cache
)
return unfreeze(init_variables["cache"])
@add_start_docstrings(MARIAN_ENCODE_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=FlaxBaseModelOutput, config_class=MarianConfig)
def encode(
self,
input_ids: jnp.ndarray,
attention_mask: Optional[jnp.ndarray] = None,
position_ids: Optional[jnp.ndarray] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
train: bool = False,
params: dict = None,
dropout_rng: PRNGKey = None,
):
r"""
Returns:
Example:
```python
>>> from transformers import MarianTokenizer, FlaxMarianMTModel
>>> tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
>>> model = FlaxMarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de")
>>> text = "My friends are cool but they eat too many carbs."
>>> inputs = tokenizer(text, max_length=64, return_tensors="jax")
>>> encoder_outputs = model.encode(**inputs)
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.return_dict
if attention_mask is None:
attention_mask = jnp.ones_like(input_ids)
if position_ids is None:
batch_size, sequence_length = input_ids.shape
position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
# Handle any PRNG if needed
rngs = {}
if dropout_rng is not None:
rngs["dropout"] = dropout_rng
def _encoder_forward(module, input_ids, attention_mask, position_ids, **kwargs):
encode_module = module._get_encoder_module()
return encode_module(input_ids, attention_mask, position_ids, **kwargs)
return self.module.apply(
{"params": params or self.params},
input_ids=jnp.array(input_ids, dtype="i4"),
attention_mask=jnp.array(attention_mask, dtype="i4"),
position_ids=jnp.array(position_ids, dtype="i4"),
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
deterministic=not train,
rngs=rngs,
method=_encoder_forward,
)
@add_start_docstrings(MARIAN_DECODE_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=FlaxBaseModelOutputWithPastAndCrossAttentions, config_class=MarianConfig)
def decode(
self,
decoder_input_ids,
encoder_outputs,
encoder_attention_mask: Optional[jnp.ndarray] = None,
decoder_attention_mask: Optional[jnp.ndarray] = None,
decoder_position_ids: Optional[jnp.ndarray] = None,
past_key_values: dict = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
train: bool = False,
params: dict = None,
dropout_rng: PRNGKey = None,
):
r"""
Returns:
Example:
```python
>>> import jax.numpy as jnp
>>> from transformers import MarianTokenizer, FlaxMarianMTModel
>>> tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
>>> model = FlaxMarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de")
>>> text = "My friends are cool but they eat too many carbs."
>>> inputs = tokenizer(text, max_length=64, return_tensors="jax")
>>> encoder_outputs = model.encode(**inputs)
>>> decoder_start_token_id = model.config.decoder_start_token_id
>>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
>>> outputs = model.decode(decoder_input_ids, encoder_outputs)
>>> last_decoder_hidden_states = outputs.last_hidden_state
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.return_dict
encoder_hidden_states = encoder_outputs[0]
if encoder_attention_mask is None:
batch_size, sequence_length = encoder_hidden_states.shape[:2]
encoder_attention_mask = jnp.ones((batch_size, sequence_length))
batch_size, sequence_length = decoder_input_ids.shape
if decoder_attention_mask is None:
decoder_attention_mask = jnp.ones((batch_size, sequence_length))
if decoder_position_ids is None:
if past_key_values is not None:
raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.")
decoder_position_ids = jnp.broadcast_to(
jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
)
# Handle any PRNG if needed
rngs = {}
if dropout_rng is not None:
rngs["dropout"] = dropout_rng
inputs = {"params": params or self.params}
        # if past_key_values are passed then the cache is already initialized; a private flag init_cache
        # has to be passed down to ensure the cache is used. It also has to be made sure that the cache
        # is marked as mutable so that it can be changed by the FlaxMarianAttention module
if past_key_values:
inputs["cache"] = past_key_values
mutable = ["cache"]
else:
mutable = False
def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
decoder_module = module._get_decoder_module()
return decoder_module(
decoder_input_ids,
decoder_attention_mask,
decoder_position_ids,
**kwargs,
)
outputs = self.module.apply(
inputs,
decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
deterministic=not train,
rngs=rngs,
mutable=mutable,
method=_decoder_forward,
)
# add updated cache to model output
if past_key_values is not None and return_dict:
outputs, past = outputs
outputs["past_key_values"] = unfreeze(past["cache"])
return outputs
elif past_key_values is not None and not return_dict:
outputs, past = outputs
outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
return outputs
@add_start_docstrings_to_model_forward(MARIAN_INPUTS_DOCSTRING)
def __call__(
self,
input_ids: jnp.ndarray,
attention_mask: Optional[jnp.ndarray] = None,
decoder_input_ids: Optional[jnp.ndarray] = None,
decoder_attention_mask: Optional[jnp.ndarray] = None,
position_ids: Optional[jnp.ndarray] = None,
decoder_position_ids: Optional[jnp.ndarray] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
train: bool = False,
params: dict = None,
dropout_rng: PRNGKey = None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.return_dict
# prepare encoder inputs
if attention_mask is None:
attention_mask = jnp.ones_like(input_ids)
if position_ids is None:
batch_size, sequence_length = input_ids.shape
position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
# prepare decoder inputs
if decoder_input_ids is None:
decoder_input_ids = shift_tokens_right(
input_ids, self.config.pad_token_id, decoder_start_token_id=self.config.decoder_start_token_id
)
if decoder_attention_mask is None:
decoder_attention_mask = jnp.ones_like(decoder_input_ids)
if decoder_position_ids is None:
batch_size, sequence_length = decoder_input_ids.shape
decoder_position_ids = jnp.broadcast_to(
jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
)
# Handle any PRNG if needed
rngs = {"dropout": dropout_rng} if dropout_rng is not None else {}
return self.module.apply(
{"params": params or self.params},
input_ids=jnp.array(input_ids, dtype="i4"),
attention_mask=jnp.array(attention_mask, dtype="i4"),
position_ids=jnp.array(position_ids, dtype="i4"),
decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
deterministic=not train,
rngs=rngs,
)
@add_start_docstrings(
"The bare Marian Model transformer outputting raw hidden-states without any specific head on top.",
MARIAN_START_DOCSTRING,
)
class FlaxMarianModel(FlaxMarianPreTrainedModel):
config: MarianConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
module_class = FlaxMarianModule
append_call_sample_docstring(
FlaxMarianModel, _TOKENIZER_FOR_DOC, _CHECKPOINT_FOR_DOC, FlaxSeq2SeqModelOutput, _CONFIG_FOR_DOC
)
class FlaxMarianMTModule(nn.Module):
config: MarianConfig
dtype: jnp.dtype = jnp.float32
bias_init: Callable[..., jnp.ndarray] = jax.nn.initializers.zeros
def setup(self):
self.model = FlaxMarianModule(config=self.config, dtype=self.dtype)
self.lm_head = nn.Dense(
self.model.shared.num_embeddings,
use_bias=False,
dtype=self.dtype,
kernel_init=jax.nn.initializers.normal(self.config.init_std),
)
self.final_logits_bias = self.param("final_logits_bias", self.bias_init, (1, self.model.shared.num_embeddings))
def _get_encoder_module(self):
return self.model.encoder
def _get_decoder_module(self):
return self.model.decoder
def __call__(
self,
input_ids,
attention_mask,
decoder_input_ids,
decoder_attention_mask,
position_ids,
decoder_position_ids,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
deterministic: bool = True,
):
outputs = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
position_ids=position_ids,
decoder_position_ids=decoder_position_ids,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
deterministic=deterministic,
)
hidden_states = outputs[0]
if self.config.tie_word_embeddings:
shared_embedding = self.model.variables["params"]["shared"]["embedding"]
lm_logits = self.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
else:
lm_logits = self.lm_head(hidden_states)
lm_logits += self.final_logits_bias.astype(self.dtype)
if not return_dict:
output = (lm_logits,) + outputs[1:]
return output
return FlaxSeq2SeqLMOutput(
logits=lm_logits,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
@add_start_docstrings(
"The MARIAN Model with a language modeling head. Can be used for translation.", MARIAN_START_DOCSTRING
)
class FlaxMarianMTModel(FlaxMarianPreTrainedModel):
module_class = FlaxMarianMTModule
dtype: jnp.dtype = jnp.float32
@add_start_docstrings(MARIAN_DECODE_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class=MarianConfig)
def decode(
self,
decoder_input_ids,
encoder_outputs,
encoder_attention_mask: Optional[jnp.ndarray] = None,
decoder_attention_mask: Optional[jnp.ndarray] = None,
decoder_position_ids: Optional[jnp.ndarray] = None,
past_key_values: dict = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
train: bool = False,
params: dict = None,
dropout_rng: PRNGKey = None,
):
r"""
Returns:
Example:
```python
>>> import jax.numpy as jnp
>>> from transformers import MarianTokenizer, FlaxMarianMTModel
>>> model = FlaxMarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de")
>>> tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
>>> text = "My friends are cool but they eat too many carbs."
>>> inputs = tokenizer(text, max_length=64, return_tensors="jax")
>>> encoder_outputs = model.encode(**inputs)
>>> decoder_start_token_id = model.config.decoder_start_token_id
>>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
>>> outputs = model.decode(decoder_input_ids, encoder_outputs)
>>> logits = outputs.logits
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.return_dict
encoder_hidden_states = encoder_outputs[0]
if encoder_attention_mask is None:
batch_size, sequence_length = encoder_hidden_states.shape[:2]
encoder_attention_mask = jnp.ones((batch_size, sequence_length))
batch_size, sequence_length = decoder_input_ids.shape
if decoder_attention_mask is None:
decoder_attention_mask = jnp.ones((batch_size, sequence_length))
if decoder_position_ids is None:
if past_key_values is not None:
raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.")
decoder_position_ids = jnp.broadcast_to(
jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
)
# Handle any PRNG if needed
rngs = {}
if dropout_rng is not None:
rngs["dropout"] = dropout_rng
inputs = {"params": params or self.params}
        # if past_key_values are passed then the cache is already initialized; a private flag init_cache
        # has to be passed down to ensure the cache is used. It also has to be made sure that the cache
        # is marked as mutable so that it can be changed by the FlaxMarianAttention module
if past_key_values:
inputs["cache"] = past_key_values
mutable = ["cache"]
else:
mutable = False
def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
decoder_module = module._get_decoder_module()
outputs = decoder_module(
decoder_input_ids,
decoder_attention_mask,
decoder_position_ids,
**kwargs,
)
hidden_states = outputs[0]
if self.config.tie_word_embeddings:
shared_embedding = module.model.variables["params"]["shared"]["embedding"]
lm_logits = module.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
else:
lm_logits = module.lm_head(hidden_states)
lm_logits += module.final_logits_bias.astype(self.dtype)
return lm_logits, outputs
outputs = self.module.apply(
inputs,
decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
deterministic=not train,
rngs=rngs,
mutable=mutable,
method=_decoder_forward,
)
if past_key_values is None:
lm_logits, decoder_outputs = outputs
else:
(lm_logits, decoder_outputs), past = outputs
if return_dict:
outputs = FlaxCausalLMOutputWithCrossAttentions(
logits=lm_logits,
hidden_states=decoder_outputs.hidden_states,
attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
)
else:
outputs = (lm_logits,) + decoder_outputs[1:]
# add updated cache to model output
if past_key_values is not None and return_dict:
outputs["past_key_values"] = unfreeze(past["cache"])
return outputs
elif past_key_values is not None and not return_dict:
outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
return outputs
def _adapt_logits_for_beam_search(self, logits):
"""This function enforces the padding token never to be generated."""
logits = logits.at[:, :, self.config.pad_token_id].set(float("-inf"))
return logits
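    # (With the pad-token logits set to -inf above, softmax assigns the pad token
    # zero probability, so beam search can never emit it.)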
def prepare_inputs_for_generation(
self,
decoder_input_ids,
max_length,
attention_mask: Optional[jnp.DeviceArray] = None,
decoder_attention_mask: Optional[jnp.DeviceArray] = None,
encoder_outputs=None,
**kwargs
):
# initializing the cache
batch_size, seq_length = decoder_input_ids.shape
past_key_values = self.init_cache(batch_size, max_length, encoder_outputs)
# Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
# But since the decoder uses a causal mask, those positions are masked anyways.
# Thus we can create a single static attention_mask here, which is more efficient for compilation
extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
if decoder_attention_mask is not None:
position_ids = decoder_attention_mask.cumsum(axis=-1) - 1
extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, decoder_attention_mask, (0, 0))
else:
position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
return {
"past_key_values": past_key_values,
"encoder_outputs": encoder_outputs,
"encoder_attention_mask": attention_mask,
"decoder_attention_mask": extended_attention_mask,
"decoder_position_ids": position_ids,
}
def update_inputs_for_generation(self, model_outputs, model_kwargs):
model_kwargs["past_key_values"] = model_outputs.past_key_values
model_kwargs["decoder_position_ids"] = model_kwargs["decoder_position_ids"][:, -1:] + 1
return model_kwargs
FLAX_MARIAN_MT_DOCSTRING = """
Returns:
Example:
```python
>>> from transformers import MarianTokenizer, FlaxMarianMTModel
>>> model = FlaxMarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de")
>>> tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
>>> text = "My friends are cool but they eat too many carbs."
>>> input_ids = tokenizer(text, max_length=64, return_tensors="jax").input_ids
>>> sequences = model.generate(input_ids, max_length=64, num_beams=2).sequences
>>> outputs = tokenizer.batch_decode(sequences, skip_special_tokens=True)
>>> # should give *Meine Freunde sind cool, aber sie essen zu viele Kohlenhydrate.*
```
"""
overwrite_call_docstring(
FlaxMarianMTModel,
MARIAN_INPUTS_DOCSTRING + FLAX_MARIAN_MT_DOCSTRING,
)
append_replace_return_docstrings(FlaxMarianMTModel, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
| [
"[email protected]"
] | |
69acf6cb42853141e98f121c77a9d61f1f1a30cf | 2c926b4847a44c7f831d47ed0160751d3248e8f4 | /venv/lib/python3.8/site-packages/hubspot/automation/actions/models/single_field_dependency.py | f18ca6bf64e6504458c415ed11f6e4ab7e527d5a | [] | no_license | Women-in-Tech-Society/WITS_Site | c42cd2c9abe1b5515b80be82dc876a6c3842e42a | 5dbf22f5ee5a36358f6f279af4c13d86d31653c5 | refs/heads/main | 2023-05-11T02:34:05.531902 | 2021-06-01T01:05:12 | 2021-06-01T01:05:12 | 278,658,100 | 0 | 5 | null | 2022-11-22T18:41:35 | 2020-07-10T14:43:28 | Python | UTF-8 | Python | false | false | 6,688 | py | # coding: utf-8
"""
Custom Workflow Actions
Create custom workflow actions # noqa: E501
The version of the OpenAPI document: v4
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from hubspot.automation.actions.configuration import Configuration
class SingleFieldDependency(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
"dependency_type": "str",
"dependent_field_names": "list[str]",
"controlling_field_name": "str",
}
attribute_map = {
"dependency_type": "dependencyType",
"dependent_field_names": "dependentFieldNames",
"controlling_field_name": "controllingFieldName",
}
def __init__(
self,
dependency_type="SINGLE_FIELD",
dependent_field_names=None,
controlling_field_name=None,
local_vars_configuration=None,
): # noqa: E501
"""SingleFieldDependency - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._dependency_type = None
self._dependent_field_names = None
self._controlling_field_name = None
self.discriminator = None
self.dependency_type = dependency_type
self.dependent_field_names = dependent_field_names
self.controlling_field_name = controlling_field_name
@property
def dependency_type(self):
"""Gets the dependency_type of this SingleFieldDependency. # noqa: E501
:return: The dependency_type of this SingleFieldDependency. # noqa: E501
:rtype: str
"""
return self._dependency_type
@dependency_type.setter
def dependency_type(self, dependency_type):
"""Sets the dependency_type of this SingleFieldDependency.
:param dependency_type: The dependency_type of this SingleFieldDependency. # noqa: E501
:type: str
"""
if (
self.local_vars_configuration.client_side_validation
and dependency_type is None
): # noqa: E501
raise ValueError(
"Invalid value for `dependency_type`, must not be `None`"
) # noqa: E501
allowed_values = ["SINGLE_FIELD"] # noqa: E501
if (
self.local_vars_configuration.client_side_validation
and dependency_type not in allowed_values
): # noqa: E501
raise ValueError(
"Invalid value for `dependency_type` ({0}), must be one of {1}".format( # noqa: E501
dependency_type, allowed_values
)
)
self._dependency_type = dependency_type
@property
def dependent_field_names(self):
"""Gets the dependent_field_names of this SingleFieldDependency. # noqa: E501
:return: The dependent_field_names of this SingleFieldDependency. # noqa: E501
:rtype: list[str]
"""
return self._dependent_field_names
@dependent_field_names.setter
def dependent_field_names(self, dependent_field_names):
"""Sets the dependent_field_names of this SingleFieldDependency.
:param dependent_field_names: The dependent_field_names of this SingleFieldDependency. # noqa: E501
:type: list[str]
"""
if (
self.local_vars_configuration.client_side_validation
and dependent_field_names is None
): # noqa: E501
raise ValueError(
"Invalid value for `dependent_field_names`, must not be `None`"
) # noqa: E501
self._dependent_field_names = dependent_field_names
@property
def controlling_field_name(self):
"""Gets the controlling_field_name of this SingleFieldDependency. # noqa: E501
:return: The controlling_field_name of this SingleFieldDependency. # noqa: E501
:rtype: str
"""
return self._controlling_field_name
@controlling_field_name.setter
def controlling_field_name(self, controlling_field_name):
"""Sets the controlling_field_name of this SingleFieldDependency.
:param controlling_field_name: The controlling_field_name of this SingleFieldDependency. # noqa: E501
:type: str
"""
if (
self.local_vars_configuration.client_side_validation
and controlling_field_name is None
): # noqa: E501
raise ValueError(
"Invalid value for `controlling_field_name`, must not be `None`"
) # noqa: E501
self._controlling_field_name = controlling_field_name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SingleFieldDependency):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, SingleFieldDependency):
return True
return self.to_dict() != other.to_dict()
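# Minimal usage sketch (illustrative; the field names below are hypothetical):
#
#     dep = SingleFieldDependency(
#         dependent_field_names=["widget_name"],
#         controlling_field_name="widget_owner",
#     )
#     dep.to_dict()
#     # -> {'dependency_type': 'SINGLE_FIELD',
#     #     'dependent_field_names': ['widget_name'],
#     #     'controlling_field_name': 'widget_owner'}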
| [
"[email protected]"
] | |
d75ab97fb9184a24f45a05f01fc83903b2dc748e | 6f8aec72f983715b1dcc1e067e980a440440423a | /bruteguard/patterns/singleton.py | a9f7c0f51adf9d2ce958d11132938a6d7c1b1ffb | [
"MIT"
] | permissive | dcopm999/django-brute-guard | 41cef7c1f98b275c0ef2176424c8ef1e75002fdb | e4c629d81f1cc732ddae2a43042e92ea423884b8 | refs/heads/master | 2023-08-02T06:16:54.219332 | 2021-09-30T05:45:10 | 2021-09-30T05:45:10 | 409,435,237 | 0 | 0 | MIT | 2021-09-30T05:45:10 | 2021-09-23T03:32:47 | Python | UTF-8 | Python | false | false | 1,076 | py | from typing import Dict
class SingletonMeta(type):
"""
    In Python, a Singleton can be implemented in several different ways. Possible
    approaches include a base class, a decorator, and a metaclass. We use a
    metaclass here, since it is the best fit for this purpose.
"""
_instances: Dict[type, type] = {}
def __call__(cls, *args, **kwargs):
"""
        This implementation does not take into account a possible change of the
        arguments passed to `__init__`.
"""
if cls not in cls._instances:
instance = super().__call__(*args, **kwargs)
cls._instances[cls] = instance
return cls._instances[cls]
class Singleton(object):
def __new__(cls):
if not hasattr(cls, "instance"):
cls.instance = super(Singleton, cls).__new__(cls)
return cls.instance
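# Minimal usage sketch (illustrative; the class name below is hypothetical and not
# part of this module): both helpers guarantee one shared instance per class.
if __name__ == "__main__":
    class Config(metaclass=SingletonMeta):
        def __init__(self) -> None:
            self.value = 42
    assert Config() is Config()        # SingletonMeta returns the cached instance
    assert Singleton() is Singleton()  # the __new__-based variant behaves the same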
| [
"[email protected]"
] | |
9bf099e4570aab4e3c827aba4cfa379cb7ad7196 | a86cb1d0cc2c01ccc5b7d03d25a1b98d4f8b66ca | /day_18/crawling_03.py | 08ce38608a68158041385e8770f169492843e3ce | [] | no_license | yongseongCho/python_201911 | 020efd812df909f6d1150c6a15a9a4fa6ee946b6 | f4696fac81a101d13a95ca0ca602e6478b4d2f58 | refs/heads/master | 2020-09-12T12:44:46.364259 | 2019-12-19T13:17:08 | 2019-12-19T13:17:08 | 222,429,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,321 | py | # -*- coding: utf-8 -*-
from bs4 import BeautifulSoup as bs
html = '''
<td class="title">
<div class="tit3">
<a href="/movie/bi/mi/basic.nhn?code=181710"
title="포드 V 페라리">포드 V 페라리</a>
</div>
</td>
'''
soup = bs(html, 'html.parser')
# The find method of a BeautifulSoup object
# is used when searching with a combination of
# tag name and attribute information.
# - find returns only a single result
# - the first matching tag found is returned
# return the first td tag object found
tag = soup.find(name='td')
print(f'tag -> {tag}')
# return the first a tag object found
tag = soup.find(name='a')
print(f'tag -> {tag}')
# return the first object found
# whose class attribute is title
tag = soup.find(attrs={'class':'title'})
print(f'tag -> {tag}')
# return the first object found
# whose class attribute is tit3
tag = soup.find(attrs={'class':'tit3'})
print(f'tag -> {tag}')
# return the first object found
# whose tag name is td and
# whose class attribute is tit3
# - None is returned if no such tag exists
tag = soup.find(name='td',
attrs={'class':'tit3'})
print(f'tag -> {tag}')
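# Follow-up sketch (illustrative, not from the original lesson file): a found Tag
# exposes its text and attributes directly.
movie_link = soup.find(name='a')
if movie_link is not None:
    print(f'text -> {movie_link.get_text()}')   # 포드 V 페라리
    print(f'href -> {movie_link["href"]}')      # /movie/bi/mi/basic.nhn?code=181710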
| [
"[email protected]"
] | |
90adc3801f23ed865f8ce3373066f9a2a5ee43e3 | e2bd8debf59f71e2c7fabea03cc108618944b2b0 | /el_pagination/paginators.py | 6da5231fca53a0b0e1e586150ed4c8803e1d1b0e | [] | no_license | successar/Quizz | 874c7c8656c33973d5d4f9563073b0434573a333 | 2244ff13568db92e3ff88156982ec44c83418199 | refs/heads/master | 2021-01-21T13:11:45.960397 | 2016-05-11T10:34:48 | 2016-05-11T10:34:48 | 53,747,315 | 1 | 1 | null | 2016-05-07T15:00:41 | 2016-03-12T18:36:34 | Python | UTF-8 | Python | false | false | 4,359 | py | """Customized Django paginators."""
from __future__ import unicode_literals
from math import ceil
from django.core.paginator import (
EmptyPage,
Page,
PageNotAnInteger,
Paginator,
)
class CustomPage(Page):
"""Handle different number of items on the first page."""
def start_index(self):
"""Return the 1-based index of the first item on this page."""
paginator = self.paginator
# Special case, return zero if no items.
if paginator.count == 0:
return 0
elif self.number == 1:
return 1
return (
(self.number - 2) * paginator.per_page + paginator.first_page + 1)
def end_index(self):
"""Return the 1-based index of the last item on this page."""
paginator = self.paginator
# Special case for the last page because there can be orphans.
if self.number == paginator.num_pages:
return paginator.count
return (self.number - 1) * paginator.per_page + paginator.first_page
class BasePaginator(Paginator):
"""A base paginator class subclassed by the other real paginators.
Handle different number of items on the first page.
"""
def __init__(self, object_list, per_page, **kwargs):
if 'first_page' in kwargs:
self.first_page = kwargs.pop('first_page')
else:
self.first_page = per_page
super(BasePaginator, self).__init__(object_list, per_page, **kwargs)
def get_current_per_page(self, number):
return self.first_page if number == 1 else self.per_page
class DefaultPaginator(BasePaginator):
"""The default paginator used by this application."""
def page(self, number):
number = self.validate_number(number)
if number == 1:
bottom = 0
else:
bottom = ((number - 2) * self.per_page + self.first_page)
top = bottom + self.get_current_per_page(number)
if top + self.orphans >= self.count:
top = self.count
return CustomPage(self.object_list[bottom:top], number, self)
def _get_num_pages(self):
if self._num_pages is None:
if self.count == 0 and not self.allow_empty_first_page:
self._num_pages = 0
else:
hits = max(0, self.count - self.orphans - self.first_page)
try:
self._num_pages = int(ceil(hits / float(self.per_page))) + 1
except ZeroDivisionError:
self._num_pages = 0 # fallback to a safe value
return self._num_pages
num_pages = property(_get_num_pages)
class LazyPaginator(BasePaginator):
"""Implement lazy pagination."""
def validate_number(self, number):
try:
number = int(number)
except ValueError:
raise PageNotAnInteger('That page number is not an integer')
if number < 1:
raise EmptyPage('That page number is less than 1')
return number
def page(self, number):
number = self.validate_number(number)
current_per_page = self.get_current_per_page(number)
if number == 1:
bottom = 0
else:
bottom = ((number - 2) * self.per_page + self.first_page)
top = bottom + current_per_page
# Retrieve more objects to check if there is a next page.
objects = list(self.object_list[bottom:top + self.orphans + 1])
objects_count = len(objects)
if objects_count > (current_per_page + self.orphans):
# If another page is found, increase the total number of pages.
self._num_pages = number + 1
# In any case, return only objects for this page.
objects = objects[:current_per_page]
elif (number != 1) and (objects_count <= self.orphans):
raise EmptyPage('That page contains no results')
else:
# This is the last page.
self._num_pages = number
return CustomPage(objects, number, self)
def _get_count(self):
raise NotImplementedError
count = property(_get_count)
def _get_num_pages(self):
return self._num_pages
num_pages = property(_get_num_pages)
def _get_page_range(self):
raise NotImplementedError
page_range = property(_get_page_range)
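# Minimal usage sketch (illustrative, not part of the module): DefaultPaginator
# accepts a `first_page` size that may differ from `per_page`, e.g. 3 items on the
# first page and 10 on every later page:
#
#     paginator = DefaultPaginator(range(23), 10, first_page=3)
#     paginator.page(1).object_list    # items 0..2
#     paginator.page(2).start_index()  # 4 (the 4th item overall opens page 2)
#     paginator.num_pages              # 3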
| [
"[email protected]"
] | |
2a739b751d27912b4ec246d9d6c54a4b4576bb53 | 441ee516fa509a66eb6a6132ed0fbafeae1a06ae | /uploadf/models.py | ecd3c53d7d6062ba60639a19f3c1636e76875306 | [] | no_license | Shirhussain/FileUpload | 3237627020ec322d4097e757b64f9f0c64feb4e7 | 19d2e848d7d05fd46838f9140c0a5658bbca281a | refs/heads/master | 2022-08-26T13:26:23.859084 | 2020-05-27T22:02:36 | 2020-05-27T22:02:36 | 264,777,859 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | from django.db import models
class Book(models.Model):
title = models.CharField(max_length=100)
author = models.CharField(max_length=50)
pdf = models.FileField(upload_to="mag/")
cover = models.ImageField(upload_to="mag/cover/", null=True, blank=True)
def __str__(self):
return self.title
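    # delete() is overridden below so the uploaded pdf and cover files are removed
    # from storage together with the database row.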
def delete(self,*args, **kwargs):
self.pdf.delete()
self.cover.delete()
super().delete(*args, **kwargs)
| [
"[email protected]"
] | |
fbf9ee05f41aa879f6e8efe0d638a2ad5f92c86f | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/53/usersdata/94/21421/submittedfiles/matriz2.py | 0d65cb52f0ea2e9536be7e87278cf3364bd3fd2d | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,374 | py | # -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
def soma_diagonal_principal(a):
soma=0
for i in range(0,a.shape[0],1):
soma=soma+a[i,i]
return soma
def soma_diagonal_secundaria(a):
soma=0
for i in range(0,a.shape[0],1):
        soma=soma+a[i,a.shape[0]-i-1]
return soma
def soma_linha(a):
    # returns a list with the sum of each row
    c=[]
    for i in range (0,a.shape[0],1):
        soma=0
        for j in range (0,a.shape[1],1):
            soma=soma+a[i,j]
        c.append(soma)
    return c
def soma_coluna(a):
    # returns a list with the sum of each column
    d=[]
    for j in range(0,a.shape[1],1):
        soma=0
        for i in range(0,a.shape[0],1):
            soma=soma+a[i,j]
        d.append(soma)
    return d
def magico(a):
sdp=soma_diagonal_principal(a)
sds=soma_diagonal_secundaria(a)
sl=soma_linha(a)
sc=soma_coluna(a)
cont=0
for i in range(0,len(sl),1):
if sdp==sds==sl[i]==sc[i]:
cont=cont+1
if cont==len(sl):
return True
else:
return False
linhas=int(input('enter the number of rows: '))
colunas=int(input('enter the number of columns: '))
a=np.zeros((linhas,colunas))
for i in range (0,a.shape[0],1):
    for j in range(0,a.shape[1],1):
        a[i,j]=float(input('enter an element: '))
if magico(a):
    print ('S')
else:
    print ('N')
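# Illustrative check (not part of the submitted exercise): for the 3x3 square
#   2 7 6
#   9 5 1
#   4 3 8
# every row, column and both diagonals sum to 15, so magico() returns True.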
| [
"[email protected]"
] | |
5cbebc094716ebcd2abe250c57520dee3117a1d0 | d7d25574246fd8585396a02ebd2ca8450e49b082 | /leetcode-py/leetcode1041.py | 44b0f6df2298740f5cbbebe712ae04d38cac1548 | [] | no_license | cicihou/LearningProject | b6b1de2300e574835f253935d0c0ae693b194020 | 3a5649357e0f21cbbc5e238351300cd706d533b3 | refs/heads/master | 2022-12-04T06:18:14.856766 | 2022-11-29T08:54:16 | 2022-11-29T08:54:16 | 141,606,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,835 | py | class Solution:
def isRobotBounded(self, instructions: str) -> bool:
'''
        A variant of LC 657.
        This problem is fairly tricky and is really closer to a math problem:
        you only need to check whether the robot is facing north at the end of
        one pass to know whether it ends up back at the origin.
        For an explanation of why instructions *= 4 is the minimum needed, see:
        https://leetcode.com/problems/robot-bounded-in-circle/discuss/850437/Python-O(n)-solution-explained
        https://leetcode.com/problems/robot-bounded-in-circle/discuss/290915/Python-Concise-%2B-Explanation
        note: the idea behind this direc handling is worth remembering - my first
        instinct was to use two separate arrays for the left and right directions,
        but the two turns only differ in the offset (period) that is added.
'''
direc = [(0, 1), (-1, 0), (0, -1), (1, 0)]
d = 0
start = (0, 0)
instructions *= 4
for ch in instructions:
if ch == 'G':
nx, ny = direc[d]
start = start[0] + nx, start[1] + ny
else:
if ch == 'L':
d = (d + 1) % 4
if ch == 'R':
d = (d + 3) % 4
return start == (0, 0)
'''
        An even trickier mathematical solution: just check whether the robot faces
        north; if it is not facing north after one pass, it is guaranteed to come back.
https://leetcode.com/problems/robot-bounded-in-circle/discuss/291221/Python-O(N)-time-O(1)-space-beats-100-detailed-explanations
'''
direc = [(0, 1), (-1, 0), (0, -1), (1, 0)]
d = 0
start = (0, 0)
for ch in instructions:
if ch == 'L':
d = (d+1) % 4
elif ch == 'R':
d = (d+3) % 4
else:
nx, ny = direc[d]
start = start[0] + nx, start[1] + ny
return start == (0, 0) or d != 0
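# Worked example (for intuition): with instructions = "GL", one pass ends at (0, 1)
# facing west (d == 1), so the robot is bounded; repeating the pass four times
# ("GLGLGLGL") returns it to (0, 0), which is exactly what the first solution checks.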
| [
"[email protected]"
] | |
460d5b12c08e5d071fb4b61103902d08761bbfd6 | 7239d389894613ef132edb1198a4f47cb2b65f92 | /packages/python/plotly/plotly/graph_objs/histogram/_hoverlabel.py | fbe525403ca58725c86a720504b77e42a4dc7ec7 | [
"MIT"
] | permissive | fast3dd13sa/plotly.py | 2169417b72481ff2937b5a9ce90d426cd1cccd80 | e778c6b5e6ae9665d7a5e2ddb666f43806df3959 | refs/heads/master | 2022-04-26T01:11:46.345181 | 2020-04-27T19:49:56 | 2020-04-27T19:49:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,798 | py | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Hoverlabel(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "histogram"
_path_str = "histogram.hoverlabel"
_valid_props = {
"align",
"alignsrc",
"bgcolor",
"bgcolorsrc",
"bordercolor",
"bordercolorsrc",
"font",
"namelength",
"namelengthsrc",
}
# align
# -----
@property
def align(self):
"""
Sets the horizontal alignment of the text content within hover
label box. Has an effect only if the hover label text spans
more two or more lines
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
# alignsrc
# --------
@property
def alignsrc(self):
"""
Sets the source reference on Chart Studio Cloud for align .
The 'alignsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["alignsrc"]
@alignsrc.setter
def alignsrc(self, val):
self["alignsrc"] = val
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bgcolorsrc
# ----------
@property
def bgcolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for bgcolor .
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bgcolorsrc"]
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self["bgcolorsrc"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# bordercolorsrc
# --------------
@property
def bordercolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
bordercolor .
The 'bordercolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bordercolorsrc"]
@bordercolorsrc.setter
def bordercolorsrc(self, val):
self["bordercolorsrc"] = val
# font
# ----
@property
def font(self):
"""
Sets the font used in hover labels.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram.hoverlabel.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for family .
size
sizesrc
Sets the source reference on Chart Studio Cloud
for size .
Returns
-------
plotly.graph_objs.histogram.hoverlabel.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
# namelength
# ----------
@property
def namelength(self):
"""
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
        The 'namelength' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["namelength"]
@namelength.setter
def namelength(self, val):
self["namelength"] = val
# namelengthsrc
# -------------
@property
def namelengthsrc(self):
"""
        Sets the source reference on Chart Studio Cloud for namelength.
The 'namelengthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["namelengthsrc"]
@namelengthsrc.setter
def namelengthsrc(self, val):
self["namelengthsrc"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the text content
            within the hover label box. Has an effect only if the hover
            label text spans two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
align .
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
bgcolor .
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
namelength .
"""
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
**kwargs
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.histogram.Hoverlabel`
align
Sets the horizontal alignment of the text content
            within the hover label box. Has an effect only if the hover
            label text spans two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
align .
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
bgcolor .
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
namelength .
Returns
-------
Hoverlabel
"""
super(Hoverlabel, self).__init__("hoverlabel")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.histogram.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.histogram.Hoverlabel`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("align", None)
_v = align if align is not None else _v
if _v is not None:
self["align"] = _v
_v = arg.pop("alignsrc", None)
_v = alignsrc if alignsrc is not None else _v
if _v is not None:
self["alignsrc"] = _v
_v = arg.pop("bgcolor", None)
_v = bgcolor if bgcolor is not None else _v
if _v is not None:
self["bgcolor"] = _v
_v = arg.pop("bgcolorsrc", None)
_v = bgcolorsrc if bgcolorsrc is not None else _v
if _v is not None:
self["bgcolorsrc"] = _v
_v = arg.pop("bordercolor", None)
_v = bordercolor if bordercolor is not None else _v
if _v is not None:
self["bordercolor"] = _v
_v = arg.pop("bordercolorsrc", None)
_v = bordercolorsrc if bordercolorsrc is not None else _v
if _v is not None:
self["bordercolorsrc"] = _v
_v = arg.pop("font", None)
_v = font if font is not None else _v
if _v is not None:
self["font"] = _v
_v = arg.pop("namelength", None)
_v = namelength if namelength is not None else _v
if _v is not None:
self["namelength"] = _v
_v = arg.pop("namelengthsrc", None)
_v = namelengthsrc if namelengthsrc is not None else _v
if _v is not None:
self["namelengthsrc"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| [
"[email protected]"
] | |
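The Hoverlabel properties documented above are usually supplied together when the trace is built; a minimal, illustrative sketch follows (the data values, colours and font choices are arbitrary, and only the public plotly.graph_objects entry point is assumed).

import plotly.graph_objects as go

# Histogram whose hover labels exercise the properties documented above.
fig = go.Figure(
    go.Histogram(
        x=[1, 2, 2, 3, 3, 3, 4],
        hoverlabel=dict(
            align="left",            # hoverlabel.align
            bgcolor="lightyellow",   # hoverlabel.bgcolor
            bordercolor="gray",      # hoverlabel.bordercolor
            namelength=-1,           # -1 shows the whole trace name
            font=dict(family="Arial", size=12, color="black"),
        ),
    )
)
# fig.show()  # uncomment to render in a browser or notebook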
174ab03d35f1b83c388a52575470a997450147eb | 444a9480bce2035565332d4d4654244c0b5cd47b | /research/cv/centerface/dependency/centernet/src/lib/datasets/sample/multi_pose.py | 5f4ff97eb3a893a418e55fcd8f149478108e8a22 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license",
"MIT"
] | permissive | mindspore-ai/models | 7ede9c6454e77e995e674628204e1c6e76bd7b27 | eab643f51336dbf7d711f02d27e6516e5affee59 | refs/heads/master | 2023-07-20T01:49:34.614616 | 2023-07-17T11:43:18 | 2023-07-17T11:43:18 | 417,393,380 | 301 | 92 | Apache-2.0 | 2023-05-17T11:22:28 | 2021-10-15T06:38:37 | Python | UTF-8 | Python | false | false | 9,604 | py | """
MIT License
Copyright (c) 2019 Xingyi Zhou
All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
import cv2
from dependency.centernet.src.lib.utils.image import color_aug
from dependency.centernet.src.lib.utils.image import get_affine_transform, affine_transform
from dependency.centernet.src.lib.utils.image import gaussian_radius, draw_umich_gaussian
from dependency.extd.utils.augmentations import anchor_crop_image_sampling
def get_border(border, size):
"""
Get border
"""
i = 1
while size - border // i <= border // i: # size > 2 * (border // i)
i *= 2
return border // i
def coco_box_to_bbox(box):
"""
(x1, y1, w, h) -> (x1, y1, x2, y2)
"""
bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]], dtype=np.float32)
return bbox
def preprocess_train(image, target, config):
"""
Preprocess training data
"""
data_rng = np.random.RandomState(123)
eig_val = np.array([0.2141788, 0.01817699, 0.00341571], dtype=np.float32)
eig_vec = np.array([
[-0.58752847, -0.69563484, 0.41340352],
[-0.5832747, 0.00994535, -0.81221408],
[-0.56089297, 0.71832671, 0.41158938]
], dtype=np.float32)
mean = np.array([0.40789654, 0.44719302, 0.47026115], dtype=np.float32).reshape((1, 1, 3))
std = np.array([0.28863828, 0.27408164, 0.27809835], dtype=np.float32).reshape((1, 1, 3))
num_objs = len(target)
anns = []
for each in target:
ann = {}
ann['bbox'] = each[0:4]
ann['keypoints'] = each[4:]
anns.append(ann)
cv2.setNumThreads(0)
img, anns = anchor_crop_image_sampling(image, anns)
_, width = img.shape[0], img.shape[1]
c = np.array([img.shape[1] / 2., img.shape[0] / 2.], dtype=np.float32)
s = max(img.shape[0], img.shape[1]) * 1.0
rot = 0
flipped = False
if config.rand_crop:
#s = s * np.random.choice(np.arange(0.8, 1.3, 0.05)) # for 768*768 or 800* 800
s = s * np.random.choice(np.arange(0.6, 1.0, 0.05)) # for 512 * 512
border = s * np.random.choice([0.1, 0.2, 0.25])
w_border = get_border(border, img.shape[1]) # w > 2 * w_border
h_border = get_border(border, img.shape[0]) # h > 2 * h_border
c[0] = np.random.randint(low=w_border, high=img.shape[1] - w_border)
c[1] = np.random.randint(low=h_border, high=img.shape[0] - h_border)
else:
sf = config.scale
cf = config.shift
c[0] += s * np.clip(np.random.randn() * cf, -2 * cf, 2 * cf)
c[1] += s * np.clip(np.random.randn() * cf, -2 * cf, 2 * cf)
s = s * np.clip(np.random.randn() * sf + 1, 1 - sf, 1 + sf)
if np.random.random() < config.rotate:
rf = config.rotate
rot = np.clip(np.random.randn() * rf, -rf * 2, rf * 2)
if np.random.random() < config.flip: # opt.flip = 0.5
flipped = True
img = img[:, ::-1, :]
c[0] = width - c[0] - 1
trans_input = get_affine_transform(c, s, rot, [config.input_res, config.input_res])
inp = cv2.warpAffine(img, trans_input, (config.input_res, config.input_res), flags=cv2.INTER_LINEAR)
inp = (inp.astype(np.float32) / 255.)
if config.color_aug:
color_aug(data_rng, inp, eig_val, eig_vec)
inp = (inp - mean) / std
inp = inp.transpose(2, 0, 1)
output_res = config.output_res
num_joints = config.num_joints
max_objs = config.max_objs
trans_output_rot = get_affine_transform(c, s, rot, [output_res, output_res])
trans_output = get_affine_transform(c, s, 0, [output_res, output_res])
# map
hm = np.zeros((config.num_classes, output_res, output_res), dtype=np.float32)
hm_hp = np.zeros((num_joints, output_res, output_res), dtype=np.float32)
wh = np.zeros((output_res, output_res, 2), dtype=np.float32)
reg = np.zeros((output_res, output_res, 2), dtype=np.float32)
ind = np.zeros((output_res, output_res), dtype=np.float32) # as float32, need no data_type change later
reg_mask = np.zeros((max_objs), dtype=np.uint8)
wight_mask = np.zeros((output_res, output_res, 2), dtype=np.float32)
kps = np.zeros((output_res, output_res, num_joints * 2), dtype=np.float32)
kps_mask = np.zeros((output_res, output_res, num_joints * 2), dtype=np.float32)
#
hp_offset = np.zeros((max_objs * num_joints, 2), dtype=np.float32)
hp_ind = np.zeros((max_objs * num_joints), dtype=np.int64)
hp_mask = np.zeros((max_objs * num_joints), dtype=np.int64)
draw_gaussian = draw_umich_gaussian
gt_det = []
for k in range(num_objs):
ann = anns[k]
bbox = coco_box_to_bbox(ann['bbox']) # [x,y,w,h]--[x1,y1,x2,y2]
cls_id = 0 #int(ann['category_id']) - 1
pts = np.array(ann['keypoints'], np.float32).reshape(num_joints, 3) # (x,y,0/1)
if flipped:
bbox[[0, 2]] = width - bbox[[2, 0]] - 1
pts[:, 0] = width - pts[:, 0] - 1
for e in config.flip_idx: # flip_idx = [[0, 1], [3, 4]]
pts[e[0]], pts[e[1]] = pts[e[1]].copy(), pts[e[0]].copy()
bbox[:2] = affine_transform(bbox[:2], trans_output) # [0, 1] -- (x1, y1)
bbox[2:] = affine_transform(bbox[2:], trans_output) # [2, 3] -- (x2, y2)
bbox = np.clip(bbox, 0, output_res - 1)
h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]
if (h > 0 and w > 0) or (rot != 0):
radius = gaussian_radius((math.ceil(h), math.ceil(w)))
radius = max(0, int(radius))
ct = np.array([(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)
ct_int = ct.astype(np.int32)
ind[ct_int[1], ct_int[0]] = 1.0
wh[ct_int[1], ct_int[0], :] = np.log(1. * w / 4), np.log(1. * h / 4)
reg[ct_int[1], ct_int[0], :] = ct[0] - ct_int[0], ct[1] - ct_int[1]
reg_mask[k] = 1.0
wight_mask[ct_int[1], ct_int[0], 0] = 1
wight_mask[ct_int[1], ct_int[0], 1] = 1
# if w*h <= 20: # can get what we want sometime, but unstable
# wight_mask[k] = 15
if w*h <= 40:
wight_mask[ct_int[1], ct_int[0], 0] = 5
wight_mask[ct_int[1], ct_int[0], 1] = 5
if w*h <= 20:
wight_mask[ct_int[1], ct_int[0], 0] = 10
wight_mask[ct_int[1], ct_int[0], 1] = 10
if w*h <= 10:
wight_mask[ct_int[1], ct_int[0], 0] = 15
wight_mask[ct_int[1], ct_int[0], 1] = 15
if w*h <= 4:
wight_mask[ct_int[1], ct_int[0], 0] = 0.1
wight_mask[ct_int[1], ct_int[0], 1] = 0.1
num_kpts = pts[:, 2].sum()
if num_kpts == 0:
hm[cls_id, ct_int[1], ct_int[0]] = 0.9999
hp_radius = gaussian_radius((math.ceil(h), math.ceil(w)))
hp_radius = max(0, int(hp_radius))
for j in range(num_joints):
if pts[j, 2] > 0:
pts[j, :2] = affine_transform(pts[j, :2], trans_output_rot)
if pts[j, 0] >= 0 and pts[j, 0] < output_res and pts[j, 1] >= 0 and pts[j, 1] < output_res:
kps[ct_int[1], ct_int[0], j * 2 : j * 2 + 2] = pts[j, :2] - ct_int
kps[ct_int[1], ct_int[0], j * 2 : j * 2 + 1] = kps[ct_int[1], ct_int[0], j * 2 : j * 2 + 1] / w
kps[ct_int[1], ct_int[0], j * 2 + 1: j * 2 + 2] = kps[ct_int[1], ct_int[0],
j * 2 + 1 : j * 2 + 2] / h
kps_mask[ct_int[1], ct_int[0], j * 2 : j * 2 + 2] = 1.0
pt_int = pts[j, :2].astype(np.int32)
hp_offset[k * num_joints + j] = pts[j, :2] - pt_int
hp_ind[k * num_joints + j] = pt_int[1] * output_res + pt_int[0]
hp_mask[k * num_joints + j] = 1
draw_gaussian(hm_hp[j], pt_int, hp_radius)
kps_mask[ct_int[1], ct_int[0], j * 2 : j * 2 + 2] = \
0.0 if ann['bbox'][2] * ann['bbox'][3] <= 8.0 else 1.0
draw_gaussian(hm[cls_id], ct_int, radius)
gt_det.append([ct[0] - w / 2, ct[1] - h / 2,
ct[0] + w / 2, ct[1] + h / 2, 1] +
pts[:, :2].reshape(num_joints * 2).tolist() + [cls_id])
return inp, hm, reg_mask, ind, wh, wight_mask, reg, kps_mask, kps
| [
"[email protected]"
] | |
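A standalone sketch of the box handling performed per object in preprocess_train above: a COCO-style (x, y, w, h) box is converted to corner form, clipped to the output grid, and its integer centre picks the heatmap cell. The box values and output resolution below are illustrative only.

import numpy as np

def coco_box_to_bbox(box):
    # (x1, y1, w, h) -> (x1, y1, x2, y2), mirroring the helper above
    return np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]],
                    dtype=np.float32)

output_res = 128                                    # assumed output resolution
bbox = np.clip(coco_box_to_bbox([30.0, 40.0, 20.0, 10.0]), 0, output_res - 1)
h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]
ct = np.array([(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)
ct_int = ct.astype(np.int32)                        # heatmap cell receiving the peak
print(bbox, (h, w), ct_int)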
c8fe58376e632a3abf6fabe21b845ea9bfca8392 | 493d5df9420ef94d9c5e82acb2d163e2a8c639b7 | /memo_app/forms.py | f9a73ac5260ef3004567845ec9abe38b54032eea | [] | no_license | reina0207/django | 0e3d6422c137be52978526128112ebf319e0f462 | c42744935043efdcc4b9f3f14641105d082d691a | refs/heads/master | 2023-08-13T14:10:59.979651 | 2021-10-17T01:48:48 | 2021-10-17T01:48:48 | 417,983,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | from django import forms
from .models import Memo
class PostForm(forms.ModelForm):
class Meta:
model = Memo
fields = ['content']
widgets = {
'content':forms.Textarea
} | [
"[email protected]"
] | |
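A hypothetical view showing how the PostForm above would typically be used; the view name, URL name, template path and redirect target are assumptions, not part of the original app.

from django.shortcuts import redirect, render

from .forms import PostForm

def memo_create(request):
    form = PostForm(request.POST or None)
    if request.method == "POST" and form.is_valid():
        form.save()                          # stores a Memo with the posted content
        return redirect("memo_list")         # assumed URL name
    return render(request, "memo_app/memo_form.html", {"form": form})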
cfbbe9fd87346ac41ce6d9352492c08480e4ec86 | 3cdbe5f5810a035ae168f8ff01c39f58c571e428 | /golf/migrations/0047_auto_20171013_0759.py | 3a2a55b2b3c56503895e31b0b55d1c431628e5d5 | [
"MIT"
] | permissive | kenrumer/scorekeeper | ebd6467e2ecde3da96bb08ef77a56f967cbde00e | c7f22676e84dfdf6ca3361c6ff56719f68fce31f | refs/heads/master | 2021-01-22T05:01:11.370869 | 2018-01-12T07:13:20 | 2018-01-12T07:13:20 | 102,276,714 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 710 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-13 07:59
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('golf', '0046_auto_20171013_0021'),
]
operations = [
migrations.RemoveField(
model_name='round',
name='tournament',
),
migrations.AddField(
model_name='round',
name='tournament_date',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='golf.TournamentDate', verbose_name='Tournament and Date'),
),
]
| [
"[email protected]"
] | |
d5c5779cb06cd034955a358e57ccad53113de7b0 | bfc25f1ad7bfe061b57cfab82aba9d0af1453491 | /data/external/repositories/141822/AXA_Telematics-master/Features/modules_janto/featureFun.py | 6077221b9a1eba16c0182067048fda6cda0a3b49 | [
"MIT"
] | permissive | Keesiu/meta-kaggle | 77d134620ebce530d183467202cf45639d9c6ff2 | 87de739aba2399fd31072ee81b391f9b7a63f540 | refs/heads/master | 2020-03-28T00:23:10.584151 | 2018-12-20T19:09:50 | 2018-12-20T19:09:50 | 147,406,338 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,821 | py | # -*- coding: utf-8 -*-
"""
(c) 2015
@author: Janto Oellrich
email: [email protected]
CONTENT:
Contains FEATURE EXTRACTION funtions for
the AXA telematics competition.
FUNCTION LIST:
features: creates feature vector for one trip
driverFrame: creates feature matrix containing features
of all trips of one driver
createFeatmat: create feature matrix for all drivers
"""
from load import *
from modules import *
from paths import *
def features(trip,plotting=False):
"""
Extracts features of a trip dataframe.
OUTPUT:
np.array including features
list of angles between points in deg
"""
# 1. duration
duration = len(trip)
# 2. speed: euclidean distance between adjacent points
speed = np.sum(np.diff(trip,axis=0)**2,axis=1)**0.5
### 2.1. smooth GPS data (by convolution) ####
smooth_speed = movingaverage(speed,10)
#smooth_speed[np.where(smooth_speed>65)[0]] = smooth_speed[np.where(smooth_speed>65)[0]-1]
# head changes
head = np.diff(trip,axis=0)
head_x,head_y = head[:,0],head[:,1]
head_quantiles_x = ss.mstats.mquantiles(head_x,np.linspace(0.02,0.99,10))
head_quantiles_y = ss.mstats.mquantiles(head_y,np.linspace(0.02,0.99,10))
# compute speed statistics
mean_speed = smooth_speed.mean()
max_speed = max(smooth_speed)
std_speed = speed.std()
# 3. acceleration
smooth_accel = np.diff(smooth_speed)
# 3.1 get all negative acceleration values
accel_s = np.array(smooth_accel)
neg_accel = accel_s[accel_s<0]
pos_accel = accel_s[accel_s>0]
# 3.3 average breaking strength
mean_breaking = neg_accel.mean()
mean_acceleration = pos_accel.mean()
# summary statistics
std_breaking = neg_accel.std()
std_acceleration = pos_accel.std()
# 4. total distance traveled
total_dist = np.sum(smooth_speed,axis=0)
# 5. relative standzeit (last 5% are discarded due standing)
last = round(len(trip)*0.05)
eps = 1 # threshold for determining standing
# relative standzeit
speed_red = np.array(speed)[:last]
standzeit = len(speed_red[speed_red<0+eps])/float(duration)
#### DRIVING STYLE REALTED FEATURES ####
# 1. acceleration from stop
# 1.1 get end of stops: where is speed near zero
end_stops = stops(smooth_speed)
n_stops = len(end_stops) # how many stops
# 1.2 how does the driver accelerate from stop?
end_stops = end_stops.astype(int)[:-1,1]
# following interval
interval = 7 # 7 seconds following end of stop
# only those which dont exceed indices of trip
end_stops = end_stops[end_stops+interval<len(smooth_speed)-1]
n_stops = len(end_stops)
if n_stops>1:
anfahren = np.zeros(shape=(1,n_stops)) # initialize array
for i in range(n_stops):
# slope at acceleration
start = end_stops[i]
anfahren[0,i] = np.diff([smooth_speed[start],smooth_speed[start+interval]])
else:
anfahren = np.array([0])
# compute statistics
mean_anfahren = anfahren.mean()
max_anfahren = anfahren.max()
std_anfahren = anfahren.std()
# end cell
last_cell = rounddown(normalize(trip[-2:,:]),30)[-1]
# determine trip is a back-home trip
if last_cell[0]==0 and last_cell[1]==0:
hometrip=1
else:
hometrip=0
# speed quantiles
speed_quantiles = ss.mstats.mquantiles(smooth_speed,np.linspace(0.02,0.99,25))
# acceleration quantiles
accel_quantiles = ss.mstats.mquantiles(smooth_accel,np.linspace(0.02,0.99,25))
################# PLOTS #################
if plotting:
figure()
x = range(1,len(trip)) # x values for plotting
#plot(x,total_dist,label='velocity') #speed
hold('on')
#plot(x,accel,color='red',alpha=0.6,label='acceleration') #acceleration
grid('on')
xlabel('time')
# plot smoothed speed data
plot(smooth_speed,color='k',label='Spline Interpol')
# plot smoothed accelerationd data
plot(smooth_accel,'red',label='Acceleration')
legend(loc='best')
#legend()
######################################
return np.concatenate((speed_quantiles,accel_quantiles,head_quantiles_x,head_quantiles_y,np.array([duration,total_dist,standzeit,std_speed,std_breaking,std_acceleration,std_anfahren,mean_anfahren,max_anfahren,n_stops,hometrip])))
def driverFrame(driver,n_features=10):
# initialize dataframe
trips = np.zeros(shape=(200,n_features))
# load all trips at once
all_trips = loadDriver(driver)
counter = 0
for trip in all_trips:
trips[counter,:] = features(trip,False)
counter += 1
return trips
def createFeatmat():
"""
Computes the features of all trips and stores them in a matrix.
"""
driverFolder = DATA
# driver IDs
drivers = sorted([int(folderName) for folderName in os.listdir(driverFolder)])
    print('Creating feature matrix...')
n_feat = 81
for i,driver in enumerate(drivers):
if i == 0:
featmat = driverFrame(driver,n_feat)
else:
featmat = np.vstack((featmat,driverFrame(driver,n_feat)))
        print('\t\t{0} trips, {1} features'.format(featmat.shape[0], featmat.shape[1]))
# write to file
    np.save(os.path.join(FEATURES, 'featurematrix1.npy'), featmat)  # np.save needs the array as well as the path
return featmat
| [
"[email protected]"
] | |
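The speed and acceleration features above depend on a movingaverage helper imported from modules, which is not included in this file; a minimal stand-in and the core speed computation could look like the sketch below (the window size and GPS coordinates are made up).

import numpy as np

def movingaverage(values, window):
    # boxcar smoothing; stand-in for the helper imported from `modules`
    kernel = np.ones(window) / float(window)
    return np.convolve(values, kernel, mode="same")

trip = np.array([[0.0, 0.0], [3.0, 4.0], [6.0, 8.0], [6.0, 9.0]])   # toy GPS track
speed = np.sum(np.diff(trip, axis=0) ** 2, axis=1) ** 0.5           # distance per time step
smooth_speed = movingaverage(speed, 2)
smooth_accel = np.diff(smooth_speed)
print(speed, smooth_speed, smooth_accel)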
7b1d36e6759d21e129f1ccb505e5824290d24a31 | 02d8a8b44dc9f8f3c63c2f62f24ceaee7d94fd12 | /apps/profile/views.py | ef24c418f7057f9700edd7b07e9c9801961c3ee3 | [] | no_license | bladas/transfer | 0970a4290e2e92e56853a64211ab3e79c479c0aa | 54c61b7bf340af4f48c7c7162805697b0417f4d7 | refs/heads/master | 2023-01-04T12:45:36.784275 | 2019-12-08T17:34:36 | 2019-12-08T17:34:36 | 224,606,015 | 0 | 0 | null | 2022-11-22T04:52:01 | 2019-11-28T08:31:59 | Python | UTF-8 | Python | false | false | 2,751 | py | from django.shortcuts import render, redirect
from django.views.generic import ListView, FormView
from apps.orders.models import *
from django.core import mail
from django.template.loader import render_to_string
connection = mail.get_connection()
connection.open()
class ProfileView(ListView):
template_name = 'profile.html'
model = Order
# paginate_by = 5
def get_queryset(self):
pass
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super().get_context_data(**kwargs)
# Add in a QuerySet of all the books
context['OrderFalse'] = Order.objects.filter(end=False).order_by('-id')
context['OrderTrue'] = Order.objects.filter(end=True).order_by('-id')
return context
def post(self, request, *args, **kwargs):
print(self.request.POST)
if self.request.method == "POST":
order_id = self.request.POST.get('order_id')
objects = Order.objects.get(pk=order_id)
print(objects.email)
email = objects.email
message = render_to_string('message/positive_message.html', {})
message2 = render_to_string('message/negative_message.html', {})
if self.request.POST.get('materialExampleRadios') == '1':
# Order.objects.update(end=True)
Order.objects.filter(pk=order_id).update(flag = 'Одобрено', end=True)
with mail.get_connection() as connection:
msg = mail.EmailMessage(
'Заказ трансфера по испанни', message,
'[email protected]', [email],
connection=connection,
)
msg.content_subtype = "html"
msg.send()
print("Отправлено одобрение")
return redirect('/')
elif self.request.POST.get('materialExampleRadios') == '2':
# Order.objects.update()
# Order.objects.update(flag = 'Отклонено')
Order.objects.filter(pk=order_id).update(flag = 'Отклонено',end=True)
with mail.get_connection() as connection:
msg = mail.EmailMessage(
'Заказ трансфера по испанни', message2,
'[email protected]', [email],
connection=connection,
)
msg.content_subtype = "html"
msg.send()
print("Отправлено отказ")
return redirect('/') | [
"[email protected]"
] | |
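The two email branches above differ only in the rendered template and the flag written to the order; the sending pattern itself reduces to the sketch below. The template names come from the view, while the subject and sender address are placeholders.

from django.core import mail
from django.template.loader import render_to_string

def send_decision_mail(recipient, approved):
    # Renders the same templates used by ProfileView.post and emails the result.
    template = ("message/positive_message.html" if approved
                else "message/negative_message.html")
    body = render_to_string(template, {})
    with mail.get_connection() as connection:
        msg = mail.EmailMessage(
            "Transfer order decision",     # placeholder subject
            body,
            "[email protected]",         # placeholder sender
            [recipient],
            connection=connection,
        )
        msg.content_subtype = "html"
        msg.send()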
ae3dc2f7bf203c611851ed1cdfa6151cfb952a15 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-1/c748512c4c45e257bc625ccf036e18c86d69f1c8-<main>-fix.py | 759edf1deb6fc8e4fa82cf7d84981a26157ee87e | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,832 | py |
def main():
'Main program body.'
api_key = get_api_key()
parser = argparse.ArgumentParser(description='Start a new Shippable run.')
parser.add_argument('project', metavar='account/project', help='Shippable account/project')
target = parser.add_mutually_exclusive_group()
target.add_argument('--branch', help='branch name')
target.add_argument('--run', metavar='ID', help='Shippable run ID')
parser.add_argument('--key', metavar='KEY', default=api_key, required=(not api_key), help='Shippable API key')
parser.add_argument('--env', nargs=2, metavar=('KEY', 'VALUE'), action='append', help='environment variable to pass')
if argcomplete:
argcomplete.autocomplete(parser)
args = parser.parse_args()
headers = dict(Authorization=('apiToken %s' % args.key))
data = dict(projectFullNames=args.project)
url = 'https://api.shippable.com/projects'
response = requests.get(url, data, headers=headers)
if (response.status_code != 200):
raise Exception(response.content)
result = response.json()
if (len(result) != 1):
raise Exception(('Received %d items instead of 1 looking for %s in:\n%s' % (len(result), args.project, json.dumps(result, indent=4, sort_keys=True))))
project_id = response.json()[0]['id']
data = dict(globalEnv=dict(((kp[0], kp[1]) for kp in (args.env or []))))
if args.branch:
data['branchName'] = args.branch
elif args.run:
data['runId'] = args.run
url = ('https://api.shippable.com/projects/%s/newBuild' % project_id)
response = requests.post(url, json=data, headers=headers)
if (response.status_code != 200):
raise Exception(('HTTP %s: %s\n%s' % (response.status_code, response.reason, response.content)))
print(json.dumps(response.json(), indent=4, sort_keys=True))
| [
"[email protected]"
] | |
104fc669d2af9fa208c4aed1d720c01d359a05f9 | e98f2775d0b7b9aca26e4ef6a3c08ca0100a1442 | /tensorflow_federated/python/core/backends/mapreduce/test_utils.py | f1aeca67ea8ccadf1f89de7152c7f2c39569ec29 | [
"Apache-2.0"
] | permissive | sgpohlj87/federated | 54131d8e62a3df1b0bb396d11ba6e6a2e1ada11d | ca179ac0e2c0cf9c33169d13fbb44668cbab6982 | refs/heads/master | 2020-07-18T22:34:06.001195 | 2019-09-03T22:12:33 | 2019-09-03T22:13:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,890 | py | # Lint as: python2, python3
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils that support unit tests in this component."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow as tf
import tensorflow_federated as tff
from tensorflow_federated.python.core.api import computation_types
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.backends.mapreduce import canonical_form
from tensorflow_federated.python.core.impl.compiler import building_blocks
def get_temperature_sensor_example():
"""Constructs `canonical_form.CanonicalForm` for temperature sensors example.
The temperature sensor example computes the fraction of sensors that report
temperatures over the threshold.
Returns:
An instance of `canonical_form.CanonicalForm`.
"""
@computations.tf_computation
def initialize():
return {'num_rounds': tf.constant(0)}
# The state of the server is a singleton tuple containing just the integer
# counter `num_rounds`.
server_state_type = computation_types.NamedTupleType([('num_rounds', tf.int32)
])
@computations.tf_computation(server_state_type)
def prepare(state):
return {'max_temperature': 32.0 + tf.cast(state.num_rounds, tf.float32)}
# The initial state of the client is a singleton tuple containing a single
# float `max_temperature`, which is the threshold received from the server.
client_state_type = computation_types.NamedTupleType([('max_temperature',
tf.float32)])
# The client data is a sequence of floats.
client_data_type = computation_types.SequenceType(tf.float32)
@computations.tf_computation(client_data_type, client_state_type)
def work(data, state):
"""See the `canonical_form.CanonicalForm` definition of `work`."""
def fn(s, x):
return {
'num': s['num'] + 1,
'max': tf.maximum(s['max'], x),
}
reduce_result = data.reduce({
'num': np.int32(0),
'max': np.float32(-459.67)
}, fn)
return ({
'is_over': reduce_result['max'] > state.max_temperature
}, {
'num_readings': reduce_result['num']
})
# The client update is a singleton tuple with a Boolean-typed `is_over`.
client_update_type = computation_types.NamedTupleType([('is_over', tf.bool)])
# The accumulator for client updates is a pair of counters, one for the
# number of clients over threshold, and the other for the total number of
# client updates processed so far.
accumulator_type = computation_types.NamedTupleType([('num_total', tf.int32),
('num_over', tf.int32)])
@computations.tf_computation
def zero():
return collections.OrderedDict([('num_total', tf.constant(0)),
('num_over', tf.constant(0))])
@computations.tf_computation(accumulator_type, client_update_type)
def accumulate(accumulator, update):
return collections.OrderedDict([
('num_total', accumulator.num_total + 1),
('num_over', accumulator.num_over + tf.cast(update.is_over, tf.int32))
])
@computations.tf_computation(accumulator_type, accumulator_type)
def merge(accumulator1, accumulator2):
return collections.OrderedDict([
('num_total', accumulator1.num_total + accumulator2.num_total),
('num_over', accumulator1.num_over + accumulator2.num_over)
])
@computations.tf_computation(merge.type_signature.result)
def report(accumulator):
return {
'ratio_over_threshold': (tf.cast(accumulator['num_over'], tf.float32) /
tf.cast(accumulator['num_total'], tf.float32))
}
# The type of the combined update is a singleton tuple containing a float
# named `ratio_over_threshold`.
combined_update_type = computation_types.NamedTupleType([
('ratio_over_threshold', tf.float32)
])
@computations.tf_computation(server_state_type, combined_update_type)
def update(state, update):
return ({'num_rounds': state.num_rounds + 1}, update)
return canonical_form.CanonicalForm(initialize, prepare, work, zero,
accumulate, merge, report, update)
def get_mnist_training_example():
"""Constructs `canonical_form.CanonicalForm` for mnist training.
Returns:
An instance of `canonical_form.CanonicalForm`.
"""
model_nt = collections.namedtuple('Model', 'weights bias')
server_state_nt = (collections.namedtuple('ServerState', 'model num_rounds'))
# Start with a model filled with zeros, and the round counter set to zero.
@computations.tf_computation
def initialize():
return server_state_nt(
model=model_nt(weights=tf.zeros([784, 10]), bias=tf.zeros([10])),
num_rounds=tf.constant(0))
server_state_tff_type = server_state_nt(
model=model_nt(weights=(tf.float32, [784, 10]), bias=(tf.float32, [10])),
num_rounds=tf.int32)
client_state_nt = (
collections.namedtuple('ClientState', 'model learning_rate'))
# Pass the model to the client, along with a dynamically adjusted learning
# rate that starts at 0.1 and decays exponentially by a factor of 0.9.
@computations.tf_computation(server_state_tff_type)
def prepare(state):
learning_rate = 0.1 * tf.pow(0.9, tf.cast(state.num_rounds, tf.float32))
return client_state_nt(model=state.model, learning_rate=learning_rate)
batch_nt = collections.namedtuple('Batch', 'x y')
batch_tff_type = batch_nt(x=(tf.float32, [None, 784]), y=(tf.int32, [None]))
dataset_tff_type = computation_types.SequenceType(batch_tff_type)
model_tff_type = model_nt(
weights=(tf.float32, [784, 10]), bias=(tf.float32, [10]))
client_state_tff_type = client_state_nt(
model=model_tff_type, learning_rate=tf.float32)
loop_state_nt = collections.namedtuple('LoopState', 'num_examples total_loss')
update_nt = collections.namedtuple('Update', 'model num_examples loss')
stats_nt = collections.namedtuple('Stats', 'num_examples loss')
# Train the model locally, emit the loclaly-trained model and the number of
# examples as an update, and the average loss and the number of examples as
# local client stats.
@computations.tf_computation(dataset_tff_type, client_state_tff_type)
def work(data, state): # pylint: disable=missing-docstring
model_vars = model_nt(
weights=tf.Variable(initial_value=state.model.weights, name='weights'),
bias=tf.Variable(initial_value=state.model.bias, name='bias'))
with tf.control_dependencies([tf.global_variables_initializer()]):
init_model = tf.group(
tf.assign(model_vars.weights, state.model.weights),
tf.assign(model_vars.bias, state.model.bias))
optimizer = tf.train.GradientDescentOptimizer(state.learning_rate)
@tf.function
def reduce_fn(loop_state, batch):
pred_y = tf.nn.softmax(
tf.matmul(batch.x, model_vars.weights) + model_vars.bias)
loss = -tf.reduce_mean(
tf.reduce_sum(
tf.one_hot(batch.y, 10) * tf.log(pred_y), reduction_indices=[1]))
with tf.control_dependencies([optimizer.minimize(loss)]):
return loop_state_nt(
num_examples=loop_state.num_examples + 1,
total_loss=loop_state.total_loss + loss)
with tf.control_dependencies([init_model]):
loop_state = data.reduce(
loop_state_nt(num_examples=0, total_loss=np.float32(0.0)), reduce_fn)
num_examples = loop_state.num_examples
total_loss = loop_state.total_loss
with tf.control_dependencies([num_examples, total_loss]):
loss = total_loss / tf.cast(num_examples, tf.float32)
return (update_nt(model=model_vars, num_examples=num_examples, loss=loss),
stats_nt(num_examples=num_examples, loss=loss))
accumulator_nt = update_nt
# Initialize accumulators for aggregation with zero model and zero examples.
@computations.tf_computation
def zero():
return accumulator_nt(
model=model_nt(weights=tf.zeros([784, 10]), bias=tf.zeros([10])),
num_examples=tf.constant(0),
loss=tf.constant(0.0, dtype=tf.float32))
update_tff_type = update_nt(
model=model_tff_type, num_examples=tf.int32, loss=tf.float32)
accumulator_tff_type = update_tff_type
# We add an update to an accumulator with the update's model multipled by the
# number of examples, so we can compute a weighted average in the end.
@computations.tf_computation(accumulator_tff_type, update_tff_type)
def accumulate(accumulator, update):
scaling_factor = tf.cast(update.num_examples, tf.float32)
scaled_model = tf.nest.map_structure(lambda x: x * scaling_factor,
update.model)
return accumulator_nt(
model=tf.nest.map_structure(tf.add, accumulator.model, scaled_model),
num_examples=accumulator.num_examples + update.num_examples,
loss=accumulator.loss + update.loss * scaling_factor)
# Merging accumulators does not involve scaling.
@computations.tf_computation(accumulator_tff_type, accumulator_tff_type)
def merge(accumulator1, accumulator2):
return accumulator_nt(
model=tf.nest.map_structure(tf.add, accumulator1.model,
accumulator2.model),
num_examples=accumulator1.num_examples + accumulator2.num_examples,
loss=accumulator1.loss + accumulator2.loss)
report_nt = accumulator_nt
# The result of aggregation is produced by dividing the accumulated model by
# the total number of examples. Same for loss.
@computations.tf_computation(accumulator_tff_type)
def report(accumulator):
scaling_factor = 1.0 / tf.cast(accumulator.num_examples, tf.float32)
scaled_model = model_nt(
weights=accumulator.model.weights * scaling_factor,
bias=accumulator.model.bias * scaling_factor)
return report_nt(
model=scaled_model,
num_examples=accumulator.num_examples,
loss=accumulator.loss * scaling_factor)
report_tff_type = accumulator_tff_type
metrics_nt = collections.namedtuple('Metrics', 'num_rounds num_examples loss')
# Pass the newly averaged model along with an incremented round counter over
# to the next round, and output the counters and loss as server metrics.
@computations.tf_computation(server_state_tff_type, report_tff_type)
def update(state, report):
num_rounds = state.num_rounds + 1
return (server_state_nt(model=report.model, num_rounds=num_rounds),
metrics_nt(
num_rounds=num_rounds,
num_examples=report.num_examples,
loss=report.loss))
return canonical_form.CanonicalForm(initialize, prepare, work, zero,
accumulate, merge, report, update)
def construct_example_training_comp():
"""Constructs a `tff.utils.IterativeProcess` via the FL API."""
np.random.seed(0)
sample_batch = collections.OrderedDict([('x',
np.array([[1., 1.]],
dtype=np.float32)),
('y', np.array([[0]],
dtype=np.int32))])
def model_fn():
"""Constructs keras model."""
keras_model = tf.keras.models.Sequential([
tf.keras.layers.Dense(
1,
activation=tf.nn.softmax,
kernel_initializer='zeros',
input_shape=(2,))
])
def loss_fn(y_true, y_pred):
return tf.reduce_mean(
tf.keras.losses.sparse_categorical_crossentropy(y_true, y_pred))
keras_model.compile(
loss=loss_fn,
optimizer=tf.keras.optimizers.SGD(learning_rate=0.01),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
return tff.learning.from_compiled_keras_model(keras_model, sample_batch)
return tff.learning.build_federated_averaging_process(model_fn)
def computation_to_building_block(comp):
return building_blocks.ComputationBuildingBlock.from_proto(
comp._computation_proto) # pylint: disable=protected-access
| [
"[email protected]"
] | |
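The temperature-sensor example above wires zero/accumulate/merge/report into TFF computations; stripped of the federated types, the aggregation it performs is just the plain Python below. The client updates are hypothetical, and only the arithmetic is mirrored, not the federated placement.

def zero():
    return {"num_total": 0, "num_over": 0}

def accumulate(acc, is_over):
    return {"num_total": acc["num_total"] + 1,
            "num_over": acc["num_over"] + int(is_over)}

def merge(a, b):
    return {"num_total": a["num_total"] + b["num_total"],
            "num_over": a["num_over"] + b["num_over"]}

def report(acc):
    return {"ratio_over_threshold": acc["num_over"] / acc["num_total"]}

client_updates = [True, False, True, True]      # hypothetical per-client `is_over` bits
acc = zero()
for u in client_updates:
    acc = accumulate(acc, u)
print(report(acc))                               # {'ratio_over_threshold': 0.75}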
ab0a2ba6e4d3d375e56ab4ac661c7d7ddf068306 | ec80f504ab4511e27c7e4afe801b8e6b8ddf6900 | /examples/ex_cnn_cascade_training_face_detection/12net_detection_training.py | 0f64a4a441f8628405401d4cb2a8c19f4f58f298 | [
"MIT"
] | permissive | ssteveminq/deepgaze | afdc3aeb7d0b0ba247a3cf5850981f899abb3082 | 6df11f3799b36d7d0d45fbbb77be21eec456ddd4 | refs/heads/master | 2021-01-25T10:06:11.775804 | 2019-04-11T18:18:14 | 2019-04-11T18:18:14 | 123,338,160 | 0 | 1 | MIT | 2018-02-28T20:23:47 | 2018-02-28T20:23:47 | null | UTF-8 | Python | false | false | 12,414 | py | #!/usr/bin/env python
# The MIT License (MIT)
# Copyright (c) 2017 Massimiliano Patacchiola
# https://mpatacchiola.github.io
# https://mpatacchiola.github.io/blog/
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle
import cv2 # to visualize a preview
import datetime
import os
def main():
# Load the standard file
image_size = 12
batch_size = 64
patch_size = 5
num_labels = 2
num_channels = 3 # colour
tot_epochs = 10000 # epochs
# Change this path based on your datasets location
pickle_file_positive = "./positive_dataset_12net_98452.pickle"
pickle_file_negative = "./negative_dataset_12net_198081.pickle"
with open(pickle_file_positive, 'rb') as f:
save = pickle.load(f)
train_dataset_positive = save['training_dataset']
train_label_positive = save['training_label']
del save # hint to help gc free up memory
# Here we take only part of the train and test set
print('Training set', train_dataset_positive.shape, train_label_positive.shape)
with open(pickle_file_negative, 'rb') as f:
save = pickle.load(f)
train_dataset_negative = save['training_dataset']
train_label_negative = save['training_label']
del save # hint to help gc free up memory
# Here we take only part of the train and test set
print('Training set', train_dataset_negative.shape, train_label_negative.shape)
# Creating the test set taking the first 100 images
test_dataset = np.concatenate((train_dataset_positive[0:100, :, :], train_dataset_negative[0:100, :, :]), axis=0)
test_label = np.concatenate((train_label_positive[0:100, :], train_label_negative[0:100, :]), axis=0)
train_dataset_positive = train_dataset_positive[100:, :, :]
train_dataset_negative = train_dataset_negative[100:, :, :]
train_label_positive = train_label_positive[100:, :]
train_label_negative = train_label_negative[100:, :]
#Estimating the number of elements in both datasets
total_positive = train_dataset_positive.shape[0]
total_negative = train_dataset_negative.shape[0]
# Normalisation
#train_dataset -= 127
#validation_dataset -= 127
#test_dataset -= 127
#train_dataset /= 255
#validation_dataset /= 255
#test_dataset /= 255
graph = tf.Graph()
with graph.as_default():
tf_initializer = None #tf.random_normal_initializer()
# Input data.
tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, image_size, image_size, num_channels))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_test_dataset = tf.placeholder(tf.float32, shape=(None, image_size, image_size, num_channels))
# Conv layer
# [patch_size, patch_size, num_channels, depth]
conv1_weights = tf.get_variable("conv1_12d_w", [3, 3, num_channels, 16], initializer=tf_initializer)
conv1_biases = tf.Variable(tf.zeros([16]), name="conv1_12d_b")
# Dense layer
# [ 5*5 * previous_layer_out , num_hidden] wd1
# after pooling the 12x12 image is reduced to size 6x6
dense1_weights = tf.get_variable("dense1_12d_w", [6 * 6 * 16, 16], initializer=tf_initializer)
dense1_biases = tf.Variable(tf.random_normal(shape=[16]), name="dense1_12d_b")
# Output layer
layer_out_weights = tf.get_variable("out_12d_w", [16, num_labels], initializer=tf_initializer)
layer_out_biases = tf.Variable(tf.random_normal(shape=[num_labels]), name="out_12d_b")
# dropout (keep probability)
keep_prob = tf.placeholder(tf.float32)
# Model.
def model(data, _dropout=1.0):
X = tf.reshape(data, shape=[-1, image_size, image_size, num_channels])
print("SHAPE X: " + str(X.get_shape())) # Convolution Layer 1
conv1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(X, conv1_weights, strides=[1, 1, 1, 1], padding='SAME'), conv1_biases))
print("SHAPE conv1: " + str(conv1.get_shape()))
# Max Pooling (down-sampling)
pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
print("SHAPE pool1: " + str(pool1.get_shape()))
# Apply Normalization
norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
# Apply Dropout
norm1 = tf.nn.dropout(norm1, _dropout)
# Fully connected layer
dense1 = tf.reshape(norm1, [-1, dense1_weights.get_shape().as_list()[0]]) # Reshape conv3
print("SHAPE dense1: " + str(dense1.get_shape()))
dense1 = tf.nn.relu(tf.matmul(dense1, dense1_weights) + dense1_biases) # Relu
dense1 = tf.nn.dropout(dense1, _dropout)
# Output layer
out = tf.matmul(dense1, layer_out_weights) + layer_out_biases
print("SHAPE out: " + str(out.get_shape()))
# Return the output with logits
return out
# Training computation.
logits = model(tf_train_dataset, keep_prob)
loss = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))
#- Adding the regularization terms to the loss
#beta = 5e-4 #it was: 5e-4 = 0.0005
#loss += (beta * tf.nn.l2_loss(conv1_weights))
#loss += (beta * tf.nn.l2_loss(dense1_weights))
#loss += (beta * tf.nn.l2_loss(layer_out_weights))
loss_summ = tf.summary.scalar("loss", loss)
# Find the batch accuracy and save it in summary
accuracy = tf.equal(tf.argmax(tf_train_labels, 1), tf.argmax(logits, 1))
accuracy = tf.reduce_mean(tf.cast(accuracy, tf.float32))
accuracy_summary = tf.summary.scalar("accuracy", accuracy)
# Optimizer.
# learning_rate = 0.001 #it was: 0.001
global_step = tf.Variable(0, trainable=False) # count the number of steps taken.
#learning_rate = tf.train.exponential_decay(0.000098, global_step, 15000, 0.1, staircase=True)
#lrate_summ = tf.scalar_summary("learning rate", learning_rate)
#optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
#optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss, global_step=global_step)
#optimizer = tf.train.RMSPropOptimizer(learning_rate=0.001, decay=0.9, momentum=0.0, epsilon=1e-10, use_locking=False, name='RMSProp').minimize(loss, global_step=global_step)
#optimizer = tf.train.AdagradOptimizer(learning_rate=0.00625).minimize(loss, global_step=global_step)
#optimizer = tf.train.MomentumOptimizer(learning_rate=0.0001, momentum=0.95).minimize(loss, global_step=global_step)
optimizer = tf.train.AdadeltaOptimizer(learning_rate=0.001, rho=0.95, epsilon=1e-08, use_locking=False, name='Adadelta').minimize(loss, global_step=global_step)
# Predictions for the training, validation, and test data.
train_prediction = logits
# Call test_prediction and pass the test inputs to have test accuracy
test_prediction = model(tf_test_dataset)
_, test_accuracy = tf.metrics.accuracy(labels=tf.argmax(test_label, 1), predictions=tf.argmax(test_prediction, 1))
_, test_recall = tf.metrics.recall(labels=tf.argmax(test_label, 1), predictions=tf.argmax(test_prediction, 1))
_, test_precision = tf.metrics.precision(labels=tf.argmax(test_label, 1), predictions=tf.argmax(test_prediction, 1))
_, test_false_positives = tf.metrics.false_positives(labels=tf.argmax(test_label, 1), predictions=tf.argmax(test_prediction, 1))
_, test_false_negatives = tf.metrics.false_negatives(labels=tf.argmax(test_label, 1), predictions=tf.argmax(test_prediction, 1))
# Save all the variables
saver = tf.train.Saver()
with tf.Session(graph=graph) as session:
# Summary definition
merged_summaries = tf.summary.merge_all()
now = datetime.datetime.now()
log_path = "./logs/log_12net_detection_" + str(now.hour) + str(now.minute) + str(now.second)
writer_summaries = tf.summary.FileWriter(log_path, session.graph)
tf.global_variables_initializer().run()
tf.local_variables_initializer().run()
# tf.initialize_all_variables().run()
print('Initialized')
for step in range(tot_epochs):
# Pick random images in euqal number from positive and negative dataset
quantity_positive = int(batch_size/2)
quantity_negative = batch_size - quantity_positive
indices_positive = np.random.randint(total_positive, size=quantity_positive)
indices_negative = np.random.randint(total_negative, size=quantity_negative)
batch_data = np.concatenate((np.take(train_dataset_positive, indices_positive, axis=0),
np.take(train_dataset_negative, indices_negative, axis=0)))
batch_labels = np.concatenate((np.take(train_label_positive, indices_positive, axis=0),
np.take(train_label_negative, indices_negative, axis=0)))
feed_dict = {tf_train_dataset: batch_data, tf_train_labels: batch_labels, keep_prob: 1.0}
_, acc, l, predictions, my_summary = session.run([optimizer, accuracy, loss, train_prediction, merged_summaries],
feed_dict=feed_dict)
writer_summaries.add_summary(my_summary, step)
if (step % 100 == 0):
print("")
print("Loss at step: ", step, " is " , l)
print("Global Step: " + str(global_step.eval()) + " of " + str(tot_epochs))
#print("Learning Rate: " + str(learning_rate.eval()))
print("Minibatch size: " + str(batch_labels.shape))
print("Accuracy: " + str(acc))
print("")
# Save and test the network
checkpoint_path = "./checkpoints/12net_detection_" + str(now.hour) + str(now.minute) + str(now.second)
if not os.path.exists(checkpoint_path):
os.makedirs(checkpoint_path)
saver.save(session, checkpoint_path + "/cnn_12net_detection" , global_step=step) # save the session
feed_dict = {tf_test_dataset: test_dataset, keep_prob: 1.0}
test_acc, test_rec, test_prec, test_fp, test_fn = session.run([test_accuracy, test_recall, test_precision, test_false_positives, test_false_negatives], feed_dict=feed_dict)
print("# Tot. images tested: " + str(test_dataset.shape[0]))
print("# Test accuracy: " + str(test_acc))
print("# Test recall: " + str(test_rec))
print("# Test precision: " + str(test_prec))
print("# Test false positives: " + str(test_fp))
print("# Test false negatives: " + str(test_fn))
print("")
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
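A quick sanity check of the 6 * 6 * 16 size assumed by dense1 above: with SAME padding the 12x12 input keeps its spatial size through the 3x3 convolution and halves under the 3x3, stride-2 max pool. Plain arithmetic, no TensorFlow needed.

import math

image_size = 12
conv_out = math.ceil(image_size / 1)       # SAME conv, stride 1: 12 -> 12
pool_out = math.ceil(conv_out / 2)         # SAME pool, stride 2: 12 -> 6
flat_units = pool_out * pool_out * 16      # 6 * 6 * 16 = 576 inputs to dense1
print(conv_out, pool_out, flat_units)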
4598001c5648f08752ef2002d4ba2a58a4b810b4 | 94e06376dc265c7bf1a2e51acb9714d02b21503a | /python打卡/day9_数字.py | 0303d4bab06f6795e5925726a39bafdecf382745 | [] | no_license | zhangquanliang/python | 4b2db32bed4e4746c8c49c309563f456dc41c6be | f45ef96e385b1cd6c5dfb53bf81042d953a9ec46 | refs/heads/master | 2021-04-26T23:30:12.217397 | 2019-03-20T06:18:14 | 2019-03-20T06:18:14 | 124,005,916 | 11 | 2 | null | null | null | null | UTF-8 | Python | false | false | 640 | py | # -*- coding: utf-8 -*-
# 1. Sum up the values inside a nested list of integers
def nested_sum(t):
a = 0
for x in t:
for y in x:
a += y
print(a)
# 2. Take a list of numbers and return the cumulative sums
def cumsum(t):
list = []
a = 0
for x in t:
a += x
list.append(a)
print(list)
# 3. Take a list and return a new list containing every value except the first and last elements
def middle(t):
t.pop(0)
t.pop()
print(t)
# t = [1, 2, 3, 4, 1212, 121]
# middle(t)
# 4. Fibonacci sequence
a, b = 0, 1
for i in range(1, 13):
    print('Month %s: %s pairs of rabbits' % (i, b))
a, b = b, a + b | [
"[email protected]"
] | |
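The cumulative-sum exercise above can be cross-checked against the standard library; this snippet is purely illustrative.

from itertools import accumulate

t = [1, 2, 3, 4]
print(list(accumulate(t)))   # [1, 3, 6, 10] - matches what cumsum(t) prints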
68107dada2e7dd7dc4eabd477e86ea95d7540946 | e719bcfde03c0be2c84a7f1e13d12b80fa00ea84 | /session2/draw_2.py | 0780ebd4934bca50609387bc0e008130c608d56c | [] | no_license | minhduc9699/phamMinhDuc-D4E17 | 53b80e53ff175f0357fb01c9876aa12b343ca060 | 363b61745a206f33c5cfa3532a5abd920fcf4ad1 | refs/heads/master | 2023-01-22T08:25:14.210959 | 2020-12-05T04:53:38 | 2020-12-05T04:53:38 | 308,668,542 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 154 | py | from turtle import *
speed(-1)
for edge in range(3, 11):
print(edge)
for i in range(edge):
forward(100)
left(360/edge)
mainloop() | [
"[email protected]"
] | |
b53a79653da1f30b4346d7cee4b0c1ab43348665 | 74167e4c1061b454d1ab1c2140a1fc2f4540ee2e | /accounts/models.py | fc5a140def7581969baf9c6413966fd5a150517c | [] | no_license | Pagante/ProjectCart | f72a1a611445f66c1320c0c21e1832d3ecf67a2a | 4f065a02a8235c6744768328af5c1e103321ed44 | refs/heads/main | 2023-06-05T23:53:10.316222 | 2021-06-27T21:47:28 | 2021-06-27T21:47:28 | 380,840,906 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,861 | py | from django.db import models
from django.contrib.auth.models import BaseUserManager,AbstractBaseUser
# Create your models here.
class MyAccountManager(BaseUserManager):
def create_user(self, first_name, last_name, username, email, password=None):
        if not email:
raise ValueError('User must have an email')
if not username:
raise ValueError('User must have a username')
user = self.model(
email = self.normalize_email(email),
username = username,
first_name = first_name,
last_name = last_name
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, first_name, last_name, username, email, password):
user = self.create_user (
email = self.normalize_email(email),
username = username,
password= password,
first_name= first_name,
last_name= last_name,
)
user.is_admin = True
user.is_active = True
user.is_staff = True
user.is_superadmin = True
user.save(using= self._db)
return user
class Account(AbstractBaseUser):
first_name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50)
username = models.CharField(max_length=100, unique=True)
    email = models.CharField(max_length=100, unique=True)
phone_number = models.CharField(max_length=50)
# required Field
date_joined = models.DateTimeField(auto_now_add=True)
last_login = models.DateTimeField(auto_now_add=True)
is_admin = models.BooleanField(default=False)
is_staff = models.BooleanField(default=False)
is_active = models.BooleanField(default=False)
is_superadmin = models.BooleanField(default=False)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['username', 'first_name', 'last_name']
objects = MyAccountManager()
def fullName(self):
return f"{self.first_name} {self.last_name}"
def __str__(self):
return self.email
def has_perm(self, perm, obj=None):
return self.is_admin
def has_module_perms(self, obj_module):
return True
class UserProfile(models.Model):
user = models.OneToOneField(Account, on_delete=models.CASCADE)
address_line_1 = models.CharField(blank=True, max_length=100)
address_line_2 = models.CharField(blank=True, max_length=100)
profile_picture = models.ImageField(upload_to='userprofile', blank=True)
city = models.CharField(blank=True, max_length=50)
state = models.CharField(blank=True, max_length=50)
country = models.CharField(blank=True, max_length=50)
def __str__(self):
return self.user.first_name
def fullAddress(self):
return f'{self.address_line_1} {self.address_line_2}'
| [
"[email protected]"
] | |
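A short sketch of how the custom manager above is meant to be exercised, e.g. from a Django shell with this app installed and Account configured as AUTH_USER_MODEL; the field values are placeholders.

from accounts.models import Account

user = Account.objects.create_user(
    first_name="Ada",
    last_name="Lovelace",
    username="ada",
    email="[email protected]",
    password="change-me",
)
print(user.fullName(), user.is_active)   # freshly created accounts start inactive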
cd46193f2107a70f24bf853229b251e11f09edd3 | 5989e503a733e8b29f4c502008446a75c2b43ff8 | /src/aids/migrations/0080_auto_20191104_1028.py | e61367fa67ca3885c3616e240f880421e5dac253 | [] | no_license | samuelpath/aides-territoires | 399a6a7b0607ef5a8d2b327247446b239f5b1a42 | 5793bd49d7157a34e08c29e56a46e1e3ead0651f | refs/heads/master | 2022-12-20T14:35:18.671563 | 2020-08-21T08:00:33 | 2020-08-21T08:00:33 | 288,424,578 | 0 | 0 | null | 2020-08-18T10:27:17 | 2020-08-18T10:27:16 | null | UTF-8 | Python | false | false | 389 | py | # Generated by Django 2.2.5 on 2019-11-04 09:28
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('aids', '0079_remove_aid_subvention_rate'),
]
operations = [
migrations.RenameField(
model_name='aid',
old_name='subvention_rate_range',
new_name='subvention_rate',
),
]
| [
"[email protected]"
] | |
1e838cff1c3206fca261549ede085035a1794d7c | b301f5d799fb973f12ff457c94a3fb54f5c6fd6b | /pages/views.py | 56e3848a96f39428eb0488fb6874d562f048fe72 | [] | no_license | MahmudulHassan5809/DjangoHousingSellingProject | ca3a8b9e3d83dd87532b33295e56e50ba7e9576d | 82d02e04fe2a0cd510f160ad4159f40f4e5779d3 | refs/heads/master | 2020-04-09T08:01:41.092034 | 2018-12-03T11:24:13 | 2018-12-03T11:24:13 | 160,179,800 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 852 | py | from django.shortcuts import render,redirect
from django.http import HttpResponse
from listings.choices import price_choices , bedroom_choices , state_choices
from listings.models import Listing
from realtors.models import Realtor
# Create your views here.
def index(request):
    # return HttpResponse('Hello')
    listings = Listing.objects.order_by('-list_date').filter(is_published=True)
return render(request , 'pages/index.html',{'listings' : listings ,
'state_choices' : state_choices,
'bedroom_choices' : bedroom_choices,
'price_choices' : price_choices,
})
def about(request):
realtors = Realtor.objects.order_by('-hire_date')
mvp_realtors = Realtor.objects.all().filter(is_mvp=True)
context = {
'realtors' : realtors,
'mvp_realtors' : mvp_realtors
}
return render(request , 'pages/about.html',context)
| [
"[email protected]"
] | |
fb58a27373295fd23e4e441d7160e90f57d8c58a | 9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb | /sdk/paloaltonetworks/azure-mgmt-paloaltonetworksngfw/generated_samples/local_rulestacks_create_or_update_minimum_set_gen.py | eb4b8b0451f4477d85ec725f4dfa1603454d7a23 | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | openapi-env-test/azure-sdk-for-python | b334a2b65eeabcf9b7673879a621abb9be43b0f6 | f61090e96094cfd4f43650be1a53425736bd8985 | refs/heads/main | 2023-08-30T14:22:14.300080 | 2023-06-08T02:53:04 | 2023-06-08T02:53:04 | 222,384,897 | 1 | 0 | MIT | 2023-09-08T08:38:48 | 2019-11-18T07:09:24 | Python | UTF-8 | Python | false | false | 1,765 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.paloaltonetworksngfw import PaloAltoNetworksNgfwMgmtClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-paloaltonetworksngfw
# USAGE
python local_rulestacks_create_or_update_minimum_set_gen.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = PaloAltoNetworksNgfwMgmtClient(
credential=DefaultAzureCredential(),
subscription_id="2bf4a339-294d-4c25-b0b2-ef649e9f5c27",
)
response = client.local_rulestacks.begin_create_or_update(
resource_group_name="rgopenapi",
local_rulestack_name="lrs1",
resource={"location": "eastus", "properties": {}},
).result()
print(response)
# x-ms-original-file: specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRulestacks_CreateOrUpdate_MinimumSet_Gen.json
if __name__ == "__main__":
main()
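# Illustrative setup sketch (values are placeholders; see the USAGE note in the
# module docstring): DefaultAzureCredential reads the AAD application details
# from environment variables before this sample is run, e.g.
#
#   export AZURE_CLIENT_ID=<app-client-id>
#   export AZURE_TENANT_ID=<tenant-id>
#   export AZURE_CLIENT_SECRET=<client-secret>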
| [
"[email protected]"
] | |
a94e1bb1dc306e6a03ea0107933cb542bdaea003 | 50671b3e4e8ed7e9702c9941bb71fdbf92dffbe6 | /src/cogs/events.py | 0b28700d34057dbeef8be93aeb3c40ea8a08314b | [] | no_license | pikoUsername/Emulator | 3dd67d0d3934c2ec9283b9b52edebec31c654326 | 96e6563c7cbcea051e4e41a377d917a2a9f5528a | refs/heads/main | 2023-02-27T22:42:43.154987 | 2021-02-09T14:51:56 | 2021-02-09T14:51:56 | 321,045,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,199 | py | import os
import sys
from discord.ext import commands
from discord.ext.commands import errors
import discord
from loguru import logger
from ..models import Guild
from ..utils.notify import notify_all_owners
class DiscordEvents(commands.Cog, name="Events"):
__slots__ = "bot",
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_ready(self):
if os.environ.get("notify_admins"):
await notify_all_owners(self.bot, text="BOT STARTED")
@commands.Cog.listener()
async def on_guild_join(self, guild: discord.Guild):
g = await Guild.query.where(guild.id == Guild.guild_id).gino.first()
g.add_guild(guild)
@commands.Cog.listener()
async def on_guild_remove(self, guild: discord.Guild):
await self.bot.fm.delete_all_guild_files(guild.id)
g = await Guild.get_guild(guild.id)
await g.delete()
logger.info("leaved and deleted thats guild folder")
@commands.Cog.listener()
async def on_command_error(self, ctx, err):
if isinstance(err, errors.MissingRequiredArgument) or isinstance(err, errors.BadArgument):
helper = str(ctx.invoked_subcommand) if ctx.invoked_subcommand else str(ctx.command)
await ctx.send_help(helper)
elif isinstance(err, errors.CommandInvokeError):
logger.exception(f"{err}, {sys.exc_info()}")
if "2000 or fewer" in str(err) and len(ctx.message.clean_content) > 1900:
return await ctx.send(
"You attempted to make the command display more than 2,000 characters...\n"
"Both error and command will be ignored."
)
await ctx.send(embed=discord.Embed(
title="Error on processing Command",
description=f"```{err}```",
), delete_after=30)
elif isinstance(err, errors.MissingPermissions):
await ctx.send(embed=discord.Embed(
title=f"Fail {self.bot.X_EMOJI}",
description="Permission ERROR"))
elif isinstance(err, errors.CheckFailure):
await ctx.send(embed=discord.Embed(
title=f"Fail {self.bot.X_EMOJI}",
description="You cant made this"))
elif isinstance(err, errors.MaxConcurrencyReached):
await ctx.send(
"You've reached max capacity of command usage at once, please finish the previous one...",
delete_after=30)
elif isinstance(err, errors.CommandOnCooldown):
await ctx.send(
f"This command is on cool down... try again in {err.retry_after:.2f} seconds.",
delete_after=30)
elif isinstance(err, errors.CommandNotFound):
pass
elif isinstance(err, errors.NoPrivateMessage):
await ctx.send(
embed=discord.Embed(title="Private message Not work",
description="Bot work only in guild channels"))
else:
logger.exception(err)
await self.bot.send_error(ctx, err)
def setup(bot):
bot.add_cog(DiscordEvents(bot))
| [
"[email protected]"
] | |
6fc2e9842a862e151818555c40bd68c1fe986ae7 | aa5d98396184ab9dc479075b37a3664c385de027 | /tests/selenium/breadcrumb_test.py | 8ae1041e04352b0b8d70180fdda1d4cfface3872 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | uk-gov-mirror/ONSdigital.sbr-ui | c6a66cd6982e9e98a991eadbb8cef0f1fb6ba2bf | 48bbfdc59e393dd4d2d008b8414ac96d2e2be44f | refs/heads/master | 2021-10-12T00:02:34.160448 | 2018-10-17T14:59:04 | 2018-10-17T14:59:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,910 | py | import unittest
from selenium import webdriver
from tests.helper_methods import create_selenium_config
from tests.constants import BASE_URL, SEARCH_URL
from tests.constants import ENTERPRISE, LOCAL_UNIT, REPORTING_UNIT, LEGAL_UNIT, COMPANY_HOUSE, VALUE_ADDED_TAX, PAY_AS_YOU_EARN
from tests.constants import BREADCRUMB_SEARCH_ID, BREADCRUMB_SELECTED_ID, BREADCRUMB_ENT_ID, BREADCRUMB_LEU_ID
from tests.constants import SEARCH_BUTTON_ID, PERIOD_INPUT_ID, UNIT_TYPE_INPUT_ID
from tests.constants import USERNAME_INPUT_ID, PASSWORD_INPUT_ID, SEARCH_INPUT_ID, LOGIN_BUTTON_ID, LOGOUT_BUTTON_ID
from tests.constants import ERN, UBRN, RURN, LURN, VATREF, PAYEREF, CRN, PERIOD
from tests.constants import ADMIN_USERNAME, ADMIN_PASSWORD
class BreadcrumbTest(unittest.TestCase):
"""
The breadcrumb is present on each unit page and allows navigation up the unit hierarchy.
TODO: test for when a breadcrumb link returns 404/500
"""
def setUp(self):
self.options = create_selenium_config()
self.driver = webdriver.Firefox(firefox_options=self.options)
self.driver.get(BASE_URL)
self.driver.find_element_by_id(USERNAME_INPUT_ID).send_keys(ADMIN_USERNAME)
self.driver.find_element_by_id(PASSWORD_INPUT_ID).send_keys(ADMIN_PASSWORD)
self.driver.find_element_by_id(LOGIN_BUTTON_ID).click()
def tearDown(self):
self.driver.find_element_by_id(LOGOUT_BUTTON_ID).click()
self.driver.quit()
def search_by_unit_id_type_period(self, unit_id, unit_type, period):
self.driver.find_element_by_id(SEARCH_INPUT_ID).send_keys(unit_id)
self.driver.find_element_by_id(UNIT_TYPE_INPUT_ID).send_keys(unit_type)
self.driver.find_element_by_id(PERIOD_INPUT_ID).send_keys(period)
self.driver.find_element_by_id(SEARCH_BUTTON_ID).click()
def assert_breadcrumb_item_text_and_url(self, breadcrumb_id, unit_id, unit_type, period):
breadcrumb_item = self.driver.find_element_by_id(breadcrumb_id)
self.assertEqual(breadcrumb_item.text, f'{unit_type} - {unit_id}')
target_url = f'{SEARCH_URL}/periods/{period}/types/{unit_type}/units/{unit_id}'
self.assertEqual(breadcrumb_item.get_attribute('href'), target_url)
def assert_current_breadcrumb_item_text(self, expected_text):
current_item_text = self.driver.find_element_by_id(BREADCRUMB_SELECTED_ID).text
self.assertEqual(current_item_text, expected_text)
def assert_breadcrumb_search_href(self):
href = self.driver.find_element_by_id(BREADCRUMB_SEARCH_ID).get_attribute('href')
self.assertEqual(href, SEARCH_URL)
def test_ent_breadcrumb(self):
self.search_by_unit_id_type_period(ERN, ENTERPRISE, '201810')
self.assertEqual(self.driver.current_url, f'{SEARCH_URL}/periods/{PERIOD}/types/{ENTERPRISE}/units/{ERN}')
self.assert_breadcrumb_search_href()
self.assert_current_breadcrumb_item_text(f'ENT - {ERN}')
def test_lou_breadcrumb(self):
self.search_by_unit_id_type_period(LURN, LOCAL_UNIT, '201810')
self.assertEqual(self.driver.current_url, f'{SEARCH_URL}/periods/{PERIOD}/types/{LOCAL_UNIT}/units/{LURN}')
self.assert_breadcrumb_search_href()
self.assert_current_breadcrumb_item_text(f'LOU - {LURN}')
self.assert_breadcrumb_item_text_and_url(BREADCRUMB_ENT_ID, ERN, ENTERPRISE, PERIOD)
def test_reu_breadcrumb(self):
self.search_by_unit_id_type_period(RURN, REPORTING_UNIT, '201810')
self.assertEqual(self.driver.current_url, f'{SEARCH_URL}/periods/{PERIOD}/types/{REPORTING_UNIT}/units/{RURN}')
self.assert_breadcrumb_search_href()
self.assert_current_breadcrumb_item_text(f'REU - {RURN}')
self.assert_breadcrumb_item_text_and_url(BREADCRUMB_ENT_ID, ERN, ENTERPRISE, PERIOD)
def test_leu_breadcrumb(self):
self.search_by_unit_id_type_period(UBRN, LEGAL_UNIT, '201810')
self.assertEqual(self.driver.current_url, f'{SEARCH_URL}/periods/{PERIOD}/types/{LEGAL_UNIT}/units/{UBRN}')
self.assert_breadcrumb_search_href()
self.assert_current_breadcrumb_item_text(f'LEU - {UBRN}')
self.assert_breadcrumb_item_text_and_url(BREADCRUMB_ENT_ID, ERN, ENTERPRISE, PERIOD)
def test_ch_breadcrumb(self):
self.search_by_unit_id_type_period(CRN, COMPANY_HOUSE, '201810')
self.assertEqual(self.driver.current_url, f'{SEARCH_URL}/periods/{PERIOD}/types/{COMPANY_HOUSE}/units/{CRN}')
self.assert_breadcrumb_search_href()
self.assert_current_breadcrumb_item_text(f'CRN - {CRN}')
self.assert_breadcrumb_item_text_and_url(BREADCRUMB_ENT_ID, ERN, ENTERPRISE, PERIOD)
self.assert_breadcrumb_item_text_and_url(BREADCRUMB_LEU_ID, UBRN, LEGAL_UNIT, PERIOD)
def test_vat_breadcrumb(self):
self.search_by_unit_id_type_period(VATREF, VALUE_ADDED_TAX, '201810')
self.assertEqual(self.driver.current_url, f'{SEARCH_URL}/periods/{PERIOD}/types/{VALUE_ADDED_TAX}/units/{VATREF}')
self.assert_breadcrumb_search_href()
self.assert_current_breadcrumb_item_text(f'VAT - {VATREF}')
self.assert_breadcrumb_item_text_and_url(BREADCRUMB_ENT_ID, ERN, ENTERPRISE, PERIOD)
self.assert_breadcrumb_item_text_and_url(BREADCRUMB_LEU_ID, UBRN, LEGAL_UNIT, PERIOD)
def test_paye_breadcrumb(self):
self.search_by_unit_id_type_period(PAYEREF, PAY_AS_YOU_EARN, '201810')
self.assertEqual(self.driver.current_url, f'{SEARCH_URL}/periods/{PERIOD}/types/{PAY_AS_YOU_EARN}/units/{PAYEREF}')
self.assert_breadcrumb_search_href()
self.assert_current_breadcrumb_item_text(f'PAYE - {PAYEREF}')
self.assert_breadcrumb_item_text_and_url(BREADCRUMB_ENT_ID, ERN, ENTERPRISE, PERIOD)
self.assert_breadcrumb_item_text_and_url(BREADCRUMB_LEU_ID, UBRN, LEGAL_UNIT, PERIOD)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
40fec2d844ff14fbb903f58d6e96f8e46ad3fe8c | f83934dd60d4961848c0a86f6d7fbe07b79a1d63 | /glumpy/graphics/collections/__init__.py | c497f742a32eb93341c0ab317f56e2fc37a848d5 | [] | no_license | brianholland/glumpy | 2a31e2f5fd039d1debb30dd010ad36c458f329cf | a691082385e02db9b1d461847b9e36d8534630fa | refs/heads/master | 2020-12-25T21:43:58.743259 | 2015-11-30T11:04:46 | 2015-11-30T11:04:46 | 46,670,630 | 0 | 0 | null | 2015-11-30T11:04:46 | 2015-11-22T17:10:24 | Python | UTF-8 | Python | false | false | 951 | py | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2014, Nicolas P. Rougier. All rights reserved.
# Distributed under the terms of the new BSD License.
# -----------------------------------------------------------------------------
from . collection import Collection
from . base_collection import BaseCollection
from . path_collection import PathCollection
from . point_collection import PointCollection
from . glyph_collection import GlyphCollection
from . marker_collection import MarkerCollection
from . polygon_collection import PolygonCollection
from . segment_collection import SegmentCollection
from . triangle_collection import TriangleCollection
from . raw_path_collection import RawPathCollection
from . raw_triangle_collection import RawTriangleCollection
from . agg_path_collection import AggPathCollection
from . agg_fast_path_collection import AggFastPathCollection
| [
"[email protected]"
] | |
57e0b43bd157f5140c109b02d53b65caeebdb426 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/sieve-big-7747.py | 21059574743466a564b66ececa453e5f67f1165b | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,474 | py | # A resizable list of integers
class Vector(object):
items: [int] = None
size: int = 0
def __init__(self:"Vector"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector", idx: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector") -> int:
return self.size
# A resizable list of integers
class Vector2(object):
items: [int] = None
items2: [int] = None
size: int = 0
size2: int = 0
def __init__(self:"Vector2"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector2") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector2") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector2", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector2", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector2", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector2", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector2", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector2", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector2") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector2") -> int:
return self.size
# A resizable list of integers
class Vector3(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
def __init__(self:"Vector3"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector3") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector3", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector3", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector3", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector3", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector3", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector3", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector3") -> int:
return self.size
# A resizable list of integers
class Vector4(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
def __init__(self:"Vector4"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector4") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector4", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector4", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector4", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector4", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector4", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector4", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector4") -> int:
return self.size
# A resizable list of integers
class Vector5(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
items5: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
size5: int = 0
def __init__(self:"Vector5"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity5(self:"Vector5") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity5(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector5", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector5", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector5", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
item5:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector5", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector5", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector5", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length5(self:"Vector5") -> int:
return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
doubling_limit:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
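# Illustrative behaviour of the doubling strategy (kept as a comment so the
# benchmark's printed output is unchanged; values are for illustration only):
#   dv:DoublingVector = None
#   dv = DoublingVector()
#   dv.append_all([1, 2, 3, 4])
#   print(dv.capacity())   # 4 -- capacity doubled 1 -> 2 -> 4 instead of growing by one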
# A faster (but more memory-consuming) implementation of vector
class DoublingVector2(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector3(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector4(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector5(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
doubling_limit5:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity5(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
v:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
v:Vector = None
v2:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
i:int = 0
j:int = 0
k:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve2(v:Vector, v2:Vector) -> object:
i:int = 0
i2:int = 0
j:int = 0
j2:int = 0
k:int = 0
k2:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
j:int = 0
j2:int = 0
j3:int = 0
k:int = 0
k2:int = 0
k3:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
j5:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
k5:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
# Input parameter
n:int = 50
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50
# Data
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print
while i < v.length():
print(v.get(i))
i = i + 1
| [
"[email protected]"
] | |
c71de68d5d8e1ed94307b087f795dddfc08ddc00 | 7b8b03b7818a1fea58f174ff8c18b43578a6233f | /tests/core/test_models.py | b6c868d65b928963cc11299b613fc8c6b8eeec36 | [] | no_license | defance/coins_ph | 400e4316a2d9a63752b21190ca7f1b0543b85343 | 2f0d3038f5dcca4c0f8711a1b095c6078799eb0b | refs/heads/master | 2020-04-30T19:15:58.398453 | 2019-03-21T22:30:16 | 2019-03-21T22:30:16 | 177,033,466 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | from django.test import TestCase
from tests.factories import AccountFactory
class TestTransactionAccount(TestCase):
def test_update_balance(self):
account = AccountFactory(balance=10)
account.update_balance(42)
account.refresh_from_db()
        self.assertEqual(account.balance, 52)
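# A minimal sketch of the behaviour this test expects (an assumption added for
# illustration -- the real Account model in the application may differ):
#
#     def update_balance(self, amount):
#         self.balance += amount
#         self.save(update_fields=['balance'])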
| [
"[email protected]"
] | |
3188d90a661c2d9b856f8af75351df705de6d1bf | 26762585d08aa774af9f104472c97a8c7a9df181 | /generators/v4d_super_station_2.py | 1ff715d9c377df692ba6d0c4d8295bd62762d559 | [] | no_license | OxfordSKA/SKA1-low-layouts | 379fbe5c056dc73706b1073f09e485880ecfa180 | 49e3ba2af4a447be38af03dde1d11898e3f8300b | refs/heads/master | 2021-01-17T17:10:41.469929 | 2016-08-12T10:48:24 | 2016-08-12T10:48:24 | 47,823,977 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,178 | py | """Module to generate super-stations for trail v4d spec. layouts"""
# -*- coding: utf-8 -*-
from __future__ import print_function
import matplotlib.pyplot as pyplot
import numpy
from numpy.random import rand
import shutil
import os
from os.path import join
from math import radians
def rotate_coords(x, y, angle):
"""Rotate array of x, y coordinates counter clockwise by angle, in deg."""
xr = x * numpy.cos(radians(angle)) - y * numpy.sin(radians(angle))
yr = x * numpy.sin(radians(angle)) + y * numpy.cos(radians(angle))
return xr, yr
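# Illustrative sanity check of the sign convention (not in the original file):
# rotating the point (1, 0) by +90 degrees counter-clockwise gives ~(0, 1).
_demo_xr, _demo_yr = rotate_coords(numpy.array([1.0]), numpy.array([0.0]), 90.0)
# _demo_xr is approximately [0.0] and _demo_yr is approximately [1.0]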
def gridgen(num_points, diameter, min_dist, max_trials=1000):
def grid_position(x, y, scale, grid_size):
        jx = int(round(x * scale)) + grid_size // 2
        jy = int(round(y * scale)) + grid_size // 2
return jx, jy
def get_trail_position(r):
x = -r + 2.0 * r * rand()
y = -r + 2.0 * r * rand()
return x, y
# Grid size and scaling onto the grid
grid_size = min(100, int(round(float(diameter) / min_dist)))
grid_cell = float(diameter) / grid_size # Grid sector cell size
scale = 1.0 / grid_cell # Scaling onto the sector grid.
check_width = 1
r = diameter / 2.0 # Radius
r_sq = r**2 # Radius, squared
min_dist_sq = min_dist**2 # minimum distance, squared
r_ant = min_dist / 2.0
# Pre-allocate coordinate arrays
x = numpy.zeros(num_points)
y = numpy.zeros(num_points)
# Grid meta-data
grid_i_start = numpy.zeros((grid_size, grid_size), dtype='i8')
grid_i_end = numpy.zeros((grid_size, grid_size), dtype='i8')
grid_count = numpy.zeros((grid_size, grid_size), dtype='i8')
grid_i_next = numpy.zeros(num_points, dtype='i8')
n = num_points
n_req = num_points
num_tries = 0
try_count = list()
for j in range(n_req):
done = False
while not done:
# Generate a trail position
xt, yt = get_trail_position(r)
rt = (xt**2 + yt**2)**0.5
# Check if the point is inside the diameter.
if rt + r_ant > r:
num_tries += 1
# Check if min distance is met.
else:
jx, jy = grid_position(xt, yt, scale, grid_size)
y0 = max(0, jy - check_width)
y1 = min(grid_size, jy + check_width + 1)
x0 = max(0, jx - check_width)
x1 = min(grid_size, jx + check_width + 1)
d_min = diameter # Set initial min to diameter.
for ky in range(y0, y1):
for kx in range(x0, x1):
if grid_count[kx, ky] > 0:
kh1 = grid_i_start[kx, ky]
for kh in range(grid_count[kx, ky]):
dx = xt - x[kh1]
dy = yt - y[kh1]
d_min = min((dx**2 + dy**2)**0.5, d_min)
kh1 = grid_i_next[kh1]
if d_min >= min_dist:
x[j] = xt
y[j] = yt
if grid_count[jx, jy] == 0:
grid_i_start[jx, jy] = j
else:
grid_i_next[grid_i_end[jx, jy]] = j
grid_i_end[jx, jy] = j
grid_count[jx, jy] += 1
try_count.append(num_tries)
num_tries = 0
done = True
else:
num_tries += 1
if num_tries >= max_trials:
n = j - 1
done = True
if num_tries >= max_trials:
break
if n < n_req:
x = x[0:n]
y = y[0:n]
return x, y, try_count
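# Illustrative call (parameter values are assumptions chosen for the example,
# not taken from the original script): scatter up to 256 points inside a
# 35 m diameter circle with a 1.5 m minimum separation. Wrapped in a function
# so nothing expensive runs at import time.
def _example_gridgen():
    x, y, tries = gridgen(256, 35.0, 1.5, max_trials=10000)
    return x, y, tries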
def gen_super_stations():
"""Generation 85 super-stations by rotation"""
# =========================================================================
num_super_stations = 85
num_stations_per_super_station = 6
max_tries_per_station = 5
diameter_gridgen = 40.0 # m
diameter = 35.0 # m
antenna_diameter = 1.5
num_ant_station_gridgen = 300
num_ant_station = 256
ss_diameter = 100.0
st_diameter = diameter
angles = numpy.arange(num_stations_per_super_station - 1) * \
(360.0 / float(num_stations_per_super_station - 1))
angles += 90.0
r0 = diameter
sx = r0 * numpy.cos(numpy.radians(angles))
sy = r0 * numpy.sin(numpy.radians(angles))
sx = numpy.insert(sx, 0, 0.0)
sy = numpy.insert(sy, 0, 0.0)
ss_model_dir = 'v4d_r_90m_180ant_ss_uniform.tm'
if os.path.isdir(ss_model_dir):
shutil.rmtree(ss_model_dir)
os.makedirs(ss_model_dir)
st_model_dir = 'v4d_r_90m_180ant_st_uniform.tm'
if os.path.isdir(st_model_dir):
shutil.rmtree(st_model_dir)
os.makedirs(st_model_dir)
ss_angles = -360.0 * numpy.random.random(num_super_stations) + 360.0
# =========================================================================
ss_ant_x = numpy.zeros((num_stations_per_super_station, num_ant_station))
ss_ant_y = numpy.zeros_like(ss_ant_x)
st_ant_x = numpy.zeros((num_stations_per_super_station, num_ant_station))
st_ant_y = numpy.zeros_like(st_ant_x)
ss_enu = numpy.zeros((num_ant_station * num_stations_per_super_station, 2))
st_enu = numpy.zeros((num_ant_station, 2))
# =========================================================================
circle = pyplot.Circle((0.0, 0.0), ss_diameter / 2.0,
color='r', linestyle='--',
fill=False, alpha=0.3, lw=2.0)
fig1 = pyplot.figure(figsize=(8, 8))
ax1 = fig1.add_subplot(111, aspect='equal')
ax1.set_xlabel('East [m]')
ax1.set_ylabel('North [m]')
ax1.grid()
ax1.set_xlim(-60, 60)
ax1.set_ylim(-60, 60)
line1, = ax1.plot([], [], 'k+')
label1 = ax1.text(0.02, 0.98, '', ha='left', va='top', style='italic',
color='k', transform=ax1.transAxes, fontsize='x-small')
circle = pyplot.Circle((0.0, 0.0), ss_diameter / 2.0,
color='r', linestyle='--',
fill=False, alpha=0.3, lw=2.0)
ax1.add_artist(circle)
fig2 = pyplot.figure(figsize=(8, 8))
ax2 = fig2.add_subplot(111, aspect='equal')
ax2.set_xlabel('East [m]')
ax2.set_ylabel('North [m]')
ax2.grid()
ax2.set_xlim(-60, 60)
ax2.set_ylim(-60, 60)
circle = pyplot.Circle((0.0, 0.0), ss_diameter / 2.0,
color='r', linestyle='--',
fill=False, alpha=0.3, lw=2.0)
ax2.add_artist(circle)
fig3 = pyplot.figure(figsize=(8, 8))
ax3 = fig3.add_subplot(111, aspect='equal')
ax3.set_xlabel('East [m]')
ax3.set_ylabel('North [m]')
ax3.grid()
ax3.set_xlim(-20, 20)
ax3.set_ylim(-20, 20)
line3, = ax3.plot([], [], 'k+')
label3 = ax3.text(0.02, 0.98, '', ha='left', va='top', style='italic',
color='k', transform=ax3.transAxes, fontsize='x-small')
circle = pyplot.Circle((0.0, 0.0), st_diameter / 2.0,
color='r', linestyle='--',
fill=False, alpha=0.3, lw=2.0)
ax3.add_artist(circle)
fig4 = pyplot.figure(figsize=(8, 8))
ax4 = fig4.add_subplot(111, aspect='equal')
ax4.set_xlabel('East [m]')
ax4.set_ylabel('North [m]')
ax4.grid()
ax4.set_xlim(-20, 20)
ax4.set_ylim(-20, 20)
circle = pyplot.Circle((0.0, 0.0), st_diameter / 2.0,
color='r', linestyle='--',
fill=False, alpha=0.3, lw=2.0)
ax4.add_artist(circle)
# =========================================================================
for i in range(num_super_stations):
print('== super station %i == : ' % i, end='')
for j in range(num_stations_per_super_station):
print('%i' % j, end='')
trial = 0
while trial < max_tries_per_station:
print('.', end='')
ax, ay, _ = gridgen(num_ant_station_gridgen, diameter_gridgen,
antenna_diameter, max_trials=10000)
if ax.shape[0] == num_ant_station_gridgen:
ar = (ax**2 + ay**2)**0.5
# Sort by radius
sort_idx = ar.argsort()
ax = ax[sort_idx]
ay = ay[sort_idx]
ax = ax[:num_ant_station]
ay = ay[:num_ant_station]
ss_ant_x[j, :] = ax + sx[j]
ss_ant_y[j, :] = ay + sy[j]
st_ant_x[j, :] = ax
st_ant_y[j, :] = ay
break
else:
trial += 1
continue
if trial == max_tries_per_station:
print()
print('Error, Failed to find enough antennas for station '
'%i/%i' % (ax.shape[0], num_ant_station_gridgen))
return
print()
# Rotate super-station
ss_ant_x, ss_ant_y = rotate_coords(ss_ant_x, ss_ant_y, ss_angles[i])
# Write station and super-station folders
station_dir = 'station%03i' % i
os.makedirs(join(ss_model_dir, station_dir))
ss_enu[:, 0] = ss_ant_x.flatten()
ss_enu[:, 1] = ss_ant_y.flatten()
station_file = join(ss_model_dir, station_dir, 'layout.txt')
numpy.savetxt(station_file, ss_enu, fmt='% -16.12f % -16.12f')
line1.set_data(ss_enu[:, 0], ss_enu[:, 1])
label1.set_text('super station %03i' % i)
fig1.savefig(join(ss_model_dir, 'station_%03i.png' % i))
ax2.plot(ss_enu[:, 0], ss_enu[:, 1], 'k+', alpha=0.1)
# Write station folders
for j in range(num_stations_per_super_station):
station_id = i * num_stations_per_super_station + j
station_dir = 'station%03i' % station_id
os.makedirs(join(st_model_dir, station_dir))
st_enu[:, 0] = st_ant_x[j, :].flatten()
st_enu[:, 1] = st_ant_y[j, :].flatten()
station_file = join(st_model_dir, station_dir, 'layout.txt')
numpy.savetxt(station_file, st_enu, fmt='% -16.12f % -16.12f')
# Plot station and add to station superposition
line3.set_data(st_enu[:, 0], st_enu[:, 1])
label3.set_text('station %03i' % station_id)
fig3.savefig(join(st_model_dir, 'station_%03i.png' % station_id))
ax4.plot(st_enu[:, 0], st_enu[:, 1], 'k+', alpha=0.1)
fig2.savefig(join(ss_model_dir, 'all_stations.png'))
fig4.savefig(join(st_model_dir, 'all_stations.png'))
ss_layout = numpy.zeros((num_super_stations, 3))
numpy.savetxt(join(ss_model_dir, 'layout.txt'), ss_layout,
fmt='%3.1f %3.1f %3.1f')
total_stations = num_super_stations * num_stations_per_super_station
st_layout = numpy.zeros((total_stations, 3))
numpy.savetxt(join(st_model_dir, 'layout.txt'), st_layout,
fmt='%3.1f %3.1f %3.1f')
if __name__ == '__main__':
gen_super_stations()
| [
"[email protected]"
] | |
f800d6b3ca316df0db0ffe4717caaddae33260f8 | 3ea684487ef727fb2f8d16a030769f32a4f4003a | /datahq/apps/receiver/bootstrap.py | 90c3fa6dc99fc38cd04840c76b173a531f02f9b5 | [] | no_license | adewinter/data-hq | 5781e6669e0625ea9ae7cf94ec77c528485c2951 | ca03656c835f8caa5156326500c05bb83ab931ca | refs/heads/master | 2021-01-18T12:48:26.584454 | 2010-08-19T13:15:03 | 2010-08-19T13:15:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 280 | py | import os
from django.conf import settings
# make our directories if they're not there
for dir in [settings.RECEIVER_SUBMISSION_PATH,
settings.RECEIVER_ATTACHMENT_PATH,
settings.RECEIVER_EXPORT_PATH]:
if not os.path.isdir(dir):
os.mkdir(dir)
| [
"[email protected]"
] | |
19cc849f50ba984019a615ec3532eb04f622db66 | 3efee0cf2bd9e0c34bfdd94ab24a15cb88c04509 | /PWEM_examples/kxky_bandstructure_benchmark_plotting_with_fdfd.py | 20872b8c10843e1edc1184e3d3cbe5d7ee1b70bd | [
"MIT"
] | permissive | luwl85/Rigorous-Coupled-Wave-Analysis | bf5016ec70525f5e7bf59dfa93a03902afdfac12 | a28fdf90b5b5fc0fedacc8bb44a0a0c2f2a02143 | refs/heads/master | 2023-04-25T20:46:45.397976 | 2021-05-20T22:17:54 | 2021-05-20T22:17:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 800 | py | import sys
import os
import scipy.io
import matplotlib.pyplot as plt
import numpy as np
import plotly
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
from mpl_toolkits.mplot3d import Axes3D
matlab_data = os.path.join('kxky_photonic_circle_bandstructure.mat')
mat = scipy.io.loadmat(matlab_data)
print(mat.keys())
wvlen_scan = np.squeeze(mat['wvlen_scan']);
omega_scan = 1/wvlen_scan;
ky_spectra = np.squeeze(mat['ky_spectra']);
print(ky_spectra.shape)
ky_scan = np.linspace(-np.pi, np.pi, 400);
X,Y = np.meshgrid(omega_scan, ky_scan);
print(X.shape)
#first dimension is ky... second dimension is kx...
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(X, Y, np.real(ky_spectra[:,:,0]), marker='.')
plt.show(); | [
"[email protected]"
] | |
75a3cd2e9f625d1e43a53e0412340d4ddac9a76a | 9923e30eb99716bfc179ba2bb789dcddc28f45e6 | /swagger-codegen/python/test/test_asset.py | 4d4822c956810304a52b9be4b031c37a4dfaaa89 | [] | no_license | silverspace/samsara-sdks | cefcd61458ed3c3753ac5e6bf767229dd8df9485 | c054b91e488ab4266f3b3874e9b8e1c9e2d4d5fa | refs/heads/master | 2020-04-25T13:16:59.137551 | 2019-03-01T05:49:05 | 2019-03-01T05:49:05 | 172,804,041 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,361 | py | # coding: utf-8
"""
Samsara API
# Introduction Samsara provides API endpoints for interacting with Samsara Cloud, so that you can build powerful applications and custom solutions with sensor data. Samsara has endpoints available to track and analyze sensors, vehicles, and entire fleets. The Samsara Cloud API is a [RESTful API](https://en.wikipedia.org/wiki/Representational_state_transfer) accessed by an [HTTP](https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol) client such as wget or curl, or HTTP libraries of most modern programming languages including python, ruby, java. We use built-in HTTP features, like HTTP authentication and HTTP verbs, which are understood by off-the-shelf HTTP clients. We allow you to interact securely with our API from a client-side web application (though you should never expose your secret API key). [JSON](http://www.json.org/) is returned by all API responses, including errors. If you’re familiar with what you can build with a REST API, the following API reference guide will be your go-to resource. API access to the Samsara cloud is available to all Samsara administrators. To start developing with Samsara APIs you will need to [obtain your API keys](#section/Authentication) to authenticate your API requests. If you have any questions you can reach out to us on [[email protected]](mailto:[email protected]) # Endpoints All our APIs can be accessed through HTTP requests to URLs like: ```curl https://api.samsara.com/<version>/<endpoint> ``` All our APIs are [versioned](#section/Versioning). If we intend to make breaking changes to an API which either changes the response format or request parameter, we will increment the version. # Authentication To authenticate your API request you will need to include your secret token. You can manage your API tokens in the [Dashboard](https://cloud.samsara.com). They are visible under `Settings->Organization->API Tokens`. Your API tokens carry many privileges, so be sure to keep them secure. Do not share your secret API tokens in publicly accessible areas such as GitHub, client-side code, and so on. Authentication to the API is performed via [HTTP Basic Auth](https://en.wikipedia.org/wiki/Basic_access_authentication). Provide your API token as the basic access_token value in the URL. You do not need to provide a password. ```curl https://api.samsara.com/<version>/<endpoint>?access_token={access_token} ``` All API requests must be made over [HTTPS](https://en.wikipedia.org/wiki/HTTPS). Calls made over plain HTTP or without authentication will fail. # Request Methods Our API endpoints use [HTTP request methods](https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Request_methods) to specify the desired operation to be performed. The documentation below specified request method supported by each endpoint and the resulting action. ## GET GET requests are typically used for fetching data (like data for a particular driver). ## POST POST requests are typically used for creating or updating a record (like adding new tags to the system). With that being said, a few of our POST requests can be used for fetching data (like current location data of your fleet). ## PUT PUT requests are typically used for updating an existing record (like updating all devices associated with a particular tag). ## PATCH PATCH requests are typically used for modifying an existing record (like modifying a few devices associated with a particular tag). ## DELETE DELETE requests are used for deleting a record (like deleting a tag from the system). 
# Response Codes All API requests will respond with appropriate [HTTP status code](https://en.wikipedia.org/wiki/List_of_HTTP_status_codes). Your API client should handle each response class differently. ## 2XX These are successful responses and indicate that the API request returned the expected response. ## 4XX These indicate that there was a problem with the request like a missing parameter or invalid values. Check the response for specific [error details](#section/Error-Responses). Requests that respond with a 4XX status code, should be modified before retrying. ## 5XX These indicate server errors when the server is unreachable or is misconfigured. In this case, you should retry the API request after some delay. # Error Responses In case of a 4XX status code, the body of the response will contain information to briefly explain the error reported. To help debugging the error, you can refer to the following table for understanding the error message. | Status Code | Message | Description | |-------------|----------------|-------------------------------------------------------------------| | 401 | Invalid token | The API token is invalid and could not be authenticated. Please refer to the [authentication section](#section/Authentication). | | 404 | Page not found | The API endpoint being accessed is invalid. | | 400 | Bad request | Default response for an invalid request. Please check the request to make sure it follows the format specified in the documentation. | # Versioning All our APIs are versioned. Our current API version is `v1` and we are continuously working on improving it further and provide additional endpoints. If we intend to make breaking changes to an API which either changes the response format or request parameter, we will increment the version. Thus, you can use our current API version worry free. # FAQs Check out our [responses to FAQs here](https://kb.samsara.com/hc/en-us/sections/360000538054-APIs). Don’t see an answer to your question? Reach out to us on [[email protected]](mailto:[email protected]). # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import samsara
from samsara.models.asset import Asset # noqa: E501
from samsara.rest import ApiException
class TestAsset(unittest.TestCase):
"""Asset unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testAsset(self):
"""Test Asset"""
# FIXME: construct object with mandatory attributes with example values
# model = samsara.models.asset.Asset() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
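# --- Editor's addition: illustrative sketch only, not part of the generated test above. ---
# The module docstring explains that requests are authenticated by passing the
# secret token as the `access_token` query parameter over HTTPS, that every
# response is JSON, and that 4XX errors must be corrected before retrying while
# 5XX errors may be retried after a delay. The helper below sketches that flow
# with the `requests` library; the endpoint name and token are placeholders.
import requests

def samsara_get(endpoint, access_token, version="v1"):
    url = "https://api.samsara.com/%s/%s" % (version, endpoint)
    resp = requests.get(url, params={"access_token": access_token})
    if 200 <= resp.status_code < 300:
        return resp.json()  # successful responses are JSON
    if 400 <= resp.status_code < 500:
        # Client-side problem: fix the request before retrying.
        raise ValueError("Bad request (%d): %s" % (resp.status_code, resp.text))
    # Server-side problem: safe to retry after a delay.
    raise RuntimeError("Server error (%d), retry later" % resp.status_code)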
| [
"[email protected]"
] | |
6821205dff8d4bf5af67bd99c4b092e8d390a3c3 | 5c533e2cf1f2fa87e55253cdbfc6cc63fb2d1982 | /python/pymonad/monad_parse.py | a37f2195d1412f89bfddadf9d4bb469858d0db09 | [] | no_license | philzook58/python | 940c24088968f0d5c655e2344dfa084deaefe7c6 | 6d43db5165c9bcb17e8348a650710c5f603e6a96 | refs/heads/master | 2020-05-25T15:42:55.428149 | 2018-05-14T03:33:29 | 2018-05-14T03:33:29 | 69,040,196 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 312 | py |
# parser is of form string -> [(symbol, restofstring), (other possibility), (other possibility), ...]
# parsebind needs to return a new parser that runs `parser`, feeds each parsed
# symbol into `parserproducer` to obtain the follow-on parser, and runs that
# parser on the remaining input, collecting every possible parse.
def parsebind(parser, parserproducer):
    def combinedparser(string):
        results = []
        for (symb, restofstring) in parser(string):
            results.extend(parserproducer(symb)(restofstring))
        return results
    return combinedparser
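# Editor's addition: a tiny usage sketch for parsebind above. `item` and
# `two_items` are made-up example parsers, not part of the original file.
def item(string):
    # Parse any single character: exactly one possible parse, or none if empty.
    return [(string[0], string[1:])] if string else []

two_items = parsebind(item, lambda first:
                      lambda rest: [((first, second), remaining)
                                    for (second, remaining) in item(rest)])
# two_items("abc") -> [(('a', 'b'), 'c')]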
"[email protected]"
] | |
befc484720fc8b2dd7e72ad047976b06972d3b9b | 05526f4941be395c0d41b4eed16ede6ae45b5e3a | /src/ossify/tokenizer.py | 902357cd116b2634aebe40f9a5b4c593ae199181 | [
"BSD-3-Clause"
] | permissive | ndevenish/ossify | 940606d75932597aedac2e0377f1f18ee249a126 | 5045b0cee309093ad2ccf35a4b1d92c4bb2783f3 | refs/heads/master | 2022-07-14T19:58:55.616828 | 2020-05-09T00:28:19 | 2020-05-09T00:28:19 | 261,609,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,238 | py | import io
import re
import sys
import token
import tokenize
from token import tok_name
from tokenize import TokenInfo
from typing import Iterator, List
numchars = "0123456789"
reNamelike = re.compile(r"[A-Za-z_]")
reWhitespace = re.compile("[ \t\n]+")
reName = re.compile(r"[A-Za-z_]\w*")
reStringStart = re.compile(r'"""|"|\'\'\'|\'')
def read_number(data):
if "3" in data:
print(data)
# Cheat for now
# -1 because will always be a newline, but sometimes that newline
# is an escaped newline
s = io.StringIO(data[:-1])
toke = next(tokenize.generate_tokens(s.readline))
if toke.type == token.NUMBER:
return toke.string
return False
def character_generator(file_interface, encoding="utf-8", verbose=False):
raw_data = file_interface.read()
try:
data = raw_data.decode(encoding)
except AttributeError:
data = raw_data
pos, maxlen = 0, len(data)
line_start, line_end = 0, 0
line_no = 0
while pos < maxlen:
# if pos > 3050:
# return
_previous_pos = pos
if line_end <= pos:
line_no += 1
# work out the line end for line-slicing
line_start = line_end
line_end = data.find("\n", pos) + 1
if line_end == 0:
line_end = maxlen
line = data[line_start:line_end]
line_remaining = data[pos:line_end]
# print(line)
if verbose:
print(
"Processing line: \033[37m"
+ repr(
data[line_start:pos] + "_e_[30;1m|_e_[0m" + data[pos:line_end]
).replace("_e_", "\033")
)
if data[pos] == "\\" and not (pos + 1) == maxlen and data[pos + 1] == "\n":
# Handle swallowing escaped newlines
if verbose:
print("Escaped newline")
pos += 2
line_no += 1
elif match := reWhitespace.match(data, pos=pos):
newlines = match.group().count("\n")
if "\n" in match.group():
yield TokenInfo(
type=token.NEWLINE,
string="\n",
start=(line_no, pos),
end=(line_no + newlines, match.end()),
line=line,
)
else:
yield TokenInfo(
type=token.OP,
string=" ",
start=(line_no, pos),
end=(line_no, match.end()),
line=line,
)
pos = match.end()
line_no += newlines
# elif data[pos] == "\t":
# if verbose:
# print(f"{pos}: Tab (sent space)")
# yield TokenInfo(
# type=token.OP,
# string=" ",
# start=(line_no, pos),
# end=(line_no, pos),
# line=line,
# )
# pos += 1
elif data[pos] == "\n":
if verbose:
print(f"{pos}: NEWLINE")
pos += 1
yield TokenInfo(
type=token.NEWLINE,
string="\n",
start=(line_no, pos - 1),
end=(line_no, pos),
line=line,
)
elif (string := reStringStart.match(data, pos=pos)) and (
pos == 0 or data[pos - 1] in " \n\t{}="
):
quote_type = string.group()
end_pattern = r"(?<!\\)" + quote_type
re_endquote = re.compile(end_pattern, re.M | re.S)
end_match = re_endquote.search(data, pos=pos + len(quote_type))
assert end_match, "Unterminated string"
contents = data[
string.start() + len(quote_type) : end_match.end() - len(quote_type)
]
start_l = line_no
line_no += contents.count("\n")
# Found the start of some string
# data.find(quote_type, pos=pos+len(string))
if verbose:
print(f"STRING: {contents!r}")
full_str = quote_type + contents + quote_type
yield TokenInfo(
type=token.STRING,
string=full_str,
start=(start_l, pos),
end=(line_no + 1, pos + len(full_str)),
line="",
)
pos = end_match.end()
elif name := reName.match(data, pos=pos):
if verbose:
print(f"{pos}: NAME {name.group()}")
yield TokenInfo(
type=token.NAME,
string=name.group(),
start=(line_no, name.start()),
end=(line_no, name.end()),
line=line,
)
pos += len(name.group())
elif data[pos] in "0123456789":
yield TokenInfo(
type=token.NUMBER,
string=data[pos],
start=(line_no, pos),
                end=(line_no, pos + 1),
line=line,
)
pos += 1
else:
if verbose:
print(f"OP: {data[pos]}")
yield TokenInfo(
type=token.OP,
string=data[pos],
start=(line_no, pos),
end=(line_no, pos + 1),
line=line,
)
# print("Something else?")
pos += 1
assert pos != _previous_pos, "Didn't advance position"
yield TokenInfo(
type=token.NEWLINE,
string="\n",
start=(line_no, pos),
end=(line_no, pos),
line="",
)
yield TokenInfo(
type=token.ENDMARKER,
string="",
start=(line_no, pos + 1),
end=(line_no, pos + 1),
line="",
)
return None
def simple_generator(file_interface, encoding="utf-8", verbose=True):
#
# needcont: Currently processing a continuing string
# contstr: The string currently being built
# endprog: The match condition for ending a continuing string
raw_data = file_interface.read()
try:
data = raw_data.decode(encoding)
except AttributeError:
data = raw_data
# last_line = b""
# line = b""
# line_no = 0
# while True:
# try:
# last_line = line
# line = file_interface()
# except StopIteration:
# line = b""
# if encoding is not None:
# line = line.decode(encoding)
# line_no += 1
# pos, max = 0, len(line)
pos, maxlen = 0, len(data)
line_start, line_end = 0, 0
line_no = 0
while pos < maxlen:
# if pos > 3050:
# return
_previous_pos = pos
if line_end <= pos:
line_no += 1
# work out the line end for line-slicing
line_start = line_end
line_end = data.find("\n", pos) + 1
if line_end == 0:
line_end = maxlen
line = data[line_start:line_end]
line_remaining = data[pos:line_end]
if verbose:
print(
"Processing line: \033[37m"
+ repr(
data[line_start:pos] + "_e_[30;1m|_e_[0m" + data[pos:line_end]
).replace("_e_", "\033")
)
if match := reWhitespace.match(line_remaining):
# Skip whitespace
pos += match.end()
elif data[pos] == "\\" and not (pos + 1) == maxlen and data[pos + 1] == "\n":
# Handle swallowing escaped newlines
if verbose:
print("Escaped newline")
pos += 2
elif data[pos] == "\n":
if verbose:
print(f"NEWLINE")
pos += 1
yield TokenInfo(
type=token.NEWLINE,
string="\n",
start=(line_no, pos - 1),
end=(line_no, pos),
line=line,
)
elif match := reName.match(line_remaining):
if verbose:
print(f"NAME: {match.group(0)}")
pos += match.end()
yield TokenInfo(
type=token.NAME,
string=match.group(0),
start=(line_no, match.start()),
end=(line_no, match.end()),
line=line,
)
elif data[pos] == "#":
pos = line_end
elif number := read_number(line_remaining):
if verbose:
print(f"NUMBER: {number}")
yield TokenInfo(
type=token.NUMBER,
string=number,
start=(line_no, pos),
end=(line_no, pos + len(number)),
line=line,
)
pos += len(number)
elif string := reStringStart.match(data, pos=pos):
quote_type = string.group()
end_pattern = r"(?<!\\)" + quote_type
re_endquote = re.compile(end_pattern, re.M | re.S)
end_match = re_endquote.search(data, pos=pos + len(quote_type))
assert end_match, "Unterminated string"
contents = data[
string.start() + len(quote_type) : end_match.end() - len(quote_type)
]
# Found the start of some string
# data.find(quote_type, pos=pos+len(string))
if verbose:
print(f"STRING: {contents!r}")
pos = end_match.end()
else:
if verbose:
print(f"CHAR: {data[pos]}")
yield TokenInfo(
type=token.OP,
string=data[pos],
start=(line_no, pos),
end=(line_no, pos + 1),
line=line,
)
# print("Something else?")
pos += 1
assert pos != _previous_pos, "Didn't advance position"
return TokenInfo(type=token.ENDMARKER, string="", start=pos, end=pos, line="")
Mark = int # NewType('Mark', int)
exact_token_types = token.EXACT_TOKEN_TYPES # type: ignore
def shorttok(tok: tokenize.TokenInfo) -> str:
return (
"%-25.25s"
% f"{tok.start[0]}.{tok.start[1]}: {token.tok_name[tok.type]}:{tok.string!r}"
)
class Tokenizer:
"""Caching wrapper for the tokenize module.
This is pretty tied to Python's syntax.
"""
_tokens: List[tokenize.TokenInfo]
def __init__(
self, tokengen: Iterator[tokenize.TokenInfo], *, verbose: bool = False
):
self._tokengen = tokengen
self._tokens = []
self._index = 0
self._verbose = verbose
if verbose:
self.report(False, False)
def getnext(self) -> tokenize.TokenInfo:
"""Return the next token and updates the index."""
cached = True
while self._index == len(self._tokens):
tok = next(self._tokengen)
if tok.type in (tokenize.COMMENT, tokenize.INDENT, tokenize.DEDENT,):
continue
# Transform NL to NEWLINE
if tok.type == token.NL:
tok = tokenize.TokenInfo(
token.NEWLINE,
tok.string,
start=tok.start,
end=tok.end,
line=tok.line,
)
if tok.type == token.ERRORTOKEN and tok.string.isspace():
continue
self._tokens.append(tok)
cached = False
tok = self._tokens[self._index]
self._index += 1
if self._verbose:
self.report(cached, False)
return tok
def peek(self) -> tokenize.TokenInfo:
"""Return the next token *without* updating the index."""
while self._index == len(self._tokens):
tok = next(self._tokengen)
if tok.type in (tokenize.COMMENT, tokenize.INDENT, tokenize.DEDENT,):
continue
# Transform NL to NEWLINE
if tok.type == token.NL:
tok = tokenize.TokenInfo(
token.NEWLINE,
tok.string,
start=tok.start,
end=tok.end,
line=tok.line,
)
if tok.type == token.ERRORTOKEN and tok.string.isspace():
continue
self._tokens.append(tok)
return self._tokens[self._index]
def diagnose(self) -> tokenize.TokenInfo:
if not self._tokens:
self.getnext()
return self._tokens[-1]
def mark(self) -> Mark:
return self._index
def reset(self, index: Mark) -> None:
if index == self._index:
return
assert 0 <= index <= len(self._tokens), (index, len(self._tokens))
old_index = self._index
self._index = index
if self._verbose:
self.report(True, index < old_index)
def report(self, cached: bool, back: bool) -> None:
if back:
fill = "-" * self._index + "-"
elif cached:
fill = "-" * self._index + ">"
else:
fill = "-" * self._index + "*"
if self._index == 0:
print(f"{fill} (Bof)")
else:
tok = self._tokens[self._index - 1]
print(f"{fill} {shorttok(tok)}")
def main():
import argparse
# Helper error handling routines
def perror(message):
sys.stderr.write(message)
sys.stderr.write("\n")
def error(message, filename=None, location=None):
if location:
args = (filename,) + location + (message,)
perror("%s:%d:%d: error: %s" % args)
elif filename:
perror("%s: error: %s" % (filename, message))
else:
perror("error: %s" % message)
sys.exit(1)
# Parse the arguments and options
parser = argparse.ArgumentParser(prog="python -m tokenize")
parser.add_argument(
dest="filename",
nargs="?",
metavar="filename.py",
help="the file to tokenize; defaults to stdin",
)
parser.add_argument(
"-e",
"--exact",
dest="exact",
action="store_true",
help="display token names using the exact type",
)
args = parser.parse_args()
try:
# Tokenize the input
if args.filename:
filename = args.filename
with open(filename, "r") as f:
tokens = list(character_generator(f))
else:
filename = "<stdin>"
tokens = character_generator(sys.stdin, None)
# Output the tokenization
for token in tokens:
token_type = token.type
if args.exact:
token_type = token.exact_type
token_range = "%d,%d-%d,%d:" % (token.start + token.end)
print("%-20s%-15s%-15r" % (token_range, tok_name[token_type], token.string))
except SyntaxError as err:
error(err, filename)
except OSError as err:
error(err)
except KeyboardInterrupt:
print("interrupted\n")
except Exception as err:
perror("unexpected error: %s" % err)
raise
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
94a1445b5d73052a0e9fbc2caed1e94ae674a0da | 4f2b9848ee1cf41017b424c7367a240f93625e86 | /doc/tutorial/config.py | cdb60e8b8cc1f8f18664a9d5edb55b488c038574 | [
"Apache-2.0"
] | permissive | martin-dostal-eli/python-icat | f5cc0e497376d7264db1af2bb9ad588e29a9bd7b | 8c882a3095f2dd7276a7c0edba44dc9b3ef4eedd | refs/heads/master | 2023-08-18T02:12:30.267009 | 2021-07-20T11:24:25 | 2021-07-20T11:24:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | #! /usr/bin/python
from __future__ import print_function
import icat
import icat.config
config = icat.config.Config(needlogin=False, ids=False)
client, conf = config.getconfig()
print("Connect to %s\nICAT version %s" % (conf.url, client.apiversion))
| [
"[email protected]"
] | |
e797642929d74abf07f38be4d559a60c4edc39c4 | a7b07e14f58008e4c9567a9ae67429cedf00e1dc | /lib/jnpr/healthbot/swagger/models/rule_schema_variable.py | 630dceaecb69d32efa58bd7ea4450d2121bdd4cb | [
"Apache-2.0"
] | permissive | dmontagner/healthbot-py-client | 3750d8375bc4fa7bedcdbc6f85f17fb812c19ea9 | 0952e0a9e7ed63c9fe84879f40407c3327735252 | refs/heads/master | 2020-08-03T12:16:38.428848 | 2019-09-30T01:57:24 | 2019-09-30T01:57:24 | 211,750,200 | 0 | 0 | Apache-2.0 | 2019-09-30T01:17:48 | 2019-09-30T01:17:47 | null | UTF-8 | Python | false | false | 6,447 | py | # coding: utf-8
"""
Healthbot APIs
API interface for Healthbot application # noqa: E501
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class RuleSchemaVariable(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'description': 'str',
'name': 'str',
'type': 'str',
'value': 'str'
}
attribute_map = {
'description': 'description',
'name': 'name',
'type': 'type',
'value': 'value'
}
def __init__(self, description=None, name=None, type=None, value=None): # noqa: E501
"""RuleSchemaVariable - a model defined in Swagger""" # noqa: E501
self._description = None
self._name = None
self._type = None
self._value = None
self.discriminator = None
if description is not None:
self.description = description
self.name = name
self.type = type
if value is not None:
self.value = value
@property
def description(self):
"""Gets the description of this RuleSchemaVariable. # noqa: E501
Description about the variable # noqa: E501
:return: The description of this RuleSchemaVariable. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this RuleSchemaVariable.
Description about the variable # noqa: E501
:param description: The description of this RuleSchemaVariable. # noqa: E501
:type: str
"""
self._description = description
@property
def name(self):
"""Gets the name of this RuleSchemaVariable. # noqa: E501
Variable name used in the playbook. Should be of pattern [a-zA-Z][a-zA-Z0-9_-]* # noqa: E501
:return: The name of this RuleSchemaVariable. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this RuleSchemaVariable.
Variable name used in the playbook. Should be of pattern [a-zA-Z][a-zA-Z0-9_-]* # noqa: E501
:param name: The name of this RuleSchemaVariable. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
if name is not None and len(name) > 64:
raise ValueError("Invalid value for `name`, length must be less than or equal to `64`") # noqa: E501
if name is not None and not re.search('^[a-zA-Z][a-zA-Z0-9_-]*$', name): # noqa: E501
raise ValueError("Invalid value for `name`, must be a follow pattern or equal to `/^[a-zA-Z][a-zA-Z0-9_-]*$/`") # noqa: E501
self._name = name
@property
def type(self):
"""Gets the type of this RuleSchemaVariable. # noqa: E501
Type of value supported. This information will be used by UI to display options available for the values # noqa: E501
:return: The type of this RuleSchemaVariable. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this RuleSchemaVariable.
Type of value supported. This information will be used by UI to display options available for the values # noqa: E501
:param type: The type of this RuleSchemaVariable. # noqa: E501
:type: str
"""
if type is None:
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
allowed_values = ["int", "float", "string", "boolean", "device-group", "device", "sensor-argument"] # noqa: E501
if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}" # noqa: E501
.format(type, allowed_values)
)
self._type = type
@property
def value(self):
"""Gets the value of this RuleSchemaVariable. # noqa: E501
Default value for the variable # noqa: E501
:return: The value of this RuleSchemaVariable. # noqa: E501
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this RuleSchemaVariable.
Default value for the variable # noqa: E501
:param value: The value of this RuleSchemaVariable. # noqa: E501
:type: str
"""
self._value = value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RuleSchemaVariable):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
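# Editor's addition: a small usage sketch for the generated model above. The
# values are arbitrary examples; `type` must be one of the allowed strings and
# `name` must match ^[a-zA-Z][a-zA-Z0-9_-]*$, otherwise the setters raise
# ValueError.
def _example_rule_variable():
    var = RuleSchemaVariable(
        name="input-interface",
        type="string",
        value="ge-0/0/0",
        description="Interface monitored by the rule",
    )
    return var.to_dict()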
| [
"[email protected]"
] | |
913a809b21dce8f948f0e742c823d688bef2cbc7 | 6032f996f989d521dbdee23ce6c1fbd778d8e964 | /qanta/wikipedia/categories.py | b5dedd32b88990ad251a82da1ae4cf7fe424ea37 | [
"MIT"
] | permissive | npow/qb | 9af1c07afd10f6aad9dbcbdd9209c6fde0e4347f | 044e623d2cbda96209fa1fdedffefa2208c98755 | refs/heads/master | 2020-05-26T15:41:13.864334 | 2019-05-26T16:47:07 | 2019-05-26T16:47:07 | 188,290,907 | 0 | 0 | null | 2019-05-23T19:02:23 | 2019-05-23T19:02:23 | null | UTF-8 | Python | false | false | 1,525 | py | """
Process Wikipedia category links
"""
import json
import re
import csv
import click
import tqdm
@click.group()
def categorylinks_cli():
pass
@categorylinks_cli.command()
@click.argument('categories_csv')
@click.argument('out_jsonl')
def clean(categories_csv, out_jsonl):
with open(categories_csv) as in_f, open(out_jsonl, 'w') as out_f:
for line in csv.reader(in_f):
if len(line) == 2:
if re.match(r'[a-zA-Z0-9\-\_\s]+', line[1]):
out_f.write(json.dumps({
'id': int(line[0]),
'cat': line[1]
}))
out_f.write('\n')
@categorylinks_cli.command()
@click.argument('category_csv')
@click.argument('out_json')
def disambiguate(category_csv, out_json):
disambiguation_pages = set()
blacklist = {
'Articles_with_links_needing_disambiguation_from_April_2018',
'All_articles_with_links_needing_disambiguation'
}
with open(category_csv) as f:
reader = csv.reader(f)
for r in tqdm.tqdm(reader, mininterval=1):
page_id, category = r[0], r[1]
l_category = category.lower()
if ((category not in blacklist) and
('disambiguation' in l_category) and
('articles_with_links_needing_disambiguation' not in l_category)):
disambiguation_pages.add(int(page_id))
with open(out_json, 'w') as f:
json.dump(list(disambiguation_pages), f)
| [
"[email protected]"
] | |
7a2fcbba659bb83f947490fc946a7ff3ba4665d2 | eefb06b0d8c8c98c1e9cfc4c3852d5c453eb5429 | /data/input/ARMmbed/htrun/mbed_host_tests/host_tests_plugins/host_test_plugins.py | 1c965fab88a3dc757f8bce97bec9d4293718641b | [] | no_license | bopopescu/pythonanalyzer | db839453bde13bf9157b76e54735f11c2262593a | 8390a0139137574ab237b3ff5fe8ea61e8a0b76b | refs/heads/master | 2022-11-22T02:13:52.949119 | 2019-05-07T18:42:52 | 2019-05-07T18:42:52 | 282,079,884 | 0 | 0 | null | 2020-07-23T23:46:09 | 2020-07-23T23:46:08 | null | UTF-8 | Python | false | false | 7,762 | py | """
mbed SDK
Copyright (c) 2011-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: Przemyslaw Wirkus <[email protected]>
"""
import os
import sys
import platform
from os import access, F_OK
from sys import stdout
from time import sleep
from subprocess import call
class HostTestPluginBase:
""" Base class for all plugins used with host tests
"""
###########################################################################
# Interface:
###########################################################################
###########################################################################
# Interface attributes defining plugin name, type etc.
###########################################################################
name = "HostTestPluginBase" # Plugin name, can be plugin class name
type = "BasePlugin" # Plugin type: ResetMethod, Copymethod etc.
capabilities = [] # Capabilities names: what plugin can achieve
# (e.g. reset using some external command line tool)
required_parameters = [] # Parameters required for 'kwargs' in plugin APIs: e.g. self.execute()
stable = False # Determine if plugin is stable and can be used
###########################################################################
# Interface methods
###########################################################################
def setup(self, *args, **kwargs):
""" Configure plugin, this function should be called before plugin execute() method is used.
"""
return False
def execute(self, capability, *args, **kwargs):
"""! Executes capability by name
@param capability Capability name
@param args Additional arguments
@param kwargs Additional arguments
@details Each capability e.g. may directly just call some command line program or execute building pythonic function
@return Capability call return value
"""
return False
def is_os_supported(self, os_name=None):
"""!
@return Returns true if plugin works (supportes) under certain OS
@os_name String describing OS.
See self.mbed_os_support() and self.mbed_os_info()
@details In some cases a plugin will not work under particular OS
mainly because command / software used to implement plugin
functionality is not available e.g. on MacOS or Linux.
"""
return True
###########################################################################
# Interface helper methods - overload only if you need to have custom behaviour
###########################################################################
def print_plugin_error(self, text):
"""! Function prints error in console and exits always with False
@param text Text to print
"""
print "Plugin error: %s::%s: %s"% (self.name, self.type, text)
return False
def print_plugin_info(self, text, NL=True):
"""! Function prints notification in console and exits always with True
@param text Text to print
@param NL Newline will be added behind text if this flag is True
"""
if NL:
print "Plugin info: %s::%s: %s"% (self.name, self.type, text)
else:
print "Plugin info: %s::%s: %s"% (self.name, self.type, text),
return True
def print_plugin_char(self, char):
""" Function prints char on stdout
"""
stdout.write(char)
stdout.flush()
return True
def check_mount_point_ready(self, destination_disk, init_delay=0.2, loop_delay=0.25):
"""! Waits until destination_disk is ready and can be accessed by e.g. copy commands
@return True if mount point was ready in given time, False otherwise
@param destination_disk Mount point (disk) which will be checked for readiness
@param init_delay - Initial delay time before first access check
@param loop_delay - polling delay for access check
"""
result = False
# Let's wait for 30 * loop_delay + init_delay max
if not access(destination_disk, F_OK):
self.print_plugin_info("Waiting for mount point '%s' to be ready..."% destination_disk, NL=False)
sleep(init_delay)
for i in range(30):
if access(destination_disk, F_OK):
result = True
break
sleep(loop_delay)
self.print_plugin_char('.')
return result
def check_parameters(self, capability, *args, **kwargs):
"""! This function should be ran each time we call execute() to check if none of the required parameters is missing
@return Returns True if all parameters are passed to plugin, else return False
@param capability Capability name
@param args Additional parameters
@param kwargs Additional parameters
"""
missing_parameters = []
for parameter in self.required_parameters:
if parameter not in kwargs:
missing_parameters.append(parameter)
if len(missing_parameters):
self.print_plugin_error("execute parameter(s) '%s' missing!"% (', '.join(missing_parameters)))
return False
return True
def run_command(self, cmd, shell=True):
"""! Runs command from command line.
@param cmd Command to execute
@param shell True if shell command should be executed (eg. ls, ps)
@details Function prints 'cmd' return code if execution failed
@return True if command successfully executed
"""
result = True
try:
ret = call(cmd, shell=shell)
if ret:
self.print_plugin_error("[ret=%d] Command: %s"% (int(ret), cmd))
return False
except Exception as e:
result = False
self.print_plugin_error("[ret=%d] Command: %s"% (int(ret), cmd))
self.print_plugin_error(str(e))
return result
def mbed_os_info(self):
"""! Returns information about host OS
@return Returns tuple with information about OS and host platform
"""
result = (os.name,
platform.system(),
platform.release(),
platform.version(),
sys.platform)
return result
def mbed_os_support(self):
"""! Function used to determine host OS
@return Returns None if host OS is unknown, else string with name
@details This function should be ported for new OS support
"""
result = None
os_info = self.mbed_os_info()
if (os_info[0] == 'nt' and os_info[1] == 'Windows'):
result = 'Windows7'
elif (os_info[0] == 'posix' and os_info[1] == 'Linux' and ('Ubuntu' in os_info[3])):
result = 'Ubuntu'
elif (os_info[0] == 'posix' and os_info[1] == 'Linux'):
result = 'LinuxGeneric'
elif (os_info[0] == 'posix' and os_info[1] == 'Darwin'):
result = 'Darwin'
return result
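# Editor's addition: a minimal sketch of a concrete plugin built on the base
# class above. The capability name and the reset tool invoked are made-up
# examples, not part of mbed's real plugin set.
class HostTestPluginExampleReset(HostTestPluginBase):
    name = "HostTestPluginExampleReset"
    type = "ResetMethod"
    capabilities = ["example_reset"]
    required_parameters = ["serial_port"]
    stable = False

    def setup(self, *args, **kwargs):
        return True

    def execute(self, capability, *args, **kwargs):
        if not self.check_parameters(capability, *args, **kwargs):
            return False
        if capability == "example_reset":
            # Delegate to a (hypothetical) command line tool via the helper.
            return self.run_command("example-reset-tool %s" % kwargs["serial_port"])
        return False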
| [
"[email protected]"
] | |
5957be3eebf4bcc847582b8b20f6771924155403 | 4c9580b2e09e2b000e27a1c9021b12cf2747f56a | /chapter05/chapter05_example01/chapter05_example01/settings.py | 6bdae198873c43cd8667c6b9aac8266fb69c6642 | [] | no_license | jzplyy/xiaoyue_mall | 69072c0657a6878a4cf799b8c8218cc7d88c8d12 | 4f9353d6857d1bd7dc54151ca8b34dcb4671b8dc | refs/heads/master | 2023-06-26T02:48:03.103635 | 2021-07-22T15:51:07 | 2021-07-22T15:51:07 | 388,514,311 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,368 | py | """
Django settings for chapter05_example01 project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import sys
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, os.path.join(BASE_DIR, 'chapter05_example01\\apps'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'gj(^a0w1e_)p4_+%9y4q3i#7yz_423=^ze4+9-wpj!8sci=esy'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'goods'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'chapter05_example01.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'chapter05_example01.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
53f85d5e77b251fd803da0cc317dc6dac3e3fd02 | b74e9be747c1a99fc5d67ca096157f512baf02ca | /tools/harness-automation/cases/reed_5_2_4.py | 151fa67c0fb6cee9b74e8e983fa7bb67a1aea761 | [
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | amccool/openthread | 468838cebc083d234192926aacb0e3efc0a83463 | 1e9d3c1dbfd66aa48c4cbb1dda0b41c9f05fefc7 | refs/heads/master | 2021-01-16T23:03:46.503666 | 2016-09-06T03:21:05 | 2016-09-06T03:21:05 | 67,469,844 | 0 | 0 | BSD-3-Clause | 2019-11-01T20:11:16 | 2016-09-06T03:23:32 | C++ | UTF-8 | Python | false | false | 1,846 | py | #!/usr/bin/env python
#
# Copyright (c) 2016, Nest Labs, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from autothreadharness.harness_case import HarnessCase
class REED_5_2_4(HarnessCase):
suite = 16
case = '5 2 4'
golden_devices_required = 17
def on_dialog(self, dialog, title):
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
ef8495ed987b371d3c9c09347e179d7fee0cfd92 | 6320fef2ea7376c2b35f97f1a5af004e90f09098 | /1-2주차 실습(복습)/venv/Lib/site-packages/pygame/tests/test_utils/__init__.py | fd3ec69cb674929081e3d41837df1458fa33d018 | [] | no_license | Dplo1514/ploaistudy | 7aa08d7f71653748a9e32dcc09ee8f6cec0aaed9 | e35e42b1e5f0c90cc1e2a59993a1ef73d8872d0c | refs/heads/master | 2023-09-03T00:45:55.601651 | 2021-10-24T12:19:38 | 2021-10-24T12:19:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,442 | py | #################################### IMPORTS ###################################
is_pygame_pkg = __name__.startswith("pygame.tests.")
import tempfile, sys, pygame, time, os
################################################################################
# Python 3.x compatibility
try:
xrange_ = xrange
except NameError:
xrange_ = range
try:
raw_input_ = raw_input
except NameError:
raw_input_ = input
if sys.version_info[0] == 3:
def tostring(row):
"""Convert row of bytes to string. Expects `row` to be an
``array``.
"""
return row.tobytes()
else:
def tostring(row):
"""Convert row of bytes to string. Expects `row` to be an
``array``.
"""
return row.tostring()
import unittest
if not hasattr(unittest.TestCase, "subTest"):
import contextlib
@contextlib.contextmanager
def subTest(self, msg=None, **params):
yield
return
unittest.TestCase.subTest = subTest
def geterror():
return sys.exc_info()[1]
class AssertRaisesRegexMixin(object):
"""Provides a way to prevent DeprecationWarnings in python >= 3.2.
For this mixin to override correctly it needs to be before the
unittest.TestCase in the multiple inheritance hierarchy.
e.g. class TestClass(AssertRaisesRegexMixin, unittest.TestCase)
This class/mixin and its usage can be removed when pygame no longer
supports python < 3.2.
"""
def assertRaisesRegex(self, *args, **kwargs):
try:
return super(AssertRaisesRegexMixin, self).assertRaisesRegex(
*args, **kwargs
)
except AttributeError:
try:
return super(AssertRaisesRegexMixin, self).assertRaisesRegexp(
*args, **kwargs
)
except AttributeError:
self.skipTest("No assertRaisesRegex/assertRaisesRegexp method")
################################################################################
this_dir = os.path.dirname(os.path.abspath(__file__))
trunk_dir = os.path.split(os.path.split(this_dir)[0])[0]
if is_pygame_pkg:
test_module = "tests"
else:
test_module = "test"
def trunk_relative_path(relative):
return os.path.normpath(os.path.join(trunk_dir, relative))
def fixture_path(path):
return trunk_relative_path(os.path.join(test_module, "fixtures", path))
def example_path(path):
return trunk_relative_path(os.path.join("examples", path))
sys.path.insert(0, trunk_relative_path("."))
################################## TEMP FILES ##################################
def get_tmp_dir():
return tempfile.mkdtemp()
################################################################################
def question(q):
return raw_input_("\n%s (y/n): " % q.rstrip(" ")).lower().strip() == "y"
def prompt(p):
return raw_input_("\n%s (press enter to continue): " % p.rstrip(" "))
#################################### HELPERS ###################################
def rgba_between(value, minimum=0, maximum=255):
if value < minimum:
return minimum
elif value > maximum:
return maximum
else:
return value
def combinations(seqs):
"""
Recipe 496807 from ActiveState Python CookBook
Non recursive technique for getting all possible combinations of a sequence
of sequences.
"""
r = [[]]
for x in seqs:
r = [i + [y] for y in x for i in r]
return r
def gradient(width, height):
"""
Yields a pt and corresponding RGBA tuple, for every (width, height) combo.
Useful for generating gradients.
Actual gradient may be changed, no tests rely on specific values.
Used in transform.rotate lossless tests to generate a fixture.
"""
for l in xrange_(width):
for t in xrange_(height):
yield (l, t), tuple(map(rgba_between, (l, t, l, l + t)))
def rect_area_pts(rect):
for l in xrange_(rect.left, rect.right):
for t in xrange_(rect.top, rect.bottom):
yield l, t
def rect_perimeter_pts(rect):
"""
Returns pts ((L, T) tuples) encompassing the perimeter of a rect.
The order is clockwise:
topleft to topright
topright to bottomright
bottomright to bottomleft
bottomleft to topleft
Duplicate pts are not returned
"""
clock_wise_from_top_left = (
[(l, rect.top) for l in xrange_(rect.left, rect.right)],
[(rect.right - 1, t) for t in xrange_(rect.top + 1, rect.bottom)],
[(l, rect.bottom - 1) for l in xrange_(rect.right - 2, rect.left - 1, -1)],
[(rect.left, t) for t in xrange_(rect.bottom - 2, rect.top, -1)],
)
for line in clock_wise_from_top_left:
for pt in line:
yield pt
def rect_outer_bounds(rect):
"""
Returns topleft outerbound if possible and then the other pts, that are
"exclusive" bounds of the rect
?------O
|RECT| ?|0)uterbound
|----|
O O
"""
return ([(rect.left - 1, rect.top)] if rect.left else []) + [
rect.topright,
rect.bottomleft,
rect.bottomright,
]
def import_submodule(module):
m = __import__(module)
for n in module.split(".")[1:]:
m = getattr(m, n)
return m
class SurfaceSubclass(pygame.Surface):
"""A subclassed Surface to test inheritance."""
def __init__(self, *args, **kwargs):
super(SurfaceSubclass, self).__init__(*args, **kwargs)
self.test_attribute = True
def test():
"""
Lightweight test for helpers
"""
r = pygame.Rect(0, 0, 10, 10)
assert rect_outer_bounds(r) == [(10, 0), (0, 10), (10, 10)] # tr # bl # br
assert len(list(rect_area_pts(r))) == 100
r = pygame.Rect(0, 0, 3, 3)
assert list(rect_perimeter_pts(r)) == [
(0, 0),
(1, 0),
(2, 0), # tl -> tr
(2, 1),
(2, 2), # tr -> br
(1, 2),
(0, 2), # br -> bl
(0, 1), # bl -> tl
]
print("Tests: OK")
################################################################################
| [
"[email protected]"
] | |
665dcc52eba524df257caff6e50e0b2f063ee789 | ae65873c3584cef7139066b224daad04410af6d2 | /Top10Words.py | b6f4ffe80cc71c2040700a1f7c86913682066030 | [] | no_license | rajatkashyap/Python | 2240c7472d07803c460c7a55d570e20694b694f9 | f74c85c65b0e209a5f7ab25b653d42835222faaf | refs/heads/master | 2022-06-25T19:20:52.847498 | 2022-06-08T14:40:45 | 2022-06-08T14:40:45 | 145,714,257 | 0 | 0 | null | 2022-04-25T00:18:37 | 2018-08-22T13:39:14 | Python | UTF-8 | Python | false | false | 558 | py | f=open('UHC.txt')
dict={}
words=f.read().split()
for word in words:
w=word.lower()
dict[w]=dict.get(w,0)+1
#print dict
str_tups=[]
for k,v in dict.items():
str_tups.append((v,k))
#print str_tups
str_tups.sort(reverse=True)
print str_tups[:10]
keys=dict.keys()
values=dict.values()
#print keys
#print values
values.sort(reverse=True)
for i in range(10):
for key in keys:
if dict[key]==values[i]:
print key,values[i]
'''
for i in range(10):
for d in dict:
if d[keys[i]]==values[i]:
print d ''' | [
"[email protected]"
] | |
9ad9f1f73f94769307e72df7e57956a71565790a | e741d661b1cbb1d48eff4adff3ce8d424b0b3aee | /meiduo_mall/apps/payment/apps.py | d314f1fe58cfc99b4a93ba784b8eadf87541370a | [
"MIT"
] | permissive | Tao-bug/meiduo_project | 09b3900ab7e1436ee3201c53461a9c119d1f56db | e3a24eac2c8231d0e27f6a7fa639dd36baa410b0 | refs/heads/master | 2022-07-16T02:18:22.319641 | 2019-11-12T13:54:23 | 2019-11-12T13:54:23 | 218,697,808 | 0 | 0 | MIT | 2022-07-01T00:55:01 | 2019-10-31T06:20:01 | JavaScript | UTF-8 | Python | false | false | 94 | py | from django.apps import AppConfig
class PaymentConfig(AppConfig):
name = 'apps.payment'
| [
"[email protected]"
] | |
718c1a3aa265318be8f270943122a2fef285e6e9 | 59d48214613a195573b5a0a1f10b32c889172155 | /alexa/reciPullLambda/ask_sdk_model/canfulfill/can_fulfill_intent_request.py | 61ffc9fb00f47a05ab691639b45bca434c75fe2e | [
"MIT"
] | permissive | ReciPull/recipull.github.io | 60861ebb7a6d77d39907c6332e346194ce4ad107 | e6b800af02658bb7948297c4ddc1b7af6d978839 | refs/heads/master | 2023-01-08T19:03:11.864298 | 2019-06-13T05:07:39 | 2019-06-13T05:07:39 | 180,684,629 | 1 | 0 | MIT | 2022-12-09T22:33:18 | 2019-04-11T00:33:03 | Python | UTF-8 | Python | false | false | 6,414 | py | # coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
from ask_sdk_model.request import Request
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional
from datetime import datetime
from ask_sdk_model.dialog_state import DialogState
from ask_sdk_model.intent import Intent
class CanFulfillIntentRequest(Request):
"""
An object that represents a request made to skill to query whether the skill can understand and fulfill the intent request with detected slots, before actually asking the skill to take action. Skill should be aware this is not to actually take action, skill should handle this request without causing side-effect, skill should not modify some state outside its scope or has an observable interaction with its calling functions or the outside world besides returning a value, such as playing sound,turning on/off lights, committing a transaction or a charge.
:param request_id: Represents the unique identifier for the specific request.
:type request_id: (optional) str
:param timestamp: Provides the date and time when Alexa sent the request as an ISO 8601 formatted string. Used to verify the request when hosting your skill as a web service.
:type timestamp: (optional) datetime
:param locale: A string indicating the user’s locale. For example: en-US. This value is only provided with certain request types.
:type locale: (optional) str
:param dialog_state:
:type dialog_state: (optional) ask_sdk_model.dialog_state.DialogState
:param intent:
:type intent: (optional) ask_sdk_model.intent.Intent
"""
deserialized_types = {
'object_type': 'str',
'request_id': 'str',
'timestamp': 'datetime',
'locale': 'str',
'dialog_state': 'ask_sdk_model.dialog_state.DialogState',
'intent': 'ask_sdk_model.intent.Intent'
} # type: Dict
attribute_map = {
'object_type': 'type',
'request_id': 'requestId',
'timestamp': 'timestamp',
'locale': 'locale',
'dialog_state': 'dialogState',
'intent': 'intent'
} # type: Dict
def __init__(self, request_id=None, timestamp=None, locale=None, dialog_state=None, intent=None):
# type: (Optional[str], Optional[datetime], Optional[str], Optional[DialogState], Optional[Intent]) -> None
"""An object that represents a request made to skill to query whether the skill can understand and fulfill the intent request with detected slots, before actually asking the skill to take action. Skill should be aware this is not to actually take action, skill should handle this request without causing side-effect, skill should not modify some state outside its scope or has an observable interaction with its calling functions or the outside world besides returning a value, such as playing sound,turning on/off lights, committing a transaction or a charge.
:param request_id: Represents the unique identifier for the specific request.
:type request_id: (optional) str
:param timestamp: Provides the date and time when Alexa sent the request as an ISO 8601 formatted string. Used to verify the request when hosting your skill as a web service.
:type timestamp: (optional) datetime
:param locale: A string indicating the user’s locale. For example: en-US. This value is only provided with certain request types.
:type locale: (optional) str
:param dialog_state:
:type dialog_state: (optional) ask_sdk_model.dialog_state.DialogState
:param intent:
:type intent: (optional) ask_sdk_model.intent.Intent
"""
self.__discriminator_value = "CanFulfillIntentRequest" # type: str
self.object_type = self.__discriminator_value
super(CanFulfillIntentRequest, self).__init__(object_type=self.__discriminator_value, request_id=request_id, timestamp=timestamp, locale=locale)
self.dialog_state = dialog_state
self.intent = intent
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {} # type: Dict
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, CanFulfillIntentRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
4a5a3b8daa86ac399ae0a0cc3604254a77635bbf | 00cb405170a6a9572bef0ec8f373813eada08c03 | /Agario/Window.py | bb5038b0e49d302d2ded8589eaacfcc9884a849c | [] | no_license | MarcPartensky/Python-Games | c0ad2857be5832d6029642bb0a96bc8e403a12e3 | ebfcaaf4a028eddb36bbc99184eb3f7a86eb24ed | refs/heads/master | 2022-09-03T00:04:16.402288 | 2022-08-12T17:10:22 | 2022-08-12T17:10:22 | 166,606,022 | 2 | 1 | null | 2021-03-07T16:20:15 | 2019-01-19T23:56:04 | Python | UTF-8 | Python | false | false | 1,621 | py | import pygame
from pygame.locals import *
class Window:
made=0
def __init__(self,game=None,size=None,font="monospace",set=True):
Window.made+=1
self.number=Window.made
self.title=game.name
self.font=font
self.open=True
pygame.init()
self.setSize(size)
self.font = pygame.font.SysFont(self.font, 65)
self.screen=pygame.display.set_mode(self.size)
pygame.display.set_caption(self.title)
def setSize(self,size=None):
if size is None:
info = pygame.display.Info()
self.size=(info.current_w/2,info.current_h/2)
else:
self.size=size
def pop_up(self,message):
pass
def scale(self,picture,size):
return pygame.transform.scale(picture,size)
def check(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.open=False
def select(self):
while self.open:
self.check()
for event in pygame.event.get():
if event.type == MOUSEBUTTONDOWN and event.button == 1:
return (event.pos[0],event.pos[1])
def point(self):
for event in pygame.event.get():
return (event.pos[0],event.pos[1])
def flip(self):
pygame.display.flip()
    def drawBackground(self,background):
        # Solid colour backgrounds are filled, image surfaces are blitted at the origin.
        if type(background) is tuple:
            self.screen.fill(background)
        else:
            self.screen.blit(background, (0, 0))
def drawPicture(self,picture,position):
self.screen.blit(picture, position)
def display(page):
pass
| [
"[email protected]"
] | |
13fc2d742161aea7d1a51f351cac30e21bcd181e | 172eb751b879d36d95b04d81db87a501cd18d8a1 | /ImageNetGroundTruth/utils.py | ade17f70a1d804e602c97056b666102575e5f3e0 | [] | no_license | byh1321/Pruning_Quantization_Estimation | 447bd3d806fe17611d665e56d7796af4e05ee400 | 772969c5a58259e387c88829dd936274199212e8 | refs/heads/master | 2023-05-03T19:25:29.957732 | 2021-06-03T17:53:38 | 2021-06-03T17:53:38 | 289,804,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,298 | py | '''Some helper functions for PyTorch, including:
- get_mean_and_std: calculate the mean and std value of dataset.
- msr_init: net parameter initialization.
- progress_bar: progress bar mimic xlua.progress.
'''
import os
import sys
import time
import math
import torch
import torch.nn as nn
import torch.nn.init as init
import scipy.misc
from scipy import ndimage
import numpy as np
def get_mean_and_std(dataset):
'''Compute the mean and std value of dataset.'''
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2)
mean = torch.zeros(3)
std = torch.zeros(3)
print('==> Computing mean and std..')
for inputs, targets in dataloader:
for i in range(3):
mean[i] += inputs[:,i,:,:].mean()
std[i] += inputs[:,i,:,:].std()
mean.div_(len(dataset))
std.div_(len(dataset))
return mean, std
def init_params(net):
'''Init layer parameters.'''
for m in net.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal(m.weight, mode='fan_out')
if m.bias:
init.constant(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant(m.weight, 1)
init.constant(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal(m.weight, std=1e-3)
if m.bias:
init.constant(m.bias, 0)
_, term_width = os.popen('stty size', 'r').read().split()
term_width = int(term_width)
TOTAL_BAR_LENGTH = 40.
last_time = time.time()
begin_time = last_time
def progress_bar(current, total, msg=None):
global last_time, begin_time
if current == 0:
begin_time = time.time() # Reset for new bar.
cur_len = int(TOTAL_BAR_LENGTH*current/total)
rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1
sys.stdout.write(' [')
for i in range(cur_len):
sys.stdout.write('=')
sys.stdout.write('>')
for i in range(rest_len):
sys.stdout.write('.')
sys.stdout.write(']')
cur_time = time.time()
step_time = cur_time - last_time
last_time = cur_time
tot_time = cur_time - begin_time
L = []
L.append(' Step: %s' % format_time(step_time))
L.append(' | Tot: %s' % format_time(tot_time))
if msg:
L.append(' | ' + msg)
msg = ''.join(L)
sys.stdout.write(msg)
for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3):
sys.stdout.write(' ')
# Go back to the center of the bar.
for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2):
sys.stdout.write('\b')
sys.stdout.write(' %d/%d ' % (current+1, total))
if current < total-1:
sys.stdout.write('\r')
else:
sys.stdout.write('\n')
sys.stdout.flush()
def format_time(seconds):
days = int(seconds / 3600/24)
seconds = seconds - days*3600*24
hours = int(seconds / 3600)
seconds = seconds - hours*3600
minutes = int(seconds / 60)
seconds = seconds - minutes*60
secondsf = int(seconds)
seconds = seconds - secondsf
millis = int(seconds*1000)
f = ''
i = 1
if days > 0:
f += str(days) + 'D'
i += 1
if hours > 0 and i <= 2:
f += str(hours) + 'h'
i += 1
if minutes > 0 and i <= 2:
f += str(minutes) + 'm'
i += 1
if secondsf > 0 and i <= 2:
f += str(secondsf) + 's'
i += 1
if millis > 0 and i <= 2:
f += str(millis) + 'ms'
i += 1
if f == '':
f = '0ms'
return f
##########################################################################
# Codes under this line is written by YH.Byun
def print_4Dtensor_to_png(tensor, filename):
npimg = np.array(tensor,dtype=float)
npimg = npimg.squeeze(0)
scipy.misc.toimage(npimg).save(filename+".png")
def genblurkernel(sigma):
order = 0
radius = int(4 * float(sigma) + 0.5)
kernel = scipy.ndimage.filters._gaussian_kernel1d(sigma, order, radius)
return kernel
def setMask(net, area, val):
mask = maskGen(net)
for i in range(len(mask)):
num_filter = mask[i].size()[0]
depth = mask[i].size()[1]
if len(mask[i].size()) == 2:
if i == (len(mask)-1):
mask[i][:,0:int(depth*area)] = val
#print(mask[i].size())
#print('0, ',depth*area)
else:
mask[i][0:int(num_filter*area),0:int(depth*area)] = val
#print(mask[i].size())
#print(num_filter*area,',',depth*area)
elif len(mask[i].size()) == 4:
if i == 0:
mask[i][0:int(num_filter*area),:,:,:] = val
#print(mask[i].size())
#print(num_filter*area,',0,0,0')
else:
mask[i][0:int(num_filter*area),0:int(depth*area),:,:] = val
#print(mask[i].size())
#print(num_filter*area,',',depth*area,',0,0')
return mask
def saveInitialParameter(net, initparam):
net_param = []
for m in net.modules():
if isinstance(m, nn.Conv2d):
net_param.append(m.weight.data)
elif isinstance(m, nn.Linear):
net_param.append(m.weight.data)
torch.save(net_param, initparam)
print("saving initial parameters")
def quantize(net, pprec):
for m in net.modules():
if isinstance(m, nn.Conv2d):
m.weight.data = torch.round(m.weight.data / (2 ** -(pprec))) * (2 ** -(pprec))
m.weight.data = torch.clamp(m.weight.data, -2, 2 - 2**(-pprec))
elif isinstance(m, nn.Linear):
m.weight.data = torch.round(m.weight.data / (2 ** -(pprec))) * (2 ** -(pprec))
m.weight.data = torch.clamp(m.weight.data, -2, 2 - 2**(-pprec))
return net
def printLayers(net):
for m in net.modules():
if isinstance(m, nn.Conv2d):
print(m)
elif isinstance(m, nn.Linear):
print(m)
def maskGen(net, isbias=0, isempty = 1):
mask = []
if isempty:
for m in net.modules():
if isinstance(m, nn.Conv2d):
mask.append(torch.zeros(m.weight.data.size()))
if isbias == 1:
mask.append(torch.zeros(m.bias.data.size()))
#print(torch.zeros(m.weight.data.size()).size())
elif isinstance(m, nn.Linear):
mask.append(torch.zeros(m.weight.data.size()))
if isbias == 1:
mask.append(torch.zeros(m.bias.data.size()))
#print(torch.zeros(m.weight.data.size()).size())
else:
for m in net.modules():
if isinstance(m, nn.Conv2d):
mask.append(torch.ones(m.weight.data.size()))
if isbias == 1:
mask.append(torch.ones(m.bias.data.size()))
#print(torch.ones(m.weight.data.size()).size())
elif isinstance(m, nn.Linear):
mask.append(torch.ones(m.weight.data.size()))
if isbias == 1:
mask.append(torch.zeros(m.bias.data.size()))
#print(torch.ones(m.weight.data.size()).size())
return mask
def pruneNetwork(net, mask):
index = 0
for m in net.modules():
if isinstance(m, nn.Conv2d):
m.weight.grad.data = torch.mul(m.weight.grad.data,mask[index].cuda())
m.weight.data = torch.mul(m.weight.data,mask[index].cuda())
index += 1
elif isinstance(m, nn.Linear):
m.weight.grad.data = torch.mul(m.weight.grad.data,mask[index].cuda())
m.weight.data = torch.mul(m.weight.data,mask[index].cuda())
index += 1
return net
def paramsGet(net):
index = 0
for m in net.modules():
if isinstance(m, nn.Conv2d):
if index == 0:
params = m.weight.view(-1,)
index += 1
else:
params = torch.cat((params,m.weight.view(-1,)),0)
index += 1
elif isinstance(m, nn.Linear):
params = torch.cat((params,m.weight.view(-1,)),0)
index += 1
return params
def findThreshold(params, pr):
thres=0
while 1:
tmp = (torch.abs(params.data)<thres).type(torch.FloatTensor)
result = torch.sum(tmp)/params.size()[0]
if (pr/100)<result:
#print("threshold : {}".format(thres))
return thres
else:
thres += 0.0001
#def findThreshold(params, pr):
# params_sorted, indice = torch.sort(params)
# index = int(pr * params.size()[0] / 100)
# print(params_sorted[13228760])
# print(params.size())
# print(index)
# return params_sorted[index].item()
def getPruningMask(net, thres):
index = 0
mask = []
for m in net.modules():
if isinstance(m, nn.Conv2d):
mask.append((torch.abs(m.weight.data)>thres).type(torch.FloatTensor))
index += 1
elif isinstance(m, nn.Linear):
mask.append((torch.abs(m.weight.data)>thres).type(torch.FloatTensor))
index += 1
return mask
def netMaskMul(net, mask, isbias=0, isbatch=0):
index = 0
if isbatch:
for m in net.modules():
if isinstance(m, nn.BatchNorm2d):
m.weight.data = torch.mul(m.weight.data,mask[index].cuda())
index += 1
m.bias.data = torch.mul(m.bias.data,mask[index].cuda())
index += 1
else:
for m in net.modules():
if isinstance(m, nn.Conv2d):
m.weight.data = torch.mul(m.weight.data,mask[index].cuda())
index += 1
if isbias == 1:
m.bias.data = torch.mul(m.bias.data,mask[index].cuda())
index += 1
elif isinstance(m, nn.Linear):
m.weight.data = torch.mul(m.weight.data,mask[index].cuda())
index += 1
if isbias == 1:
m.bias.data = torch.mul(m.bias.data,mask[index].cuda())
index += 1
return net
def addNetwork(net, net2, isbias=0):
index = 0
mask = saveNetwork(net2, isbias)
for m in net.modules():
if isinstance(m, nn.Conv2d):
m.weight.data = torch.add(m.weight.data,mask[index].cuda())
index += 1
if isbias:
m.bias.data = torch.add(m.bias.data,mask[index].cuda())
index += 1
elif isinstance(m, nn.Linear):
m.weight.data = torch.add(m.weight.data,mask[index].cuda())
index += 1
if isbias:
m.bias.data = torch.add(m.bias.data,mask[index].cuda())
index += 1
return net
def netMaskAdd(net, mask, isbias=0, isbatch=0):
index = 0
if isbatch:
for m in net.modules():
if isinstance(m, nn.BatchNorm2d):
m.weight.data = torch.add(m.weight.data,mask[index].cuda())
index += 1
m.bias.data = torch.add(m.bias.data,mask[index].cuda())
index += 1
else:
for m in net.modules():
if isinstance(m, nn.Conv2d):
m.weight.data = torch.add(m.weight.data,mask[index].cuda())
index += 1
if isbias == 1:
m.bias.data = torch.add(m.bias.data,mask[index].cuda())
index += 1
elif isinstance(m, nn.Linear):
m.weight.data = torch.add(m.weight.data,mask[index].cuda())
index += 1
if isbias == 1:
m.bias.data = torch.add(m.bias.data,mask[index].cuda())
index += 1
return net
def saveNetwork(net, isbias=0):
mask = []
for m in net.modules():
if isinstance(m, nn.Conv2d):
mask.append(m.weight.data)
if isbias:
mask.append(m.bias.data)
elif isinstance(m, nn.Linear):
mask.append(m.weight.data)
if isbias:
mask.append(m.bias.data)
return mask
def saveBatch(net, isempty=0):
mask = []
for m in net.modules():
if isinstance(m, nn.BatchNorm2d):
if isempty:
mask.append(torch.zeros(m.weight.size()))
mask.append(torch.zeros(m.bias.size()))
else:
mask.append(m.weight.data)
mask.append(m.bias.data)
return mask
def printLayerName(net):
index = 0
for m in net.modules():
if isinstance(m, nn.Conv2d):
print(index, " : Conv2d layer, ", m.weight.size())
index += 1
elif isinstance(m, nn.Linear):
print(index, " : FC layer, ", m.weight.size())
index += 1
elif isinstance(m, nn.BatchNorm2d):
print(index, " : BatchNorm2d layer, ", m.weight.size())
index += 1
return net
def freezeNetwork(net):
for m in net.modules():
if isinstance(m, nn.Conv2d):
for param in m.parameters():
param.requires_grad = False
elif isinstance(m, nn.Linear):
for param in m.parameters():
param.requires_grad = False
elif isinstance(m, nn.BatchNorm2d):
for param in m.parameters():
param.requires_grad = False
return net
def absorb_bn(module, bn_module):
w = module.weight.data
if module.bias is None:
zeros = torch.Tensor(module.out_channels).zero_().type(w.type())
module.bias = nn.Parameter(zeros)
b = module.bias.data
invstd = bn_module.running_var.clone().add_(bn_module.eps).pow_(-0.5)
w.mul_(invstd.view(w.size(0), 1, 1, 1).expand_as(w))
b.add_(-bn_module.running_mean).mul_(invstd)
if bn_module.affine:
w.mul_(bn_module.weight.data.view(w.size(0), 1, 1, 1).expand_as(w))
b.mul_(bn_module.weight.data).add_(bn_module.bias.data)
bn_module.register_buffer('running_mean', torch.zeros(module.out_channels).cuda())
bn_module.register_buffer('running_var', torch.ones(module.out_channels).cuda())
bn_module.register_parameter('weight', None)
bn_module.register_parameter('bias', None)
bn_module.affine = False
def is_bn(m):
return isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d)
def is_absorbing(m):
return isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear)
def search_absorbe_bn(model):
prev = None
for m in model.children():
if is_bn(m) and is_absorbing(prev):
m.absorbed = True
absorb_bn(prev, m)
search_absorbe_bn(m)
prev = m
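# search_absorbe_bn(model) folds every BatchNorm that directly follows a Conv2d
# or Linear into that layer: w' = w * gamma / sqrt(running_var + eps) and
# b' = (b - running_mean) * gamma / sqrt(running_var + eps) + beta, after which
# the BatchNorm is reset to an identity mapping (affine disabled, zero mean,
# unit variance), so eval-mode outputs are unchanged.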
#swap bias in net with bias in net2
def swapBias(net, net2):
mask_bias = saveBias(net2)
mask_bias_null = saveBias(net2, isempty=1)
index = 0
for m in net.modules():
if isinstance(m, nn.Conv2d):
m.bias.data = torch.mul(m.bias.data,mask_bias_null[index].cuda())
m.bias.data = torch.add(m.bias.data,mask_bias[index].cuda())
index += 1
elif isinstance(m, nn.Linear):
m.bias.data = torch.mul(m.bias.data,mask_bias_null[index].cuda())
m.bias.data = torch.add(m.bias.data,mask_bias[index].cuda())
index += 1
return net
def saveBias(net, isempty=0):
mask = []
for m in net.modules():
if isinstance(m, nn.Conv2d):
if isempty:
mask.append(torch.zeros(m.bias.data.size()))
else:
mask.append(m.bias.data)
elif isinstance(m, nn.Linear):
if isempty:
mask.append(torch.zeros(m.bias.data.size()))
else:
mask.append(m.bias.data)
return mask
def concatMask(mask1, mask2):
index = 0
for i in range(len(mask1)):
mask1[index] = ((mask1[index] + mask2[index]) != 0).type(torch.FloatTensor)
index += 1
return mask1
def getExtendedMask(mask):
index = torch.FloatTensor()
for i in range(len(mask)):
if mask[i].dim() == 4:
mask_size = mask[i].size()[0] * mask[i].size()[1] * mask[i].size()[2] * mask[i].size()[3]
if mask[i].size()[2] == 1:
if mask[i].size()[1] % 3 == 1:
index_for_print = torch.zeros(mask[i].size()[0], mask[i].size()[1]+2,1,1)
index_for_print[:,:-2,:,:] = mask[i].data
elif mask[i].size()[1] % 3 == 2:
index_for_print = torch.zeros(mask[i].size()[0], mask[i].size()[1]+1,1,1)
index_for_print[:,:-1,:,:] = mask[i].data
else:
index_for_print = mask[i].data
index_for_print = index_for_print.view(-1,3)
index_for_print = (torch.sum(index_for_print, dim=1) != 0).type(torch.FloatTensor)
index = torch.cat((index, index_for_print),0)
else:
index_for_print = mask[i].data
index_for_print = index_for_print.view(-1,3)
index_for_print = (torch.sum(index_for_print, dim=1) != 0).type(torch.FloatTensor)
index = torch.cat((index, index_for_print),0)
else:
mask_size = mask[i].size()[0] * mask[i].size()[1]
index_for_print = torch.zeros(mask[i].size()[0], mask[i].size()[1] + 1)
index_for_print[:,:-1] = mask[i].data
index_for_print = index_for_print.view(-1,3)
index_for_print = (torch.sum(index_for_print, dim=1) != 0).type(torch.FloatTensor)
index = torch.cat((index, index_for_print),0)
return index
def quantBatch(net, intbit, pprec):
for m in net.modules():
if isinstance(m, nn.BatchNorm2d):
m.running_var.data = torch.round(m.running_var.data / (2 ** -(pprec))) * (2 ** -(pprec))
m.running_var.data = torch.clamp(m.running_var.data, max=1, min=2**(-intbit))
m.weight.data = torch.round(m.weight.data / (2 ** -(15))) * (2 ** -(15))
m.weight.data = torch.clamp(m.weight.data,-(2) ** intbit, 2 ** intbit)
m.bias.data = torch.round(m.bias.data / (2 ** -(pprec))) * (2 ** -(pprec))
m.bias.data = torch.clamp(m.bias.data,-(2) ** intbit, 2 ** intbit)
m.running_mean.data = torch.round(m.running_mean.data / (2 ** -(pprec))) * (2 ** -(pprec))
m.running_mean.data = torch.clamp(m.running_mean.data,-(2) ** intbit, 2 ** intbit)
return net
def swapBiasandBatch(net, net2):
    # Swap the conv/linear biases and the BatchNorm parameters of net with the
    # values stored in net2 by composing the two dedicated helpers.
    net = swapBias(net, net2)
    net = swapBatch(net, net2)
    return net
def swapBatch(net, net2):
mask_batch = saveBatch(net2)
mask_batch_null = saveBatch(net2, isempty=1)
index = 0
for m in net.modules():
if isinstance(m, nn.BatchNorm2d):
m.weight.data = torch.mul(m.weight.data,mask_batch_null[index].cuda())
m.weight.data = torch.add(m.weight.data,mask_batch[index].cuda())
index += 1
m.bias.data = torch.mul(m.bias.data,mask_batch_null[index].cuda())
m.bias.data = torch.add(m.bias.data,mask_batch[index].cuda())
index += 1
m.running_mean.data = torch.mul(m.running_mean.data,mask_batch_null[index].cuda())
m.running_mean.data = torch.add(m.running_mean.data,mask_batch[index].cuda())
index += 1
m.running_var.data = torch.mul(m.running_var.data,mask_batch_null[index].cuda())
m.running_var.data = torch.add(m.running_var.data,mask_batch[index].cuda())
index += 1
return net
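# Note: this second definition of saveBatch supersedes the earlier one and also
# captures the running_mean / running_var buffers of each BatchNorm2d layer.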
def saveBatch(net, isempty=0):
mask = []
for m in net.modules():
if isinstance(m, nn.BatchNorm2d):
if isempty:
mask.append(torch.zeros(m.weight.data.size()))
mask.append(torch.zeros(m.bias.data.size()))
mask.append(torch.zeros(m.running_mean.data.size()))
mask.append(torch.zeros(m.running_var.data.size()))
else:
mask.append(m.weight.data)
mask.append(m.bias.data)
mask.append(m.running_mean.data)
mask.append(m.running_var.data)
return mask
def printFeature(feature, filename):
f = open(filename, 'w')
for i in range(feature.data.size()[1]):
for j in range(feature.data.size()[2]):
for k in range(feature.data.size()[3]):
print(feature.data[0,i,j,k].item(), file=f, end=',')
print('',file=f)
print('',file=f)
f.close()
return
def printconv1_0(net):
for m in net.modules():
if isinstance(m, nn.Conv2d):
print(m.weight[0])
try:
print(m.bias[0])
except:
print("There is no bias")
pass
return
def printbatch1(net):
for m in net.modules():
if isinstance(m, nn.BatchNorm2d):
print(m.weight)
print(m.bias)
print(m.running_mean)
print(m.running_var)
return
def printlinear1_0(net):
for m in net.modules():
if isinstance(m, nn.Linear):
print(m.weight[0])
try:
print(m.bias[0])
except:
print("There is no bias")
pass
return
def float_to_hex(float_):
temp = float_ * 2**7 # Scale the number up.
temp = torch.round(temp) # Turn it into an integer.
temp = int(temp)
temp = temp & 0xff
return '{:02x}'.format(temp)
def float_to_hex_16(float_):
temp = float_ * 2**8 # Scale the number up.
temp = torch.round(temp) # Turn it into an integer.
temp = int(temp)
temp = temp & 0xffff
return '{:04x}'.format(temp)
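# Worked examples (comment only; float_ is assumed to be a scalar tensor, as
# torch.round() requires):
#   float_to_hex(torch.tensor(0.5))  -> 0.5 * 2**7 = 64        -> '40'
#   float_to_hex(torch.tensor(-1.0)) -> -128, and -128 & 0xff  -> '80'
#   float_to_hex_16 behaves the same with a 2**8 scale and a 16-bit wrap-around.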
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
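# Illustrative usage (not taken from the original training loop):
#   meter = AverageMeter()
#   meter.update(0.75, n=32)   # batch metric 0.75 averaged over 32 samples
#   meter.update(0.50, n=32)
#   meter.avg                  # -> 0.625, the sample-weighted running mean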
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
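# Illustrative usage (tensor shapes are assumptions, not from this file):
#   output: (batch, num_classes) scores, target: (batch,) class indices
#   top1, top5 = accuracy(output, target, topk=(1, 5))
#   each result is a 1-element tensor holding the percentage of correct predictions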
from math import cos, pi
def adjust_learning_rate(optimizer, epoch, iteration, num_iter, ne, init_lr):
lr = optimizer.param_groups[0]['lr']
warmup_epoch = 5
warmup_iter = warmup_epoch * num_iter
current_iter = iteration + epoch * num_iter
max_iter = ne * num_iter
lr = init_lr * (1 + cos(pi * (current_iter - warmup_iter) / (max_iter - warmup_iter))) / 2
if epoch < warmup_epoch:
lr = init_lr * current_iter / warmup_iter
for param_group in optimizer.param_groups:
param_group['lr'] = lr
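# Behaviour sketch: with init_lr=0.1 and the hard-coded warmup_epoch=5, the rate
# grows linearly from 0 to 0.1 over the first 5 epochs, then follows a half-cosine
# decay, lr = 0.1 * (1 + cos(pi * progress)) / 2, approaching 0 at epoch `ne`.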
| [
"[email protected]"
] | |
037c0297e6528cdbf68ecb8b3295c9ce74f0598e | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/143/usersdata/126/62651/submittedfiles/av2_p3_m2.py | 7d22e7aa5f8b510c2f6ca3d97182ffc3fc5c67bd | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 653 | py | # -*- coding: utf-8 -*-
def listadegraus(a):
b=[]
for i in range(0,len(a)-1,1):
if a[i]>a[i+1]:
cont=0
            for j in range(a[i], a[i+1], -1):
cont=cont+1
b.insert(0,cont)
elif a[i]<a[i+1]:
cont=0
            for j in range(a[i], a[i+1], 1):
cont=cont+1
b.insert(0,cont)
elif a[i]==a[i+1]:
cont=0
b.insert(0,cont)
return(b)
n=int(input('digite a quantidade de termos da lista:'))
a=[]
for i in range(0,n,1):
m=int(input('digite um valor:'))
a.append(m)
print(listadegraus(a))
| [
"[email protected]"
] | |
df82e709433df0b153edd7d9aea14060851ad2cf | c31c8095ce4d4e9686e3e7ad6b004342e49671fa | /forum/classes/archives/CLASS_Lieu.py | c5b8db114583e2f045264fd8b45f2735706e116e | [] | no_license | Lionalisk/arrakambre | 7bcc96dea2ca2a471572bfb1646256f1382ce25b | 2caece9be5eebf21ddfa87a6c821c32b5d5019a2 | refs/heads/master | 2020-12-07T19:31:24.471090 | 2020-01-09T10:14:29 | 2020-01-09T10:14:29 | 232,782,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,184 | py | from django.db import models
from forum.models import Maison
from forum.classes.CLASS_Perso import *
print('BBBB')
class Lieu(models.Model):
nom = models.CharField(max_length=100, unique=True)
description = models.TextField(default='')
image = models.CharField(max_length=40, default = 'lieu_none.jpg')
maison = models.ForeignKey(Maison, verbose_name="Maison", null=True, on_delete=models.SET_NULL, blank=True)
passages = models.ManyToManyField('self', blank=True)
lieu_parent = models.ForeignKey('self', verbose_name="Lieu", null=True, on_delete=models.DO_NOTHING, blank=True)
dissimulation = models.SmallIntegerField(default=0)
defense_garde = models.SmallIntegerField(default=0)
defense_assault = models.SmallIntegerField(default=0)
defense_intrusion = models.SmallIntegerField(default=0)
    perso_autorise = models.ManyToManyField('Perso', blank=True, related_name = 'persos_autorises') # list of people authorized by the master of the place to enter
secret = models.BooleanField(default=False)
proprietaire = models.ForeignKey('Perso', null=True, on_delete=models.SET_NULL, blank=True, related_name = 'proprietaire')
#action =
def __str__(self):
return self.nom | [
"[email protected]"
] | |
da6fa81c852b746e1fded343f4e04a7e146e335e | 39b8aa964883b2bde4349e0c9c38e3233c310548 | /src/Power of Four.py | 96d2db9a48b59d6376e2dbcb8be1027d9d34085f | [] | no_license | orifake/leetcode-python | 053b82491e0b8d6197dd12d92eec5883211285db | 8e375ebebe0a0285efefc33ed61afb22f41d0c75 | refs/heads/master | 2023-03-09T14:32:17.833456 | 2021-02-26T16:09:31 | 2021-02-26T16:09:31 | 264,466,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 473 | py | import math
class Solution(object):
def isPowerOfFour(self, num):
"""
:type num: int
:rtype: bool
"""
return num > 0 and (num & (num - 1)) == 0 and \
((num & 0b01010101010101010101010101010101) == num)
class Solution2:
def isPowerOfFour(self, num: int) -> bool:
if num <= 0:
return False
return (math.log10(num) / math.log10(4)) % 1 == 0
t = Solution()
print(t.isPowerOfFour(4)) | [
"[email protected]"
] | |
17eb256179da0f291fdd0e5d21d32169501672e1 | e21ed71610f9d1004dfa21206300c0e9f3887e89 | /modulo_2/Codewars/dev-junior/find_even_array.py | beb4a2bad5d9a8b39ec87d16249da6a0ba36113a | [] | no_license | hpfn/wttd-2017-exerc | c0c79ee0cb3b5b331932787d280deee679357bc1 | b1bf1394d2e2adc29257b7c4273af21b8509335f | refs/heads/master | 2020-12-30T11:29:13.218980 | 2017-10-03T19:04:03 | 2017-10-03T19:04:03 | 91,572,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | # coding=utf-8
def find_even_index(arr):
tam_arr = len(arr)
for x in range(tam_arr):
if sum(arr[:x]) == sum(arr[x+1:]):
return x
return -1
| [
"[email protected]"
] | |
be24fff7640880924ac1b8352d63c9ce128039bd | 49beeee0d9aff3b776545cb553ef1bf15dd9f190 | /example/example/views.py | 6c06b12a01b8dad493049a74201b5a5b9af1ada9 | [
"MIT"
] | permissive | bluedisk/django-korean-fields | 238364cf4f766db824adec832aaa2d83619cded1 | b655e23d9a73e61cb217e34719ee6a2509f8f475 | refs/heads/master | 2020-03-19T09:55:10.974426 | 2018-11-10T15:02:02 | 2018-11-10T15:02:02 | 136,327,803 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 595 | py | # -*- coding: utf-8 -*-
from django.forms import forms, CharField
from django.http import HttpResponse
from django.shortcuts import render
from korean.fields import JuminFormField
class TestForm(forms.Form):
jumin1 = JuminFormField()
jumin2 = JuminFormField()
def demo(request):
if request.method == 'POST':
form = TestForm(request.POST)
if form.is_valid():
            return HttpResponse('success : ' + form.cleaned_data['jumin1'])
else:
form = TestForm(initial={'jumin1': '010203-4567890'})
return render(request, 'demo.html', {'form': form})
| [
"[email protected]"
] | |
d47c3724879680967f10765f503c820e7982fb3f | 714d4d2796e9b5771a1850a62c9ef818239f5e77 | /components/metrics/DEPS | 2f4d413d44817a460d2dc1304dd4027f1f530765 | [
"BSD-3-Clause"
] | permissive | CapOM/ChromiumGStreamerBackend | 6c772341f815d62d4b3c4802df3920ffa815d52a | 1dde005bd5d807839b5d45271e9f2699df5c54c9 | refs/heads/master | 2020-12-28T19:34:06.165451 | 2015-10-21T15:42:34 | 2015-10-23T11:00:45 | 45,056,006 | 2 | 0 | null | 2015-10-27T16:58:16 | 2015-10-27T16:58:16 | null | UTF-8 | Python | false | false | 243 | # This component is shared with the Chrome OS build, so it's important to limit
# dependencies to a minimal set.
include_rules = [
"-components",
"+components/compression",
"+components/metrics",
"+components/variations",
"-net",
]
| [
"[email protected]"
] | ||
7fb4ea8ca62ee742cb03add25202bb3018bba0d6 | 8562adfbeb7cf901aeeaf004dc1e53c286a24d48 | /beg86.py | ba49d5c28d0528682212047ecc0dd3986de5a4fc | [] | no_license | sarureddi/isogram | 1d4f8a7566a1df0f4a7b42502be60a1fafaabc10 | 3aca7e1172977cd116c0902761d70ded84402310 | refs/heads/master | 2020-06-03T11:03:43.392152 | 2019-06-12T09:54:11 | 2019-06-12T09:54:11 | 191,544,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | si1=str(input())
l=len(si1)
s=set(si1)
if(l==len(s)):
print("Yes")
else:
print("No")
| [
"[email protected]"
] | |
d3b6e9f0e660a6ab3559ab5e2029a46b8e10bf27 | 255efb54075eb8cc2412bf1d5c936a97a003337e | /xt/environment/__init__.py | 69338935f833cbdd1def7455667f8075e68b8eed | [
"MIT"
] | permissive | jinqiuzhao/xingtian | 914a4d48c62fd8b3d4ddd0479e9bab54bbe5cba7 | 95953dc6109c96e68dcdeb9755b3679ff51742d4 | refs/heads/master | 2023-06-06T06:20:28.815549 | 2021-07-02T10:00:42 | 2021-07-02T10:00:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Build environment module.
Encapsulate the different simulations.
Unify the single- and multi-agent interfaces.
"""
from __future__ import division, print_function
from xt.framework import Registers
def env_builder(env_name, env_info, **kwargs):
"""
    Interface function for creating an environment instance.
    :param env_name: the name of the environment
    :param env_info: the config info of the environment
    :return: an environment instance
"""
return Registers.env[env_name](env_info, **kwargs)
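# Illustrative usage (the registered environment name and its config keys are
# assumptions, not taken from this module):
#   env = env_builder("gym_env", {"name": "CartPole-v0"})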
| [
"[email protected]"
] | |
d98e426c5ffa96200e49a63c91cbb1ac43216323 | 220e3fe31f00df908dc8d00c507400425f924cc3 | /examples/multi_system/act6/unload.py | bf0fcc574b45c2f7fcf2d21c030c21e4aa89ff1f | [
"MIT"
] | permissive | danielmitterdorfer/Thespian | 3ed700d9fc6da35becfe801d3ab3bb68c86bddbc | f59439df8a6147b90ec31b44924d6a1b620f09d9 | refs/heads/master | 2021-01-19T05:06:33.005708 | 2017-07-31T04:44:03 | 2017-07-31T04:44:03 | 65,544,862 | 0 | 0 | null | 2016-08-12T10:22:29 | 2016-08-12T10:22:29 | null | UTF-8 | Python | false | false | 238 | py | from thespian.actors import ActorSystem, Actor, ValidateSource, ValidatedSource
import sys
portnum = int(sys.argv[1])
srchash = sys.argv[2]
asys = ActorSystem('multiprocTCPBase', {'Admin Port': portnum})
asys.unloadActorSource(srchash)
| [
"[email protected]"
] | |
46e48392571cf7b50609181560a7a5b5cfd54d72 | 1d665f40197ba89f756e862c0e62a889c42cddfb | /commission/migrations/0007_auto_20150407_2034.py | 2b1be1c3a9965aa2314ab05057b9179433f0c7eb | [
"MIT"
] | permissive | Ourinternet/website | 8d9f9ddfe7d17fb0bb11b978cf3a7cd34af456ed | 648203c0d0620da2d11b3b0e398ee218b5bef5df | refs/heads/master | 2021-01-21T21:49:06.834576 | 2016-03-16T20:43:58 | 2016-03-16T20:43:58 | 15,683,988 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 429 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('commission', '0006_auto_20150407_1825'),
]
operations = [
migrations.AlterField(
model_name='feature',
name='link',
field=models.CharField(max_length=1024, null=True, blank=True),
),
]
| [
"[email protected]"
] | |
044985b9b265586f2b071cc1296c5845a039b17d | 56b7e5ed6941fc4b83148e00bd51421dc3ac993a | /Indeed/Expire Map.py | 2b1778212c66da456e0bb6bd3e0defd2bbc1db77 | [] | no_license | samir-0711/Leetcode-Python | f960e15015a3f2fd88f723d7f9237945a7133553 | d75876ae96bcd85c67bbfbf91bbc0f0bc773e97c | refs/heads/master | 2022-12-18T05:27:48.224001 | 2020-09-30T21:03:42 | 2020-09-30T21:03:42 | 300,061,318 | 0 | 0 | null | 2020-09-30T20:59:42 | 2020-09-30T20:59:42 | null | UTF-8 | Python | false | false | 722 | py | import time
class Data:
def __init__(self, value, duration):
self.value = value
self.duration = duration
self.startTime = int(round(time.time()))
class ExpireMap:
def __init__(self):
self.map = {}
def get(self, key):
        data = self.map.get(key)
        if data is None:
            return None
        currTime = int(round(time.time()))
        if currTime - data.startTime <= data.duration:
            return data.value
        else:
            # the entry has expired: drop it from the map and report a miss
            del self.map[key]
            return None
def set(self, key, value, duration):
data = Data(value, duration)
self.map[key] = data
test1 = ExpireMap()
test1.set(1, 5, 3)
time.sleep(2)
print test1.get(1)
time.sleep(2)
print test1.get(1)
| [
"[email protected]"
] | |
6d9a899cc5415e40329693b80d3cc1bbf9759db2 | a257bf65a2a1ba2c6841dd25c89d98c5672e4e57 | /BackEnd/Semana22/DjangoRestFramework/DjangoRestFramework/wsgi.py | 424593130b609b9f268eda5e5d98d2c974645dad | [] | no_license | jorgegarba/CodiGo9 | 190cb67e3c7f9cbad271baf62657bda7ca03ec42 | 3b85c36a3ed8d2d5ee1d0fb6e8ca18599621fe47 | refs/heads/master | 2023-01-22T22:31:00.244982 | 2020-03-31T17:59:37 | 2020-03-31T17:59:37 | 211,982,487 | 6 | 5 | null | 2023-01-05T05:23:27 | 2019-10-01T00:21:25 | JavaScript | UTF-8 | Python | false | false | 415 | py | """
WSGI config for DjangoRestFramework project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DjangoRestFramework.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
6f9219124cdf28edd912b1cbde65e7ea17aece30 | 7b315bbe8c85ce05e6c51112e985ae1b392d83f5 | /desafio_calcipv4/__init__.py | 52688559769bae42e626eb8e42a779ae27f16e24 | [] | no_license | Cica013/aprendendoPython | e9f993b1b144e294a338a53f2bc36673d3cd00a6 | 9c964f2322e3d52b39a811aceec64b169bab4e10 | refs/heads/main | 2023-08-10T20:12:47.640239 | 2021-10-06T21:01:19 | 2021-10-06T21:01:19 | 385,755,818 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | from classes.calcipv4 import CalcIpv4
calc_ipv4 = CalcIpv4(ip='192.168.0.1', mascara='255.255.255.0')
| [
"[email protected]"
] | |
0bebf2b16ff727c6ad6f1d7aca0f42970ec1dc48 | bed559d18b0a9604e6d18879e1f3837d228d1440 | /rx/backpressure/pausable.py | 631ce64e952fd6f555f3e9866c6f605c96299a8e | [
"Apache-2.0"
] | permissive | jesonjn/RxPY | a80b7a8f0a3a8a6ddcb7f3ed678d2f8411cad84e | 9dfb62979f2c54b93bbb8c0ee5fa18cfae4d73d0 | refs/heads/master | 2020-12-29T00:25:17.866220 | 2014-11-15T10:24:05 | 2014-11-15T10:24:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,932 | py | from six import add_metaclass
from rx import Observable
from rx.internal import ExtensionMethod
from rx.disposables import CompositeDisposable, Disposable
from rx.subjects import Subject
class PausableObservable(Observable):
def __init__(self, source, subject=None):
self.source = source
self.subject = subject or Subject()
self.is_paused = True
super(PausableObservable, self).__init__(self.subscribe)
def subscribe(self, observer):
conn = self.source.publish()
subscription = conn.subscribe(observer)
connection = [Disposable.empty()]
def on_next(b):
if b:
connection[0] = conn.connect()
else:
connection[0].dispose()
connection[0] = Disposable.empty()
pausable = self.subject.distinct_until_changed().subscribe(on_next)
return CompositeDisposable(subscription, connection[0], pausable)
def pause(self):
if self.is_paused:
return
self.is_paused = True
self.subject.on_next(False)
def resume(self):
if not self.is_paused:
return
self.is_paused = False
self.subject.on_next(True)
@add_metaclass(ExtensionMethod)
class ObservablePausable(Observable):
"""Uses a meta class to extend Observable with the methods in this class"""
def pausable(self, pauser):
"""Pauses the underlying observable sequence based upon the observable
sequence which yields True/False.
Example:
pauser = rx.Subject()
source = rx.Observable.interval(100).pausable(pauser)
Keyword parameters:
pauser -- {Observable} The observable sequence used to pause the
underlying sequence.
Returns the observable {Observable} sequence which is paused based upon
the pauser."""
return PausableObservable(self, pauser)
| [
"[email protected]"
] | |
b891a21e50fd7f9a52706f2b802ad343cca4ea72 | c1bd12405d244c5924a4b069286cd9baf2c63895 | /azure-mgmt-compute/azure/mgmt/compute/v2018_04_01/models/compute_management_client_enums.py | 94796a92c7936618c37a51b7bf0ec2a9b37639ee | [
"MIT"
] | permissive | lmazuel/azure-sdk-for-python | 972708ad5902778004680b142874582a284a8a7c | b40e0e36cc00a82b7f8ca2fa599b1928240c98b5 | refs/heads/master | 2022-08-16T02:32:14.070707 | 2018-03-29T17:16:15 | 2018-03-29T17:16:15 | 21,287,134 | 1 | 3 | MIT | 2019-10-25T15:56:00 | 2014-06-27T19:40:56 | Python | UTF-8 | Python | false | false | 1,085 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from enum import Enum
class StorageAccountTypes(str, Enum):
standard_lrs = "Standard_LRS"
premium_lrs = "Premium_LRS"
class OperatingSystemTypes(str, Enum):
windows = "Windows"
linux = "Linux"
class DiskCreateOption(str, Enum):
empty = "Empty"
attach = "Attach"
from_image = "FromImage"
import_enum = "Import"
copy = "Copy"
restore = "Restore"
class SnapshotStorageAccountTypes(str, Enum):
standard_lrs = "Standard_LRS"
premium_lrs = "Premium_LRS"
standard_zrs = "Standard_ZRS"
class AccessLevel(str, Enum):
none = "None"
read = "Read"
| [
"[email protected]"
] | |
cddab9580d9af9da3a18d635c9717ed2acc1f201 | 4bc2d855558ccb962991f997e9779919031687dd | /capstone/causalmodel/migrations/0001_initial.py | d9fe267a7a9b8e4c5697913127b312847c7b2554 | [] | no_license | jmblontoc/Likha-Capstone | 80081e44b7ad6457eb776432e623c6db8b7a17e2 | e1c32911b58cd1419c8e1a554ac32210456d201d | refs/heads/master | 2022-12-10T03:26:32.946638 | 2018-12-09T04:33:10 | 2018-12-09T04:33:10 | 134,726,142 | 0 | 1 | null | 2022-11-25T23:52:42 | 2018-05-24T14:21:36 | Python | UTF-8 | Python | false | false | 1,187 | py | # Generated by Django 2.0.5 on 2018-06-27 15:33
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='DataMap',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('metric', models.CharField(max_length=255)),
('value', models.DecimalField(decimal_places=2, max_digits=10)),
('threshold', models.DecimalField(decimal_places=2, max_digits=10)),
],
),
migrations.CreateModel(
name='RootCause',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128)),
],
),
migrations.AddField(
model_name='datamap',
name='root_cause',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='causalmodel.RootCause'),
),
]
| [
"[email protected]"
] | |
770781cf8434a6484eb3418aafba1bd504f0315d | 1a819b4d69a7c455199b638b1609d3284ecbf255 | /alttprbot_srl/racebot.py | c760ffc28d30de0301fd73fb1bf3fb04a1d6a28b | [] | no_license | Maxor14/sahasrahbot | 5167355a23a4e9d91171b583fe8065acd0ab99a6 | 9183933869f87743d94867cf52c463179d0b687a | refs/heads/master | 2021-05-22T21:30:54.015013 | 2020-04-01T01:01:47 | 2020-04-01T01:01:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,951 | py | import asyncio
import math
import re
import ircmessage
from alttprbot.database import spoiler_races, srl_races
from alttprbot.tournament import league
from alttprbot.util.srl import srl_race_id
from alttprbot_srl import alt_hunter, discord_integration
from config import Config as c
starting = re.compile(
"\\x034\\x02The race will begin in 10 seconds!\\x03\\x02")
go = re.compile("\\x034\\x02GO!\\x03\\x02")
newroom = re.compile(
"Race initiated for (.*)\. Join\\x034 (#srl-[a-z0-9]{5}) \\x03to participate\.")
runnerdone = re.compile(
"(.*) (has forfeited from the race\.|has finished in .* place with a time of [0-9][0-9]:[0-9][0-9]:[0-9][0-9]\.)")
racedone = re.compile(
"^Status: Complete \| Game: .*$"
)
srl_game_whitelist = [
'The Legend of Zelda: A Link to the Past Hacks',
'A Link to the Past & Super Metroid Combo Randomizer'
]
async def topic_change_handler(target, source, message, client):
if not (source == 'RaceBot' or source == 'synack'):
return
if target.startswith('#srl-') and racedone.search(message):
await asyncio.sleep(5)
await league.process_league_race_finish(target, client)
async def handler(target, source, message, client):
if not (source == 'RaceBot' or source == 'synack'):
return
srl_id = srl_race_id(target)
if target == '#speedrunslive':
result = newroom.search(message)
if result and result.group(1) in srl_game_whitelist:
if not c.DEBUG:
await asyncio.sleep(1)
await client.join(result.group(2))
await asyncio.sleep(60)
await client.message(result.group(2), "Hi! I'm SahasrahBot, your friendly robotic elder and ALTTPR/SMZ3 seed roller. To see what I can do, visit https://sahasrahbot.synack.live")
else:
print(f'would have joined {result.group(2)}')
if target.startswith('#srl-'):
if starting.match(message) or message == 'test starting':
race = await srl_races.get_srl_race_by_id(srl_id)
if race:
if not client.in_channel(target):
await client.join(target)
await client.message(target, f".setgoal {race['goal']}")
if race['message'] is not None:
await asyncio.sleep(15)
await client.message(target, race['message'])
await srl_races.delete_srl_race(srl_id)
if go.match(message) or message == 'test go':
# spoilers
race = await spoiler_races.get_spoiler_race_by_id(srl_id)
if race:
await client.message(target, 'Sending spoiler log...')
await client.message(target, '---------------')
await client.message(target, f"This race\'s spoiler log: {race['spoiler_url']}")
await client.message(target, '---------------')
await client.message(target, 'GLHF! :mudora:')
await countdown_timer(
ircbot=client,
duration_in_seconds=race['studytime'],
srl_channel=target,
beginmessage=True,
)
await spoiler_races.delete_spoiler_race(srl_id)
await discord_integration.discord_race_start(srl_id)
await alt_hunter.check_race(srl_id)
if message == 'test complete':
await topic_change_handler(target, source, message, client)
result = runnerdone.search(message)
if result:
await discord_integration.discord_race_finish(result.group(1), srl_id)
async def countdown_timer(ircbot, duration_in_seconds, srl_channel, beginmessage=False):
loop = asyncio.get_running_loop()
reminders = [1800, 1500, 1200, 900, 600, 300,
120, 60, 30, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1]
start_time = loop.time()
end_time = loop.time() + duration_in_seconds
while True:
# print(datetime.datetime.now())
timeleft = math.ceil(start_time - loop.time() + duration_in_seconds)
# print(timeleft)
if timeleft in reminders:
minutes = math.floor(timeleft/60)
seconds = math.ceil(timeleft % 60)
if minutes == 0 and seconds > 10:
msg = f'{seconds} second(s) remain!'
elif minutes == 0 and seconds <= 10:
msg = ircmessage.style(
f"{seconds} second(s) remain!", fg='green', bold=True)
else:
msg = f'{minutes} minute(s), {seconds} seconds remain!'
await ircbot.message(srl_channel, msg)
reminders.remove(timeleft)
if loop.time() >= end_time:
if beginmessage:
await ircbot.message(srl_channel, ircmessage.style('Log study has finished. Begin racing!', fg='red', bold=True))
break
await asyncio.sleep(.5)
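# Example call (mirrors the spoiler-race flow above; the channel name is just a
# placeholder): count down a 15-minute study period and announce the start:
#   await countdown_timer(ircbot=client, duration_in_seconds=900,
#                         srl_channel="#srl-abcde", beginmessage=True)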
| [
"[email protected]"
] | |
07c821b253d8b2176af47cd42bb65e0f706db38a | 3109e3a7f2f2dccc5a806695f0adbe0fed879112 | /ecommerce/Loma/migrations/0022_auto_20190204_1200.py | 4724c3c1c3f80c03fa75c1a13fc32a1f6bb13401 | [] | no_license | Maheshwari2604/ecommercee | 9ebbf18b4fbf933a0d9641009f7f17ce836de587 | 4411e7e10eccda907711200d2c0d873db3d7f803 | refs/heads/master | 2020-04-20T18:03:49.575124 | 2019-02-12T16:02:05 | 2019-02-12T16:02:05 | 169,007,411 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-02-04 06:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Loma', '0021_auto_20190203_1829'),
]
operations = [
migrations.AlterField(
model_name='promocode_model',
name='promocode_name',
field=models.CharField(max_length=11),
),
]
| [
"[email protected]"
] | |
8c1b2c443b10f64ad81dbb48b78341c22ec527dc | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-bss/huaweicloudsdkbss/v2/model/discount_info_v3.py | 3eeec1c5d49a77c443407f9193187e6c6e93816a | [
"Apache-2.0"
] | permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,663 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class DiscountInfoV3:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'discount_id': 'str',
'discount_value': 'str',
'discount_type': 'int',
'orders': 'list[OrderV3]'
}
attribute_map = {
'discount_id': 'discount_id',
'discount_value': 'discount_value',
'discount_type': 'discount_type',
'orders': 'orders'
}
def __init__(self, discount_id=None, discount_value=None, discount_type=None, orders=None):
"""DiscountInfoV3 - a model defined in huaweicloud sdk"""
self._discount_id = None
self._discount_value = None
self._discount_type = None
self._orders = None
self.discriminator = None
self.discount_id = discount_id
self.discount_value = discount_value
self.discount_type = discount_type
self.orders = orders
@property
def discount_id(self):
"""Gets the discount_id of this DiscountInfoV3.
        ID of a discount that can be applied to the order. Pass this value when paying for the order to use the discount.
:return: The discount_id of this DiscountInfoV3.
:rtype: str
"""
return self._discount_id
@discount_id.setter
def discount_id(self, discount_id):
"""Sets the discount_id of this DiscountInfoV3.
        ID of a discount that can be applied to the order. Pass this value when paying for the order to use the discount.
:param discount_id: The discount_id of this DiscountInfoV3.
:type: str
"""
self._discount_id = discount_id
@property
def discount_value(self):
"""Gets the discount_value of this DiscountInfoV3.
        Discount rate or fixed reduction amount; empty if the discount mode is a flat price.
:return: The discount_value of this DiscountInfoV3.
:rtype: str
"""
return self._discount_value
@discount_value.setter
def discount_value(self, discount_value):
"""Sets the discount_value of this DiscountInfoV3.
        Discount rate or fixed reduction amount; empty if the discount mode is a flat price.
:param discount_value: The discount_value of this DiscountInfoV3.
:type: str
"""
self._discount_value = discount_value
@property
def discount_type(self):
"""Gets the discount_type of this DiscountInfoV3.
        Discount type. Values: 0: promotion discount; 1: contract discount; 2: commercial discount; 3: partner-granted discount; 609: order price adjustment discount.
:return: The discount_type of this DiscountInfoV3.
:rtype: int
"""
return self._discount_type
@discount_type.setter
def discount_type(self, discount_type):
"""Sets the discount_type of this DiscountInfoV3.
        Discount type. Values: 0: promotion discount; 1: contract discount; 2: commercial discount; 3: partner-granted discount; 609: order price adjustment discount.
:param discount_type: The discount_type of this DiscountInfoV3.
:type: int
"""
self._discount_type = discount_type
@property
def orders(self):
"""Gets the orders of this DiscountInfoV3.
        List of orders that the discount can be applied to. For details, see Table 3.
:return: The orders of this DiscountInfoV3.
:rtype: list[OrderV3]
"""
return self._orders
@orders.setter
def orders(self, orders):
"""Sets the orders of this DiscountInfoV3.
        List of orders that the discount can be applied to. For details, see Table 3.
:param orders: The orders of this DiscountInfoV3.
:type: list[OrderV3]
"""
self._orders = orders
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DiscountInfoV3):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
4e8fb660e0be3d0885aa9b36d0333165ee44736b | a33ee2ee3d67526fa353060b7efe48398d38e8db | /demovibes/webview/views.py | cc242832d23da2a871237f1c2cc6ad7ce9bc131c | [] | no_license | rj76/demovibes-cvgm | 5666164f57a5458872f6add1eb18620aa0fd5072 | 8c0f5e011baec3c9b732165c9c74dd07c87c290f | refs/heads/master | 2023-06-02T11:41:16.093070 | 2021-06-19T02:28:38 | 2021-06-19T02:28:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 86,960 | py | from webview import models as m
from webview import forms as f
from webview import common
from webview.decorators import atomic, cached_method
from openid_provider.models import TrustedRoot
from mybaseview import MyBaseView
from tagging.models import TaggedItem
import tagging.utils
from forum import models as fm
from django.template import Context, loader
from django.utils.html import escape
from django.utils.translation import ugettext as _
from django.http import HttpResponseRedirect, HttpResponseNotFound, HttpResponse
from django.contrib.auth.decorators import login_required, permission_required
from django.contrib.auth import logout
from django.shortcuts import get_object_or_404, redirect
from django.template import TemplateDoesNotExist
from django.conf import settings
from django.views.generic.simple import direct_to_template
from django.core.urlresolvers import reverse
from django.core.paginator import Paginator, EmptyPage, InvalidPage
from django.core.cache import cache
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth import authenticate, login
from django.db.models import Count, Sum, Avg, Max
from django.db.models import Q as DQ
import logging
import datetime
import j2shim
import hashlib
import re
import random
L = logging.getLogger('webview.views')
class WebView(MyBaseView):
basetemplate = "webview/"
class SongView(WebView):
def initialize(self):
songid = self.kwargs['song_id']
self.context['song'] = self.song = get_object_or_404(m.Song, id=songid)
class SongAddScreenshot(SongView):
def GET(self):
return create_screenshot(self.request, self.song)
class CompilationView(WebView):
def initialize(self):
compid = self.kwargs['compilation_id']
self.context['compilation'] = self.compilation = get_object_or_404(m.Compilation, id=compid)
class CompilationAddScreenshot(CompilationView):
def GET(self):
return create_screenshot(self.request, self.compilation)
class ProfileView(WebView):
def initialize(self):
username = self.kwargs['user']
self.user = get_object_or_404(m.User, username = username)
self.profile = common.get_profile(self.user)
def check_permissions(self):
return self.profile.viewable_by(self.request.user)
class ListByLetter(WebView):
"""
List a model by letter, if given.
Model need to have "startswith" and letter var need to be "letter"
"""
model = None
alphalist_cache_prefix = "ListByLetter-alphalist-"
desc_function = None
# Support for that should be included in the template
list_title = "List"
letter_url_name = ""
all_url_name = ""
def initialize (self):
query_hexdigest = hashlib.md5 (str(self.get_objects ().query)).hexdigest()
self.__alphalist_cache_key = self.alphalist_cache_prefix + query_hexdigest
alphalist = self.get_alphalist ()
letter = self.kwargs.get ("letter", False)
if letter and not letter in alphalist or letter == '-':
letter = '#'
self.letter = letter
self.context ['letter'] = letter
self.context ['al'] = alphalist
def get_list_title (self):
return self.list_title
def get_objects (self):
return self.model.objects.all()
def get_alphalist (self):
@cached_method (key = self.__alphalist_cache_key, timeout = 3)
def get ():
return map (lambda x: x['startswith'] == '#' and '-' or x['startswith'],
self.get_objects ().distinct().values ('startswith').order_by('startswith'))
return get ()
def set_context(self):
if self.model:
if self.letter:
results = self.get_objects().filter (startswith = self.letter)
else:
results = self.get_objects()
return {'object_list' : results,
'list_title' : self.get_list_title (),
'letter_url_name' : self.letter_url_name,
'all_url_name' : self.all_url_name,
'desc_function' : self.desc_function}
return {}
class AjaxifyView(WebView):
redirect_to = "dv-root"
def GET(self):
if not self.request.is_ajax():
self.redirect(self.redirect_to)
return HttpResponse("")
def make_ajax_return(self):
return HttpResponse("You forgot to define 'make_ajax_return', mate!")
def POST(self):
if not self.request.user.is_authenticated():
if self.request.is_ajax():
return HttpResponse("")
return self.redirect("/account/signin/")
songid = self.request.POST.get("songid")
if songid:
self.song = m.Song.objects.get(id = songid)
self.handle_form(self.request.POST)
if self.request.is_ajax():
return self.make_ajax_return()
self.redirect(self.request.META.get('HTTP_REFERER') or self.redirect_to)
def check_muted(request):
profile = request.user.get_profile()
muted = profile.is_muted()
if muted:
return j2shim.r2r('webview/muted.html', {'muted' : muted}, request)
#-------------------------------------------------------
class ListSmileys(WebView):
template = "smileys.html"
def set_context(self):
return {'smileys': settings.SMILEYS}
class PlaySong(SongView):
template="playsong.html"
def check_permissions(self):
return self.song.downloadable_by(self.request.user)
def set_context(self):
limit = None
if m.CHEROKEE_SECRET:
key = "urlgenlimit_%s" % self.request.user.id
number = m.get_cherokee_limit(self.request.user).get("number",0)
limit = number - cache.get(key, 0)
self.song.log(self.request.user, "Song preview / download")
return {'song': self.song, 'limit': limit}
class AddCompilation(WebView):
template = "add_compilation.html"
login_required = True
forms = [
(f.CreateCompilationForm, "compform"),
]
action = "created"
def pre_view(self):
self.context['songsinput']=""
def save_compilation(self, compdata, songs):
newcf = compdata.save(commit=False)
if not newcf.id:
newcf.created_by = self.request.user
newcf.status = "U"
newcf.last_updated = datetime.datetime.now() # Fixes bug of new compilations not appearing in Recent Updates
newcf.save()
compdata.save_m2m()
artists = []
playtime = 0
newcf.reset_songs()
for index, S in enumerate(songs):
newcf.add_song(S, index)
playtime = playtime + S.get_songlength()
for a in S.get_metadata().artists.all():
if a not in artists:
artists.append(a)
newcf.running_time = playtime
newcf.prod_artists.clear()
for a in artists:
newcf.prod_artists.add(a)
newcf.save()
newcf.log(self.request.user, "Compilation %s" % self.action)
return newcf
def POST(self):
songstr = self.request.POST.get("songsinput", "").split(",")
self.context['songsinput'] = self.request.POST.get("songsinput", "")
songs = []
if songstr:
for S in songstr:
                # By default songsinput is empty, but splitting it still yields one entry (u''),
                # so the loop is entered even though S is not a valid id; guard against that here.
if S:
songs.append(m.Song.objects.get(id=S))
if self.forms_valid and songs:
newcf = self.save_compilation(self.context["compform"], songs)
self.redirect(newcf)
class EditCompilation(AddCompilation):
staff_required = True
action = "edited"
def form_compform_init(self):
ci = self.kwargs.get("comp_id")
self.c = m.Compilation.objects.get(id=ci)
return {'instance': self.c}
def post_view(self):
if not self.context['songsinput']:
songs = self.c.get_songs()
self.context['songsinput'] = ','.join([ str(s.id) for s in songs ])
def about_pages(request, page):
try:
return direct_to_template(request, template="about/%s.html" % page)
except TemplateDoesNotExist:
return HttpResponseNotFound()
@login_required
def inbox(request):
pms = request.GET.get('type','')
delete = request.GET.get('delete','')
if delete:
try:
delpm = int(delete)
pm = m.PrivateMessage.objects.get(pk = delpm, to = request.user)
except:
return HttpResponseNotFound()
pm.visible = False
pm.save()
if pms == "sent":
mails = m.PrivateMessage.objects.filter(sender = request.user, visible = True)
else:
pms = "received" #to remove injects
mails = m.PrivateMessage.objects.filter(to = request.user, visible = True)
return j2shim.r2r('webview/inbox.html', {'mails' : mails, 'pms': pms}, request=request)
@login_required
def read_pm(request, pm_id):
pm = get_object_or_404(m.PrivateMessage, id = pm_id)
if pm.to == request.user:
pm.unread = False
pm.save()
return j2shim.r2r('webview/view_pm.html', {'pm' : pm}, request=request)
if pm.sender == request.user:
return j2shim.r2r('webview/view_pm.html', {'pm' : pm}, request=request)
return HttpResponseRedirect(reverse('dv-inbox'))
@login_required
def send_pm(request):
r = check_muted(request)
if r:
return r
if request.method == 'POST':
form = f.PmForm(request.POST)
if form.is_valid():
F = form.save(commit=False)
F.sender=request.user
F.save()
m.send_notification("%s sent you a <a href='%s'>message</a> with title '%s'" % (escape(F.sender.username), F.get_absolute_url(), escape(F.subject)), F.to)
return HttpResponseRedirect(reverse('dv-inbox'))
else:
title = request.GET.get('title', "")
to = request.GET.get('to', "")
try:
U = m.User.objects.get(username=to)
except:
U = None
form = f.PmForm(initial= {'to': U, 'subject' : title})
return j2shim.r2r('webview/pm_send.html', {'form' : form}, request)
class addComment(SongView):
"""
Add a comment to a song.
"""
login_required = True
def pre_view(self):
self.redirect(self.song)
def POST(self):
r = check_muted(self.request)
if r: return r
comment = self.request.POST.get("Comment", "").strip()
if comment:
m.SongComment.objects.create(comment = comment, song = self.song, user = self.request.user)
if getattr(settings, "NOTIFY_NEW_SONG_COMMENT", False):
m.send_notification("%s commented on the song <a href='%s'>%s</a>" % (escape(self.request.user.username), self.song.get_absolute_url(), escape(self.song.title)), None, 2)
def site_about(request):
"""
Support for a generic 'About' function
"""
return j2shim.r2r('webview/site-about.html', { }, request)
def chat(request):
"""
Support for a generic 'chat' page
"""
return j2shim.r2r('webview/chat.html', { }, request)
class ListQueue(WebView):
"""
Display the current song, the next songs in queue, and the latest 20 songs in history.
Also provides a way to view DJRandom mood.
"""
template = "queue_list.html"
def set_context(self):
# DJRandom status - - - - - - - --
djrandom_options = m.DJRandomOptions.snapshot ()
mood = djrandom_options.mood
avoid_explicit = djrandom_options.avoid_explicit
mood_form = f.DJRandomMoodForm (initial = {'mood' : mood})
mood_html = mood_form.get_mood_html (set_by = mood.comment)
ae_form = f.DJRandomAvoidExplicitForm (initial = {'avoid_explicit' : avoid_explicit})
ae_html = ae_form.get_avoid_explicit_html (set_by = avoid_explicit.comment)
return {'djrandom_mood_html' : mood_html,
'djrandom_mood_field_html' : mood_form.get_mood_field_html (),
'djrandom_avoid_explicit_html' : ae_html,
'djrandom_avoid_explicit_field_html' : ae_form.get_avoid_explicit_field_html (),
'now_playing' : "",
'history' : common.get_history(),
'queue' : common.get_queue(),
}
# Slightly modified template of list_songs, to show songs via year
def list_year(request, year_id):
songs = m.Song.active.filter (songmetadata__active = True, songmetadata__release_year = year_id).order_by('title')
params = {
'object_list' : songs,
'year' : year_id,
'letter_url_name' : "dv-year"
}
return j2shim.r2r ('webview/year_list.html', params, request)
def list_song(request, song_id):
song = get_object_or_404 (m.Song, id = song_id)
# Simple queries, it is expected that they are evaluated from inside the template only
# .. otherwise cache is quite useless. Just try to keep it simple here
comps = m.Compilation.objects.filter (songs__id = song.id)
remixes = m.Song.active.filter (songmetadata__active = True, songmetadata__remix_of_id = song.id)
def calc_tag_cloud ():
tags = m.Tag.objects.filter (id__in = song.tags).annotate (count = Count ("items"))
return tagging.utils.calculate_cloud (tags)
params = {
'object' : song,
'vote_range': [1, 2, 3, 4, 5],
'comps' : comps,
'remixes' : remixes,
'related_f': (lambda: m.Song.tagged.related_to (song, num = 5)),
'tags_f': calc_tag_cloud
}
return j2shim.r2r ('webview/song_detail.html', params, request)
# This can probbably be made a generic object
def list_screenshot(request, screenshot_id):
screenshot = get_object_or_404(m.Screenshot, id=screenshot_id)
return j2shim.r2r('webview/screenshot_detail.html', { 'object' : screenshot }, request)
class ViewUserFavs(ProfileView):
"""
List the favorites of a user
"""
template = "user_favorites.html"
def set_context(self):
favorites = m.Favorite.objects.filter(user = self.user)
return {'favorites':favorites, 'favuser': self.user}
class MyProfile(WebView):
template = "my_profile.html"
login_required = True
forms = [(f.ProfileForm, "form")]
def initialize(self):
self.profile = common.get_profile(self.request.user)
if self.profile.have_artist():
self.context['lic'] = f.LicenseForm()
self.links = LinkCheck("U", object = self.profile)
def pre_view(self):
rootid = self.request.REQUEST.get("killroot", False)
if rootid and rootid.isdigit():
root = TrustedRoot.objects.get(id=rootid)
if root.openid.user == self.request.user:
root.delete()
return self.redirect("dv-my_profile")
def handle_artistedit(self):
L = f.LicenseForm(self.request.POST)
if L.is_valid():
artist = self.request.user.artist
lic = L.cleaned_data['license']
for song in artist.get_songs():
song.log(self.request.user, "License Mass Change to %s" % lic)
song.license = lic
song.save()
self.redirect("dv-my_profile")
def POST(self):
if self.profile.have_artist() and self.request.POST.get("artistdata"):
self.handle_artistedit()
elif self.forms_valid and self.links.is_valid(self.request.POST):
self.context['form'].save()
self.links.save(self.profile)
self.redirect("dv-my_profile")
def form_form_init(self):
return {'instance': self.profile}
def set_context(self):
return {'profile': self.profile, 'links': self.links}
class ViewProfile(ProfileView):
"""
View a user's profile
"""
template = "view_profile.html"
def set_context(self):
return {'profile': self.profile}
def search(request):
"""
    Return the first matches (up to SEARCH_LIMIT, default 40) of users, songs, artists, groups, compilations and labels.
"""
if request.method == 'POST' and "Search" in request.POST:
searchterm = request.POST['Search']
result_limit = getattr(settings, 'SEARCH_LIMIT', 40)
if settings.USE_FULLTEXT_SEARCH == True:
users = m.User.objects.filter(username__search = searchterm)[:result_limit]
songs = m.Song.objects.select_related(depth=1).filter(title__search = searchterm)[:result_limit]
artists = m.Artist.objects.filter(handle__search = searchterm)|m.Artist.objects.filter(name__search = searchterm)[:result_limit]
groups = m.Group.objects.filter(name__search = searchterm)[:result_limit]
compilations = m.Compilation.objects.filter(name__search = searchterm)[:result_limit]
labels = m.Label.objects.filter(name__search = searchterm)[:result_limit]
else:
users = m.User.objects.filter(username__icontains = searchterm)[:result_limit]
songs = m.Song.objects.select_related(depth=1).filter(title__icontains = searchterm)[:result_limit]
artists = m.Artist.objects.filter(handle__icontains = searchterm)|m.Artist.objects.filter(name__icontains = searchterm)[:result_limit]
groups = m.Group.objects.filter(name__icontains = searchterm)[:result_limit]
compilations = m.Compilation.objects.filter(name__icontains = searchterm)[:result_limit]
labels = m.Label.objects.filter(name__icontains = searchterm)[:result_limit]
return j2shim.r2r('webview/search.html', \
{ 'songs' : songs, 'artists' : artists, 'groups' : groups, 'users' : users, 'compilations' : compilations, 'labels' : labels }, \
request=request)
return j2shim.r2r('webview/search.html', {}, request=request)
def show_approvals(request):
"""
Shows the most recently approved songs in it's own window
"""
result_limit = getattr(settings, 'UPLOADED_SONG_COUNT', 150)
songs = m.SongApprovals.objects.order_by('-id')[:result_limit]
return j2shim.r2r('webview/recent_approvals.html', { 'songs': songs , 'settings' : settings }, request=request)
class ListArtists(ListByLetter):
template = "artist_list.html"
model = m.Artist
list_title = "Complete Artist List"
letter_url_name = "dv-artists_letter"
all_url_name = "dv-artists"
class ListGroups(ListByLetter):
template = "group_list.html"
model = m.Group
class ListLabels(ListByLetter):
template = "label_list.html"
model = m.Label
class ListComilations(ListByLetter):
template = "compilation_list.html"
model = m.Compilation
list_title = "Complete Compilation / Album / Production List"
letter_url_name = "dv-compilations_letter"
all_url_name = "dv-compilations"
class ListSongs(ListByLetter):
template = "song_list.html"
model = m.Song
list_title = "List Of Songs"
letter_url_name = "dv-songs_letter"
all_url_name = "dv-songs"
class ListScreenshots(ListByLetter):
template = "screenshot_list.html"
model = m.Screenshot
list_title = "Gallery Of Images"
letter_url_name = "dv-screenshots_letter"
all_url_name = "dv-screenshots"
def get_objects(self):
return self.model.objects.filter(status="A")
class ThemeClass(WebView):
def initialize(self):
themeid = self.kwargs['theme_id']
self.context['theme'] = self.theme = get_object_or_404(m.Theme, id=themeid)
class ThemeInfo(ThemeClass):
template = "theme_details.html"
class ThemeEdit(ThemeClass):
template = "theme_edit.html"
forms = [(f.ThemeForm, "form")]
login_required = True
def form_form_init(self):
return {'instance': self.theme}
def POST(self):
if self.forms_valid and self.request.user == self.theme.creator:
self.context['form'].save()
self.redirect(self.context['theme'])
class ThemeAddImage(ThemeClass):
def GET(self):
if self.request.user == self.theme.creator:
return create_screenshot(self.request, self.theme)
self.redirect("/")
class ThemeList(WebView):
template = "themes_list.html"
def get_objects(self):
q = m.Theme.objects.filter (active=True)
q = q.annotate (user_count = Count("userprofile"))
# Add user who didn't care to select a theme
themeless = m.Userprofile.objects.filter (theme = None).count ()
if themeless:
default_theme = m.Theme.objects.all().order_by("-default")
if default_theme:
default_theme = default_theme[0]
for t in q:
if t.id == default_theme.id:
t.user_count += themeless
return q
def POST(self):
id = int(self.request.POST.get("theme_id"))
theme = m.Theme.objects.get(id=id)
if self.request.user.is_authenticated():
p = self.request.user.get_profile()
p.theme = theme
p.save()
self.redirect("dv-themelist")
def set_context(self):
return {"themes": self.get_objects() }
@login_required
def log_out(request):
"""
Shows the user a form, and logs the user out if the form is submitted to this address.
"""
if request.method == 'POST':
logout(request)
return HttpResponseRedirect("/")
return j2shim.r2r('webview/logout.html', {}, request=request)
class songHistory(SongView):
"""
List queue history of song
"""
template = "song_history.html"
def set_context(self):
return {'requests': self.song.queue_set.all()}
class songVotes(SongView):
"""
List vote history of song
"""
template = "song_votes.html"
def set_context(self):
return {'votelist': self.song.songvote_set.all()}
class songComments(SongView):
"""
List the comments belonging to a song
"""
template = "song_comments.html"
def set_context(self):
return {'commentlist': self.song.songcomment_set.all()}
def view_compilation(request, comp_id):
"""
Try to view a compilation entry.
"""
permission = request.user.has_perm("webview.make_session")
comp = get_object_or_404(m.Compilation, id=comp_id) # Find it, or return a 404 error
if permission:
sessionform = f.CreateSessionForm()
else:
sessionform = False
if request.method == "POST" and permission:
sessionform = f.CreateSessionForm(request.POST)
if sessionform.is_valid():
desc = sessionform.cleaned_data['description']
playtime = sessionform.cleaned_data['time']
for song in comp.get_songs():
m.Queue.objects.create(song=song, played=False, playtime=playtime, requested_by = request.user, description = desc)
common.get_queue(True)
return redirect("dv-queue")
return j2shim.r2r('webview/compilation.html',
{ 'comp' : comp, 'user' : request.user , 'sessionform': sessionform},
request=request)
class OnelinerHistorySearch(WebView):
template = "oneliner_search.html"
forms = [(f.OnelinerHistory, "form")]
results = []
staff_required = True
def POST(self):
if self.forms_valid:
r = m.Oneliner.objects.all()
data = self.context["form"].cleaned_data
user = data["username"]
if user:
user = m.User.objects.get(username=user)
r = r.filter(user=user)
start = data["start"]
num = data["results"]
self.results = r[start:num+start]
def set_context(self):
return {"results": self.results}
def oneliner(request):
oneliner = m.Oneliner.objects.select_related(depth=1).order_by('-id')[:20]
return j2shim.r2r('webview/oneliner.html', {'oneliner' : oneliner}, \
request=request)
@login_required
def oneliner_submit(request):
"""
Add a text line to the oneliner.
Returns user to referrer position, or to /
"""
message = request.POST.get('Line').strip()
common.add_oneliner(request.user, message)
try:
refer = request.META['HTTP_REFERER']
return HttpResponseRedirect(refer)
except:
return HttpResponseRedirect("/")
@login_required
def list_favorites(request):
"""
Display a user's favorites.
"""
user = request.user
songs = m.Favorite.objects.filter(user=user)
try:
user_profile = m.Userprofile.objects.get(user = user)
use_pages = user_profile.paginate_favorites
except:
# In the event it bails, revert to pages hehe
use_pages = True
if(use_pages):
paginator = Paginator(songs, settings.PAGINATE)
page = int(request.GET.get('page', '1'))
try:
songlist = paginator.page(page)
except (EmptyPage, InvalidPage):
songlist = paginator.page(paginator.num_pages)
return j2shim.r2r('webview/favorites.html', \
{'songs': songlist.object_list, 'page' : page, 'page_range' : paginator.page_range}, \
request=request)
return j2shim.r2r('webview/favorites.html', { 'songs': songs }, request=request)
class QueueSong(AjaxifyView):
redirect_to = "dv-queue"
def handle_form(self, form):
self.r = common.queue_song(self.song, self.request.user)
def make_ajax_return(self):
if self.r:
return HttpResponse("""<span style="display:none">l</span>
<img class="song_tail" src="%slock.png" title="Locked" alt="Locked"/>""" %
settings.MEDIA_URL)
return HttpResponse("")
class ChangeFavorite(AjaxifyView):
redirect_to = "dv-favorites"
def handle_form(self, form):
P = form.get
if P("change") == "remove":
Q = m.Favorite.objects.filter(user = self.request.user, song = self.song)
for x in Q:
x.delete() # For running Favorite.delete() logic
m.send_notification("Song removed from your favorites", self.request.user)
if P("change") == "add":
try:
m.Favorite.objects.create(user = self.request.user, song = self.song)
m.send_notification("Song added to your favorites", self.request.user)
except:
pass
def make_ajax_return(self):
s = "{{ display.favorite(song, user) }}"
c = {'song': self.song, 'user': self.request.user}
return HttpResponse(j2shim.render_string(s, c))
class VoteSong(AjaxifyView):
redirect_to = "dv-root"
@atomic("vote")
def handle_form(self, form):
self.int_vote = int(form.get("vote", form.get("ajaxvote")))
if self.int_vote <= 5 and self.int_vote > 0:
self.song.set_vote(self.int_vote, self.request.user)
def make_ajax_return(self):
s = "{{ display.song_vote(song, value) }}"
c = {'song': self.song, 'value': self.int_vote}
return HttpResponse(j2shim.render_string(s, c))
class LinkCheck(object):
def __init__(self, linktype, object = None, status = 0, user = None, add=False):
self.type = linktype
self.add = add
self.verified = []
self.user = user
self.status = status
self.object = object
self.valid = False
self.get_list()
self.title = "External Resources"
def get_link_for(self, o, generic):
if not o or not generic:
return None
bla = ContentType.objects.get_for_model(o)
r = m.GenericLink.objects.filter(content_type__pk=bla.id, object_id=o.id, link=generic)
return r and r[0] or None
def get_list(self):
self.linklist = m.GenericBaseLink.objects.filter(linktype = self.type)
r = []
for x in self.linklist:
val = self.get_link_for(self.object, x)
value=val and val.value or ""
r.append({'link': x, 'value': value, "error": "", "comment": ""})
self.links = r
return self.linklist
def __unicode__(self):
return self.as_table()
def as_table(self):
"""
Print links form as table
"""
return j2shim.r2s('webview/t/linksform.html', \
{'links': self.links, 'title': self.title })
def is_valid(self, postdict):
"""
Check if given links are valid according to given regex
"""
self.valid = True
for entry in self.links:
l = entry['link'] # GenericBaseLink object
key = "LL_%s" % l.id
if key in postdict:
val = postdict[key].strip()
if val:
ckey = key+"_comment"
comment = postdict[ckey].strip() if ckey in postdict else ""
#Fill out dict in case it needs to be returned to user
entry['value'] = val
entry['comment'] = comment
if re.match(l.regex + "$", val):
self.verified.append((l, val, comment)) #Add to approved list
else:
self.valid = False
entry['error'] = "The input did not match expected value"
else:
self.verified.append((l, "", "")) #No value for this link
return self.valid
def save(self, obj):
"""
Save links to database
"""
if self.verified and self.valid:
for l, val, comment in self.verified:
r = self.get_link_for(obj, l)
if val:
if r and not self.add:
r.value = val
r.save()
else:
m.GenericLink.objects.create(
content_object=obj,
value=val,
link=l,
status = self.status,
comment = comment,
user = self.user
)
else:
if r and not self.add:
r.delete()
obj.save() # For caching
@permission_required('webview.change_songmetadata')
def new_songinfo_list(request):
alink = request.GET.get("alink", False)
status = request.GET.get("status", False)
if alink and status and status.isdigit():
link = get_object_or_404(m.GenericLink, id=alink)
link.status = int(status)
link.content_object.save()
link.save()
nusonginfo = m.SongMetaData.objects.filter(checked=False).order_by('added') # Oldest info events will be shown first
nulinkinfo = m.GenericLink.objects.filter(status=1)
c = {'metainfo': nusonginfo, 'linkinfo': nulinkinfo}
return j2shim.r2r("webview/list_newsonginfo.html", c, request)
@permission_required('webview.change_songmetadata')
def list_songinfo_for_song(request, song_id):
song = get_object_or_404(m.Song, id=song_id)
metalist = m.SongMetaData.objects.filter(song=song)
c = {'metalist':metalist, 'song': song}
return j2shim.r2r("webview/list_songinfo.html", c, request)
@login_required
def add_songlinks(request, song_id):
song = get_object_or_404(m.Song, id=song_id)
links = LinkCheck("S", status=1, user = request.user, add = True)
if request.method == "POST":
if links.is_valid(request.POST):
links.save(song)
return redirect(song)
c = {'song': song, 'links': links}
return j2shim.r2r("webview/add_songlinks.html", c, request)
@permission_required('webview.change_songmetadata')
def view_songinfo(request, songinfo_id):
meta = get_object_or_404(m.SongMetaData, id=songinfo_id)
post_ok = getattr(settings, 'ADMIN_EMAIL_ON_INFO_APPROVE', False) # Do we send an email on info approve?
if request.method == "POST":
if request.POST.get("activate"):
if post_ok :
if not meta.checked and meta.user:
meta.user.get_profile().send_message(
subject="Song info approved",
message="Your metadata for song [song]%s[/song] is now active :)" % meta.song.id,
sender = request.user
)
meta.song.log(request.user, "Approved song metadata")
meta.set_active()
if request.POST.get("deactivate"):
if not meta.checked and meta.user:
meta.user.get_profile().send_message(
subject="Song info not approved",
message="Your metadata for song [song]%s[/song] was not approved :(" % meta.song.id,
sender = request.user
)
meta.checked = True
meta.song.log(request.user, "Rejected metadata %s" % meta.id)
meta.save()
c = {'meta': meta }
return j2shim.r2r("webview/view_songinfo.html", c, request)
#Not done
class editSonginfo(SongView):
template = "edit_songinfo.html"
forms = [(f.EditSongMetadataForm, "form")]
login_required = True
def form_form_init(self):
if self.method == "POST":
meta = m.SongMetaData(song=self.song, user=self.request.user)
else:
meta = self.song.get_metadata()
meta.comment = ""
return {'instance': meta}
def POST(self):
if self.forms_valid:
self.context['form'].save()
self.redirect(self.context['song'])
@login_required
def edit_songinfo(request, song_id):
song = get_object_or_404(m.Song, id=song_id)
meta = song.get_metadata()
meta.comment = ""
form2 = False
if (request.user.get_profile().have_artist() and request.user.artist in meta.artists.all()) or (request.user.is_staff):
form2 = f.SongLicenseForm(instance=song)
if request.method == "POST":
meta = m.SongMetaData(song=song, user=request.user)
if form2 and request.POST.get("special") == "licchange":
form2 = f.SongLicenseForm(request.POST, instance=song)
if form2.is_valid():
s = form2.save()
song.log(request.user, "Changed song license to %s" % s.license)
return redirect(song)
else:
form = f.EditSongMetadataForm(request.POST, instance=meta)
if form.is_valid():
form.save()
return redirect(song)
else:
form = f.EditSongMetadataForm(instance=meta)
c = {'form': form, 'song': song, 'form2': form2}
return j2shim.r2r("webview/edit_songinfo.html", c, request)
@login_required
def upload_song(request, artist_id):
# Check to see if Uploading is currently disabled
DisableUploads = getattr(settings, 'DISABLE_UPLOADS', False)
if DisableUploads:
# Uploads are currently disabled in the system
return HttpResponseRedirect(reverse('dv-queue'))
artist = get_object_or_404(m.Artist, id=artist_id)
auto_approve = getattr(settings, 'ADMIN_AUTO_APPROVE_UPLOADS', 0)
artist_auto_approve = getattr(settings, 'ARTIST_AUTO_APPROVE_UPLOADS', 1)
links = LinkCheck("S", user = request.user)
# Quick test to see if the artist is currently active. If not, bounce
# To the current queue!
if artist.status != 'A':
return HttpResponseRedirect(reverse('dv-queue'))
if request.method == 'POST':
if artist_auto_approve and artist.link_to_user == request.user:
# Auto Approved Song. Set Active, Add to Recent Uploads list
status = 'A'
else:
status = 'U'
# Check to see if moderation settings allow for the check
if request.user.is_staff and auto_approve == 1:
# Automatically approved due to Moderator status
status = 'A'
a = m.Song(uploader = request.user, status = status)
form = f.UploadForm(request.POST, request.FILES, instance = a)
infoform = f.SongMetadataForm(request.POST)
if links.is_valid(request.POST) and form.is_valid() and infoform.is_valid():
new_song = form.save(commit=False)
new_song.save()
songinfo = infoform.save(commit=False)
songinfo.user = request.user
songinfo.song = new_song
songinfo.checked = True
songinfo.save()
infoform.save_m2m()
form.save_m2m()
songinfo.artists.add(artist)
songinfo.set_active()
links.save(new_song)
if(new_song.status == 'A'):
# Auto Approved!
try:
# If the song entry exists, we shouldn't care
exist = m.SongApprovals.objects.get(song = new_song)
except:
# Should throw when the song isn't found in the DB
Q = m.SongApprovals(song = new_song, approved_by=request.user, uploaded_by=request.user)
Q.save()
return HttpResponseRedirect(new_song.get_absolute_url())
else:
form = f.UploadForm()
infoform = f.SongMetadataForm()
return j2shim.r2r('webview/upload.html', \
{'form' : form, 'infoform': infoform, 'artist' : artist, 'links': links }, \
request=request)
@permission_required('webview.change_song')
def activate_upload(request):
if "song" in request.GET and "status" in request.GET:
songid = int(request.GET['song'])
status = request.GET['status']
song = m.Song.objects.get(id=songid)
url = m.Site.objects.get_current()
if status == 'A':
stat = "Accepted"
song.status = "A"
song.log(request.user, "Approved song")
if status == 'R':
stat = "Rejected"
song.status = 'R'
song.log(request.user, "Rejected song")
# This used to be proprietary, it is now a template. AAK
mail_tpl = loader.get_template('webview/email/song_approval.txt')
c = Context({
'songid' : songid,
'song' : song,
'site' : m.Site.objects.get_current(),
'stat' : stat,
'url' : url,
})
song.save()
# Only add if song is approved! Modified to check to see if song exists first!
# There is probably a better way of doing this crude check! AAK
if(status == 'A'):
try:
# If the song entry exists, we shouldn't care
exist = m.SongApprovals.objects.get(song = song)
except:
# Should throw when the song isn't found in the DB
Q = m.SongApprovals(song=song, approved_by=request.user, uploaded_by=song.uploader)
Q.save()
if getattr(settings, "NOTIFY_NEW_SONG_APPROVED", False):
m.send_notification("Song <a href='%s'>%s</a> was accepted and is now available for queuing!" % (
song.get_absolute_url(),
escape(song.title),
), None, 2)
if (song.uploader.get_profile().pm_accepted_upload and status == 'A') or status == 'R':
song.uploader.get_profile().send_message(
sender = request.user,
message = mail_tpl.render(c),
subject = "Song Upload Status Changed To: %s" % stat
)
songs = m.Song.objects.filter(status = "U").order_by('added')
return j2shim.r2r('webview/uploaded_songs.html', {'songs' : songs}, request=request)
def showRecentChanges(request):
# Get some default stat values
artist_limit = getattr(settings, 'RECENT_ARTIST_VIEW_LIMIT', 20)
song_limit = getattr(settings, 'RECENT_SONG_VIEW_LIMIT', 20)
label_limit = getattr(settings, 'RECENT_LABEL_VIEW_LIMIT', 20)
group_limit = getattr(settings, 'RECENT_GROUP_VIEW_LIMIT', 20)
comp_limit = getattr(settings, 'RECENT_COMP_VIEW_LIMIT', 20)
# Make a list of stuff needed for the stats page
songlist = m.Song.objects.order_by('-songmetadata__added')[:song_limit]
artistlist = m.Artist.objects.order_by('-last_updated')[:artist_limit]
labellist = m.Label.objects.order_by('-last_updated')[:label_limit]
grouplist = m.Group.objects.order_by('-last_updated')[:group_limit]
complist = m.Compilation.objects.order_by('-last_updated')[:comp_limit]
# And now return this as a template. default page cache is 5 minutes, which is ample enough
# To show real changes, without stressing out the SQL loads
return j2shim.r2r('webview/recent_changes.html', {'songs' : songlist, 'artists' : artistlist, 'groups' : grouplist,
'labels' : labellist, 'compilations' : complist}, request=request)
class UsersOverview (WebView):
template = "users_overview.html"
def set_context (self):
limit = 50
country_stats_q = m.User.objects.values ("userprofile__country")
country_stats_q = country_stats_q.annotate (count = Count("pk"))
country_stats_q = country_stats_q.order_by ('-count', "userprofile__country")
by_votes_q = m.User.objects.values ("username", 'userprofile__country')
by_votes_q = by_votes_q.annotate (count = Count("songvote"), avg = Avg('songvote__vote'))
by_votes_q = by_votes_q.order_by ('-count')
by_votes_q = by_votes_q [:limit]
by_oneliner_q = m.User.objects.values ("username", 'userprofile__country')
by_oneliner_q = by_oneliner_q.annotate (count = Count("oneliner"))
by_oneliner_q = by_oneliner_q.order_by ('-count')
by_oneliner_q = by_oneliner_q [:limit]
by_uploads_q = m.SongApprovals.objects.values ("uploaded_by__username", 'uploaded_by__userprofile__country')
by_uploads_q = by_uploads_q.annotate (count = Count("pk"))
by_uploads_q = by_uploads_q.order_by ('-count')
by_uploads_q = by_uploads_q [:limit]
by_tagging_q = m.TagHistory.objects.values ("user__username", 'user__userprofile__country')
by_tagging_q = by_tagging_q.annotate (count = Count("pk"))
by_tagging_q = by_tagging_q.order_by ('-count')
by_tagging_q = by_tagging_q [:limit]
by_requester_q = m.Queue.objects.values ("requested_by__username", 'requested_by__userprofile__country')
by_requester_q = by_requester_q.annotate (count = Count("pk"), avg = Avg ("song__rating"))
by_requester_q = by_requester_q.order_by ('-count')
by_requester_q = by_requester_q [:limit]
by_comments_q = m.SongComment.objects.values ("user__username", 'user__userprofile__country')
by_comments_q = by_comments_q.annotate (count = Count("pk"))
by_comments_q = by_comments_q.order_by ('-count')
by_comments_q = by_comments_q [:limit]
by_posts_q = fm.Post.objects.values ("author__username", 'author__userprofile__country')
by_posts_q = by_posts_q.annotate (count = Count("pk"))
by_posts_q = by_posts_q.order_by ('-count')
by_posts_q = by_posts_q [:limit]
# We can return queries, since they are lazy. It is supposed that access is cached in html
return {'by_votes_q' : by_votes_q,
'by_oneliner_q' : by_oneliner_q,
'by_requester_q' : by_requester_q,
'by_comments_q' : by_comments_q,
'by_posts_q' : by_posts_q,
'by_tagging_q' : by_tagging_q,
'by_uploads_q' : by_uploads_q,
'country_stats_q' : country_stats_q}
class RadioOverview (WebView):
# This is supposed to be cached both on HTML level (to avoid overheads on HTML rendering)
# and on code level to avoid set_context method overheads
template = "radio_overview.html"
@cached_method (key = "RadioOverview-get_total_played_length", timeout = 60)
def get_total_played (self):
q = m.Song.active.extra (
select = {"total_played_length" : "sum(song_length * times_played)",
"total_times_played" : "sum(times_played)"})
return list (q.values ("total_played_length", "total_times_played")) [0]
@cached_method (key = "RadioOverview-stats_by_status", timeout = 60)
def list_stats_by_status (self):
return self.__list_grouped_by (m.Song.objects, 'status')
@cached_method (key = "RadioOverview-votes_by_status", timeout = 60)
def list_votes_stats (self):
return self.__list_grouped_by (m.Song.active, 'rating_votes', limit = 6)
@cached_method (key = "RadioOverview-source_stats", timeout = 60)
def list_source_stats (self):
type_by_id = {None : m.Struct (title = "----------------")}
for type in m.SongType.objects.all():
type_by_id [type.id] = type
stats = self.__list_grouped_by (m.Song.active.filter (songmetadata__active = True),
'songmetadata__type')
for stat in stats:
stat ['source'] = type_by_id [stat['songmetadata__type']].title
return stats
@cached_method (key = "RadioOverview-country_stats", timeout = 86400)
def list_country_stats (self):
return self.__list_grouped_by (
m.Song.active.filter (songmetadata__active = True),
'songmetadata__artists__home_country',
order_by = ['-count', 'songmetadata__artists__home_country'])
@cached_method (key = "RadioOverview-set_context", timeout = 60)
def set_context (self):
# Overview
stats_by_status = self.list_stats_by_status ()
total_songs = 0
total_length = 0
unlocked_songs = 0
unlocked_length = 0
status_dict = dict (m.Song.STATUS_CHOICES)
for stat in stats_by_status:
stat ['status'] = status_dict [stat ['status']]
total_songs += stat ['count']
total_length += stat ['total_playtime']
unlocked_songs += stat ['unlocked_count']
unlocked_length += stat ['unlocked_playtime']
# Result
return {'vote_stats' : self.list_votes_stats (),
"stats_by_status" : stats_by_status,
"source_stats" : self.list_source_stats (),
"country_stats" : self.list_country_stats (),
'total_length' : total_length,
'total_songs' : total_songs,
'unlocked_length' : unlocked_length,
'unlocked_songs' : unlocked_songs,
'total_played' : self.get_total_played ()}
def __list_grouped_by (self, qmanager, field, limit = None, order_by = None):
# It is hard or impossible to write that with current django without issuing two queries
# because django doesn't support expressions in annotations...
def qfiltered (f = None):
q = qmanager
if f:
q = q.filter (f)
q = q.values (field)
q = q.annotate (count = Count("pk"), total_playtime = Sum('song_length'))
if order_by:
q = q.order_by (*order_by)
else:
q = q.order_by (field)
if limit:
return q [:limit]
else:
return q.all ()
# Get total
by_field = {}
stats = qfiltered ()
for stat in stats:
by_field [stat[field]] = stat
stat ['unlocked_count'] = 0
stat ['unlocked_playtime'] = 0
# Mix-in playable stats
for pstat in qfiltered (m.Song.unlocked_condition()):
fieldv = pstat [field]
if fieldv in by_field:
stat = by_field [fieldv]
stat ['unlocked_count'] = pstat ['count']
stat ['unlocked_playtime'] = pstat ['total_playtime']
# Force evaluation, otherwise django's cache doesn't cache it at all! :E
return list (stats)
class RadioStatus(WebView):
template = "stat_songs.html"
def list_favorites(self):
return m.Song.objects.order_by('-num_favorited')
def list_voted(self):
limit = getattr(settings, "RADIO_STATUS_VOTED_MIN_VOTES", 1)
return m.Song.objects.filter(rating_votes__gt = limit - 1).order_by('-rating','-rating_votes')
def list_leastvotes (self):
return m.Song.objects.filter (m.Song.unlocked_condition ()).order_by ('rating_votes', '?')[:100]
def list_forgotten (self):
q = m.Song.active.filter (m.Song.unlocked_condition ())
q = q.annotate (last_requested = Max("queue__requested"))
q = q.order_by ('last_requested')
q = q[:100]
return q
def list_random(self):
max_id = m.Song.objects.order_by('-id')[0].id
max_songs = m.Song.objects.filter(status="A").count()
num_songs = 100
num_songs = min(num_songs, max_songs)
songlist = []
r_done = []
r = random.randint(0, max_id+1)
while len(songlist) < num_songs:
r_list = []
curr_count = (num_songs - len(songlist) + 2)
for x in range(curr_count):
while r in r_done:
r = random.randint(0, max_id+1)
r_list.append(r)
r_done.extend(r_list)
songlist.extend([s for s in m.Song.objects.filter(id__in=r_list, status="A")])
return songlist
def list_mostvotes(self):
return m.Song.objects.order_by('-rating_votes')
def list_queued2(self):
return m.Song.objects.filter(m.Song.unlocked_condition()).order_by('times_played', 'locked_until')
def list_queued(self):
return m.Song.objects.filter(status="A").order_by('-times_played')
def initialize(self):
self.stats = {
'random': ("A selection of random songs from the database!",
"rating_votes",
"# Votes",
self.list_random),
'leastvotes': ("Songs with the least number of votes in the database.",
"rating_votes",
"# Votes",
self.list_leastvotes),
'forgotten': ("Songs which have not been played in a long time (or not at all).",
"times_played",
"# Plays",
self.list_forgotten),
'favorites': ("Songs which appear on the most users' favourite lists.",
"num_favorited",
"# Faves",
self.list_favorites),
'voted': ("The highest rated songs in the database.",
"rating",
"Rating",
self.list_voted),
'queued': ("The most played songs in the database.",
"times_played",
"# Plays",
self.list_queued),
'unplayed': ("The least played songs in the database.",
"times_played",
"# Plays",
self.list_queued2),
'mostvotes': ("Songs with the highest number of votes cast.",
"rating_votes",
"# Votes",
self.list_mostvotes),
}
self.stattype = self.kwargs.get("stattype", "")
def set_context(self):
if self.stattype in self.stats.keys():
title, stat, name, songs = self.stats[self.stattype]
return {'songs': songs()[:100],
'title': title,
'numsongs': 100,
'stat': stat,
'name': name}
self.template = "radio_status.html"
return {'keys' : self.stats}
class HelpusWithArtists (ListArtists):
list_title = "Artists with incorrect/missing information"
letter_url_name = "dv-helpus-artist_letter"
all_url_name = "dv-helpus-artist"
condition = ~DQ (home_country__in = m.country_codes2, status = 'A')
condition |= DQ (artist_pic = '', status = 'A')
def get_objects (self):
return self.model.objects.filter (self.condition)
def desc_function (self, artist):
"""Describe what is wrong with an artist."""
problems = []
if artist.status == 'A':
country_lc = artist.home_country.lower()
if country_lc == "":
problems.append (_("no country"))
elif country_lc not in m.country_codes2:
problems.append (_("unknown country (" + artist.home_country + ")"))
if artist.artist_pic == "":
problems.append (_("no picture"))
if problems:
problems = ", ".join (problems)
problems = problems[0].upper() + problems[1:]
return " - " + problems + "."
else:
# WTF? why are we here then?
return ""
class HelpusWithSongs (ListSongs):
list_title = "Songs with problems"
letter_url_name = "dv-helpus-song_letter"
all_url_name = "dv-helpus-song"
# Kaput
condition = DQ (status = 'K')
# Active but no compilation
condition |= DQ (status = 'A',
compilationsonglist = None,
songmetadata__active = True,
songmetadata__type__compilation_expected = True)
# No source (song type)
condition |= DQ (status = 'A',
songmetadata__type = None,
songmetadata__active = True)
def get_objects (self):
q = self.model.objects.filter (self.condition)
q = q.annotate (comps_count = Count("compilationsonglist__pk"))
# I hate that but until it is not django 1.4 we can't do better
q = q.extra (select = {'compilation_expected' : '`webview_songtype`.`compilation_expected`',
'songtype' : '`webview_songtype`.`id`'})
return q
def desc_function (self, song):
"""Describe what is wrong with an artist."""
problems = []
if song.status == 'K':
problems.append ("bad status")
if song.compilation_expected and song.comps_count == 0 and song.status == 'A':
problems.append ("no compilations")
if song.status == 'A' and song.songtype == None:
problems.append ("no source")
if problems:
problems = ", ".join (problems)
problems = problems[0].upper() + problems[1:]
return problems
else:
# WTF? why are we here then?
return ""
class HelpusWithComps (ListComilations):
list_title = "Compilations with problems"
letter_url_name = "dv-helpus-comp_letter"
all_url_name = "dv-helpus-comp"
def get_objects (self):
# That is the only way.. ;( Django's contenttype magic inserts content_type_id=29 into where clause
# making it impossible to filter screenshots=None, so we have to use inner join
active_and_with_image_q = self.model.objects.filter (status = 'A', screenshots__image__status = 'A')
# Active and without an image
condition = DQ (status = 'A') & ~DQ (pk__in = active_and_with_image_q)
# Active and no songs (messed up via admin interface or songs are deleted...)
condition |= DQ (status = 'A', songs = None)
q = self.model.objects.filter (condition)
q = q.annotate (screenshots_count = Count("screenshots"),
songs_count = Count ("songs"))
return q
def desc_function (self, comp):
"""Describe what is wrong with the compilation."""
problems = []
if comp.status == 'A':
if comp.screenshots_count == 0:
problems.append (_("no cover image"))
if comp.songs_count == 0:
problems.append (_("no songs"))
if problems:
problems = ", ".join (problems)
problems = problems[0].upper() + problems[1:]
return " - " + problems + "."
else:
# WTF? why are we here then?
return ""
class HelpusWithScreenshots (ListScreenshots):
list_title = "Images with problems"
letter_url_name = "dv-helpus-screenshot_letter"
all_url_name = "dv-helpus-screenshot"
# Connected to nothing
condition = DQ (status = 'A', screenshotobjectlink = None)
def get_objects (self):
q = self.model.objects.filter (self.condition)
q = q.annotate (slink_count = Count("screenshotobjectlink"))
return q
def desc_function (self, scr):
"""Describe what is wrong with the screenshot."""
problems = []
if scr.status == 'A':
if scr.slink_count == 0:
problems.append (_("connected to nothing"))
if problems:
problems = ", ".join (problems)
problems = problems[0].upper() + problems[1:]
return " - " + problems + "."
else:
# WTF? why are we here then?
return ""
class TagCloud(WebView):
template = "tag_cloud.html"
cache_key = "tag_cloud"
cache_duration = 24*60*60
def get_cache_key(self):
tag_id = cache.get("tagver", 0)
key = "tag_cloud_%s" % tag_id
return key
def set_cached_context(self):
min_count = getattr(settings, 'TAG_CLOUD_MIN_COUNT', 1)
tags = m.Song.tags.cloud(min_count=min_count)
return {'tags': tags}
class MuteOneliner(WebView):
template = "oneliner_mute.html"
forms = [
(f.MuteOnelinerForm, "banform"),
]
def check_permissions(self):
return self.request.user.has_perm("webview.add_mute_oneliner")
def POST(self):
if self.forms_valid:
data = self.context["banform"].cleaned_data
user = data["username"]
endtime = datetime.datetime.now() + datetime.timedelta(minutes=data["mute_minutes"])
entry = m.OnelinerMuted(
user=user,
muted_to=endtime,
reason=data["reason"],
added_by=self.request.user,
details=data["details"],
)
if data["ban_ip"]:
profile = user.get_profile()
if profile.last_ip:
entry.ip_ban = profile.last_ip
entry.save()
if getattr(m.settings, "BAN_ANNOUNCE", False):
m.send_notification("User '%s' has been silenced for %s minutes. Reason: %s" % (user.username, data["mute_minutes"], data["reason"]), None)
user.get_profile().log(self.request.user, "Silenced for %s minutes. Reason: %s" % (data["mute_minutes"], data["reason"]))
self.redirect("dv-muteoneliner")
def set_context(self):
active = m.OnelinerMuted.objects.filter(muted_to__gt=datetime.datetime.now())
history = m.OnelinerMuted.objects.filter(muted_to__lt=datetime.datetime.now())[:10]
return {"active": active, "history": history}
class TagDetail(WebView):
template = "tag_detail.html"
cache_duration = 24 * 60 * 60
def get_cache_key(self):
tag_id = cache.get ("tagver", 0)
key = "tagdetail_%s_%s" % (self.kwargs.get("tag", ""), tag_id)
return hashlib.md5(key).hexdigest()
def set_cached_context(self):
tag = self.kwargs.get ("tag", "")
songs = TaggedItem.objects.get_by_model (m.Song, tag)
related = m.quickly_get_related_tags (songs,
exclude_tags_str = tag,
limit_to_model = m.Song,
count = True)
related = tagging.utils.calculate_cloud (related)
return {'songs' : songs,
'related' : related,
'tag' : tag}
class TagEdit(SongView):
login_required=True
template = "tag_edit.html"
def POST(self):
t = self.request.POST.get('tags', "")
self.song.tags = re.sub(r'[^a-zA-Z0-9!_\-?& ]+', '', t)
self.song.log(self.request.user, "Edited tags")
self.song.save() # For updating the "last changed" value
m.TagHistory.objects.create(user=self.request.user, song=self.song, tags = self.request.POST['tags'])
try:
cache.incr("tagver")
except:
cache.set("tagver", 1)
return self.redirect(self.song)
def set_context(self):
tags = tagging.utils.edit_string_for_tags(self.song.tags)
changes = m.TagHistory.objects.filter(song=self.song).order_by('-id')[:5]
return {'tags': tags, 'changes': changes}
@login_required
def create_artist(request):
"""
Simple form to allow registered users to create a new artist entry.
"""
auto_approve = getattr(settings, 'ADMIN_AUTO_APPROVE_ARTIST', 0)
links = LinkCheck("A")
if request.method == 'POST':
# Check to see if moderation settings allow for the check
if request.user.is_staff and auto_approve == 1:
# Automatically approved due to Moderator status
status = 'A'
else:
status = 'U'
a = m.Artist(created_by = request.user, status = status)
form = f.CreateArtistForm(request.POST, request.FILES, instance = a)
if form.is_valid() and links.is_valid(request.POST):
new_artist = form.save(commit=False)
new_artist.save()
form.save_m2m()
links.save(new_artist)
return HttpResponseRedirect(new_artist.get_absolute_url())
else:
form = f.CreateArtistForm()
return j2shim.r2r('webview/create_artist.html', \
{'form' : form, 'links': links }, \
request=request)
@permission_required('webview.change_artist')
def activate_artists(request):
"""
Shows the most recently added artists who have a 'U' status in their upload marker
"""
if "artist" in request.GET and "status" in request.GET:
artistid = int(request.GET['artist'])
status = request.GET['status']
artist = m.Artist.objects.get(id=artistid)
url = m.Site.objects.get_current() # Pull this into a variable
if status == 'A':
stat = "Accepted"
artist.log(request.user, "Activated artist")
artist.status = "A"
if status == 'R':
stat = "Rejected"
artist.log(request.user, "Rejected artist")
artist.status = 'R'
# Prepare a mail template to inform user of the status of their request
mail_tpl = loader.get_template('webview/email/artist_approval.txt')
c = Context({
'artist' : artist,
'site' : m.Site.objects.get_current(),
'stat' : stat,
'url' : url,
})
artist.save()
# Send the email to inform the user of their request status
if (artist.created_by.get_profile().email_on_artist_add and status == 'A') or status == 'R':
artist.created_by.get_profile().send_message(sender = request.user,
message = mail_tpl.render(c),
subject = u"Artist %s : %s" % (artist.handle, stat)
)
artists = m.Artist.objects.filter(status = "U").order_by('last_updated')
return j2shim.r2r('webview/pending_artists.html', { 'artists': artists }, request=request)
@login_required
def create_group(request):
"""
Simple form to allow registered users to create a new group entry.
"""
auto_approve = getattr(settings, 'ADMIN_AUTO_APPROVE_GROUP', 0)
links = LinkCheck("G")
if request.method == 'POST':
# Check to see if moderation settings allow for the check
if request.user.is_staff and auto_approve == 1:
# Automatically approved due to Moderator status
status = 'A'
else:
status = 'U'
if request.method == 'POST':
g = m.Group(created_by = request.user, status = status)
form = f.CreateGroupForm(request.POST, request.FILES, instance = g)
if form.is_valid() and links.is_valid(request.POST):
new_group = form.save(commit=False)
new_group.save()
form.save_m2m()
links.save(new_group)
return HttpResponseRedirect(new_group.get_absolute_url())
else:
form = f.CreateGroupForm()
return j2shim.r2r('webview/create_group.html', \
{'form' : form, 'links': links }, \
request=request)
@permission_required('webview.change_group')
def activate_groups(request):
"""
Shows the most recently added groups who have a 'U' status in their upload marker
"""
if "group" in request.GET and "status" in request.GET:
groupid = int(request.GET['group'])
status = request.GET['status']
group = m.Group.objects.get(id=groupid)
if status == 'A':
stat = "Accepted"
group.status = "A"
if status == 'R':
stat = "Rejected"
group.status = 'R'
# Prepare a mail template to inform user of the status of their request
mail_tpl = loader.get_template('webview/email/group_approval.txt')
c = Context({
'group' : group,
'site' : m.Site.objects.get_current(),
'stat' : stat,
})
group.save()
# Send the email to inform the user of their request status
if (group.created_by.get_profile().email_on_group_add and status == 'A') or status == 'R':
group.created_by.get_profile().send_message(
sender = request.user,
message = mail_tpl.render(c),
subject = "Group Request Status Changed To: %s" % stat
)
groups = m.Group.objects.filter(status = "U").order_by('last_updated')
return j2shim.r2r('webview/pending_groups.html', { 'groups': groups }, request=request)
@permission_required('webview.change_compilation')
def activate_compilations(request):
"""
Shows the most recently added compilations who have a 'U' status in their upload marker
"""
if "compilation" in request.GET and "status" in request.GET:
compilationid = int(request.GET['compilation'])
status = request.GET['status']
compilation = m.Compilation.objects.get(id=compilationid)
if status == 'A':
stat = "Accepted"
compilation.status = "A"
if status == 'R':
stat = "Rejected"
compilation.status = 'R'
# Prepare a mail template to inform user of the status of their request
mail_tpl = loader.get_template('webview/email/compilation_approval.txt')
c = Context({
'compilation' : compilation,
'site' : m.Site.objects.get_current(),
'stat' : stat,
})
compilation.save()
# Send the email to inform the user of their request status
if (compilation.created_by.get_profile().email_on_group_add and status == 'A') or status == 'R':
compilation.created_by.get_profile().send_message(
sender = request.user,
message = mail_tpl.render(c),
subject = "Compilation Request Status Changed To: %s" % stat
)
compilations = m.Compilation.objects.filter(status = "U").order_by('last_updated')
return j2shim.r2r('webview/pending_compilations.html', { 'compilations': compilations }, request=request)
@login_required
def create_label(request):
"""
Simple form to allow registered users to create a new label entry.
"""
auto_approve = getattr(settings, 'ADMIN_AUTO_APPROVE_LABEL', 0)
links = LinkCheck("L")
if request.method == 'POST':
# Check to see if moderation settings allow for the check
if request.user.is_staff and auto_approve == 1:
# Automatically approved due to Moderator status
status = 'A'
else:
status = 'U'
if request.method == 'POST':
l = m.Label(created_by = request.user, status = status)
form = f.CreateLabelForm(request.POST, request.FILES, instance = l)
if form.is_valid() and links.is_valid(request.POST):
new_label = form.save(commit=False)
new_label.save()
form.save_m2m()
links.save(new_label)
return HttpResponseRedirect(new_label.get_absolute_url())
else:
form = f.CreateLabelForm()
return j2shim.r2r('webview/create_label.html', \
{'form' : form, 'links': links }, \
request=request)
@permission_required('webview.change_label')
def activate_labels(request):
"""
Shows the most recently added labels who have a 'U' status in their upload marker
"""
if "label" in request.GET and "status" in request.GET:
labelid = int(request.GET['label'])
status = request.GET['status']
this_label = m.Label.objects.get(id=labelid)
if status == 'A':
stat = "Accepted"
this_label.status = "A"
if status == 'R':
stat = "Rejected"
this_label.status = 'R'
# Prepare a mail template to inform user of the status of their request
mail_tpl = loader.get_template('webview/email/label_approval.txt')
c = Context({
'label' : this_label,
'site' : m.Site.objects.get_current(),
'stat' : stat,
})
this_label.save()
# Send the email to inform the user of their request status
if (this_label.created_by.get_profile().email_on_group_add and status == 'A') or status == 'R':
this_label.created_by.get_profile().send_message(
sender = request.user,
message = mail_tpl.render(c),
subject = "Label Request Status Changed To: %s" % stat
)
labels = m.Label.objects.filter(status = "U").order_by('last_updated')
return j2shim.r2r('webview/pending_labels.html', { 'labels': labels }, request=request)
@login_required
def create_screenshot(request, obj=None):
"""
Simple form to allow registered users to create a new screenshot entry.
"""
auto_approve = getattr(settings, 'ADMIN_AUTO_APPROVE_SCREENSHOT', 0)
error=""
if request.method == 'POST':
# Check to see if moderation settings allow for the check
if request.user.is_staff and auto_approve == 1:
# Automatically approved due to Moderator status
status = 'A'
else:
status = 'U'
if request.method == 'POST':
new_screenshot = None
l = m.Screenshot(added_by = request.user, status = status)
form = f.CreateScreenshotForm(request.POST, request.FILES, instance = l)
form2 = f.GenericInfoForm(request.POST)
if form2.is_valid():
connectval = request.POST.get("connectto")
ct = form2.cleaned_data['content_type']
id = form2.cleaned_data['object_id']
# User links existing screenshot instead of creating new.
if connectval:
try:
if connectval.isdigit():
new_screenshot = m.Screenshot.objects.get(id=connectval)
else:
new_screenshot = m.Screenshot.objects.get(name=connectval)
if not new_screenshot.is_active():
error = "'{0}' is not active! Get an admin to approve it.".format(connectval)
new_screenshot = None
else:
m.ScreenshotObjectLink.objects.create(content_type=ct, object_id=id, image=new_screenshot)
new_screenshot.save()
except:
error = "Screenshot not found!"
if not connectval and form.is_valid():
new_screenshot = form.save(commit=False)
new_screenshot.save()
form.save_m2m()
m.ScreenshotObjectLink.objects.create(content_type=ct, object_id=id, image=new_screenshot)
# Generate a request for the thumbnail
new_screenshot.create_thumbnail()
new_screenshot.save()
# Leave this place :)
if new_screenshot:
return HttpResponseRedirect(new_screenshot.get_absolute_url())
else:
if obj:
ct = ContentType.objects.get_for_model(obj.__class__)
i = {'content_type': ct, 'object_id': obj.id }
else:
i = {}
form = f.CreateScreenshotForm()
form2 = f.GenericInfoForm(initial=i)
return j2shim.r2r('webview/create_screenshot.html', \
{'form' : form, 'form2': form2, "obj":obj, 'error':error }, \
request=request)
@permission_required('webview.change_screenshot')
def activate_screenshots(request):
"""
Shows the most recently added screenshots which have a 'U' status in their upload marker
"""
if "screenshot" in request.GET and "status" in request.GET:
screenshotid = int(request.GET['screenshot'])
status = request.GET['status']
this_screenshot = m.Screenshot.objects.get(id=screenshotid)
url = m.Site.objects.get_current()
if status == 'A':
stat = "Accepted"
this_screenshot.status = "A"
if status == 'R':
stat = "Rejected"
this_screenshot.status = 'R'
# Prepare a mail template to inform user of the status of their request
mail_tpl = loader.get_template('webview/email/screenshot_approval.txt')
c = Context({
'screenshot' : this_screenshot,
'site' : m.Site.objects.get_current(),
'stat' : stat,
'url' : url,
})
this_screenshot.save()
# Send the email to inform the user of their request status
if (this_screenshot.added_by.get_profile().email_on_group_add and status == 'A') or status == 'R':
this_screenshot.added_by.get_profile().send_message(
sender = request.user,
message = mail_tpl.render(c),
subject = "Screenshot Request Status Changed To: %s" % stat
)
screenshots = m.Screenshot.objects.filter(status = "U").order_by('last_updated')
return j2shim.r2r('webview/pending_screenshots.html', { 'screenshots': screenshots }, request=request)
@permission_required('webview.change_screenshot')
def rebuild_thumb(request, screenshot_id):
screenshot = get_object_or_404(m.Screenshot, id=screenshot_id)
screenshot.create_thumbnail()
screenshot.save()
return j2shim.r2r('webview/screenshot_detail.html', { 'object' : screenshot }, request)
def users_online(request):
timefrom = datetime.datetime.now() - datetime.timedelta(minutes=5)
userlist = m.Userprofile.objects.filter(last_activity__gt=timefrom).order_by('user__username')
return j2shim.r2r('webview/online_users.html', {'userlist' : userlist}, request=request)
@login_required
def set_rating_autovote(request, song_id, user_rating):
"""
Set a user's rating on a song, from 1 to 5
"""
int_vote = int(user_rating)
if int_vote <= 5 and int_vote > 0:
S = m.Song.objects.get(id = song_id)
S.set_vote(int_vote, request.user)
#add_event(event="nowplaying")
# Successful vote placed.
try:
refer = request.META['HTTP_REFERER']
return HttpResponseRedirect(refer)
except:
return HttpResponseRedirect("/")
# If the user tries any funny business, we redirect to the queue. No messing!
return HttpResponseRedirect(reverse("dv-queue"))
@login_required
def set_rating(request, song_id):
"""
Set a user's rating on a song, from 1 to 5
"""
if request.method == 'POST':
try:
R = int(request.POST['Rating'])
except:
return HttpResponseRedirect(reverse('dv-song', args=[song_id]))
if R <= 5 and R >= 1:
S = m.Song.objects.get(id = song_id)
S.set_vote(R, request.user)
return HttpResponseRedirect(S.get_absolute_url())
def link_category(request, slug):
"""
View all links associated with a specific link category slug
"""
link_cat = get_object_or_404(m.LinkCategory, id_slug = slug)
link_data_txt = m.Link.objects.filter(status="A").filter(link_type="T").filter(url_cat=link_cat) # See what linkage data we have
return j2shim.r2r('webview/links_category.html', \
{'links_txt' : link_data_txt, 'cat' : link_cat}, \
request=request)
@login_required
def link_create(request):
"""
User submitted links appear using this form for moderators to approve. Once sent, they are directed to
a generic 'Thanks' page.
"""
auto_approve = getattr(settings, 'ADMIN_AUTO_APPROVE_LINK', 0)
if request.method == 'POST':
# Check to see if moderation settings allow for the check
if request.user.is_staff and auto_approve == 1:
# Automatically approved due to Moderator status
status = 'A'
else:
status = 'P'
l = m.Link(submitted_by = request.user, status = status)
form = f.CreateLinkForm(request.POST, request.FILES, instance = l)
if form.is_valid():
new_link = form.save(commit=False)
new_link.save()
form.save_m2m()
return j2shim.r2r('webview/link_added.html', request=request) # Redirect to 'Thanks!' screen!
else:
form = f.CreateLinkForm()
return j2shim.r2r('webview/create_link.html', { 'form' : form }, request=request)
@permission_required('webview.change_link')
def activate_links(request):
"""
Show all currently pending links in the system. Only the l33t may access.
"""
if "link" in request.GET and "status" in request.GET:
linkid = int(request.GET['link'])
status = request.GET['status']
this_link = m.Link.objects.get(id=linkid)
if status == 'A':
this_link.status = "A"
this_link.log(request.user, "Accepted link")
this_link.approved_by = request.user
if status == 'R':
this_link.status = "R"
this_link.log(request.user, "Rejected link")
this_link.approved_by = request.user
# Save this to the DB
this_link.save()
#links = Link.objects.filter(status = "P")
links_txt = m.Link.objects.filter(status="P").filter(link_type="T")
#links_but = Link.objects.filter(status="P").filter(link_type="U")
#links_ban = Link.objects.filter(status="P").filter(link_type="B")
return j2shim.r2r('webview/pending_links.html', { 'text_links' : links_txt }, request=request)
def site_links(request):
"""
Show all active links for this site
"""
link_cats = m.LinkCategory.objects.all() # All categories in the system
return j2shim.r2r('webview/site-links.html', { 'link_cats' : link_cats }, request=request)
def memcached_status(request):
try:
import memcache
except ImportError:
return HttpResponseRedirect("/")
if not (request.user.is_authenticated() and
request.user.is_staff):
return HttpResponseRedirect("/")
# get first memcached URI
match = re.match(
"memcached://([.\w]+:\d+)", settings.CACHE_BACKEND
)
if not match:
return HttpResponseRedirect("/")
host = memcache._Host(match.group(1))
host.connect()
host.send_cmd("stats")
class Stats:
pass
stats = Stats()
while 1:
line = host.readline().split(None, 2)
if line[0] == "END":
break
stat, key, value = line
try:
# convert to native type, if possible
value = int(value)
if key == "uptime":
value = datetime.timedelta(seconds=value)
elif key == "time":
value = datetime.datetime.fromtimestamp(value)
except ValueError:
pass
setattr(stats, key, value)
host.close_socket()
return j2shim.r2r(
'webview/memcached_status.html', dict(
stats=stats,
hit_rate=100 * stats.get_hits / stats.cmd_get,
time=datetime.datetime.now(), # server time
), request=request)
class LicenseList(WebView):
template = "licenselist.html"
def set_context(self):
licenses = m.SongLicense.objects.all()
return {'licenses': licenses}
class License(WebView):
template = "license.html"
def set_context(self):
id = self.kwargs.get("id")
license = m.SongLicense.objects.get(id=id)
return {'license': license}
class Login(MyBaseView):
template="registration/login.html"
MAX_FAILS_PER_HOUR = getattr(settings, "MAX_FAILED_LOGINS_PER_HOUR", 5)
def pre_view(self):
self.context['next'] = self.request.REQUEST.get("next", "")
self.context['username'] = self.request.REQUEST.get("username", "")
self.context['error'] = ""
def check_limit(self, keys):
for key in keys:
if cache.get(key, 0) > self.MAX_FAILS_PER_HOUR:
return True
return False
def add_to_limit(self, keys):
for key in keys:
if cache.get(key, None) == None:
cache.set(key, 1, 60*60)
else:
cache.incr(key)
def POST(self):
ip = self.request.META.get("REMOTE_ADDR")
username = self.request.POST.get('username', "")
password = self.request.POST.get('password', "")
key1 = hashlib.md5("loginfail" + username).hexdigest()
key2 = hashlib.md5("loginfail" + ip).hexdigest()
if self.check_limit((key1, key2)):
self.context['error'] = _("Too many failed logins. Please wait an hour before trying again.")
return False
next = self.request.POST.get("next", False)
if not username or not password:
self.context['error'] = _(u"You need to supply a username and password")
return
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(self.request, user)
return self.redirect(next or 'dv-root')
else:
self.context['error'] = _(u"I'm sorry, your account have been disabled.")
else:
self.add_to_limit((key1, key2))
self.context['error'] = _(u"I'm sorry, the username or password seem to be wrong.")
def play_stream(request):
streamurl = getattr(settings, "FLASH_STREAM_URL", False)
if not streamurl:
surl = m.RadioStream.objects.filter(streamtype="M").order_by('?')
if surl:
streamurl = surl[0].url
else:
streamurl = "No MP3 Streams!"
return j2shim.r2r(
'webview/radioplay.html', dict(
streamurl=streamurl,
), request=request)
def upload_progress(request):
"""
Return JSON object with information about the progress of an upload.
"""
progress_id = ''
if 'X-Progress-ID' in request.GET:
progress_id = request.GET['X-Progress-ID']
elif 'X-Progress-ID' in request.META:
progress_id = request.META['X-Progress-ID']
if progress_id:
from django.utils import simplejson
cache_key = "%s_%s" % (request.META['REMOTE_ADDR'], progress_id)
data = cache.get(cache_key)
return HttpResponse(simplejson.dumps(data))
else:
return HttpResponseServerError('Server Error: You must provide X-Progress-ID header or query param.')
from pprint import pprint
from flask import Flask, request
import requests
from decouple import config
import random
app = Flask(__name__)
token = config('TELEGRAM_TOKEN')
base_url = f"https://api.telegram.org/bot{token}"
naver_client_id = config('NAVER_CLIENT_ID')
naver_client_secret = config('NAVER_CLIENT_SECRET')
@app.route(f'/{token}', methods=['POST'])
def telegram():
response = request.get_json()
chat_id = response.get('message').get('chat').get('id')
# If a photo file was sent,
if response.get('message').get('photo'):
# get the photo file's id
file_id = response.get('message').get('photo')[-1].get('file_id')
# ask the Telegram server for the file's path.
file_response = requests.get(
f'{base_url}/getFile?file_id={file_id}').json()
# build the file URL from the file path.
file_path = file_response.get('result').get('file_path')
file_url = f'https://api.telegram.org/file/bot{token}/{file_path}'
# print(file_url)
response = requests.get(file_url, stream=True)
image = response.raw.read()
# 2. Set the request URL
naver_url = 'https://openapi.naver.com/v1/vision/celebrity'
# 3. Send the request (POST)
headers = {'X-Naver-Client-Id': naver_client_id,
'X-Naver-Client-Secret': naver_client_secret
}
response = requests.post(naver_url, headers=headers, files={'image': image}).json()
if response.get('faces'):
best = response.get('faces')[0].get('celebrity')
if best.get('confidence') > 0.2:
text = f"{best.get('confidence')*100}%만큼 {best.get('value')}를 닮으셨네요"
else:
text = "연예인을 닮지 않음..."
else:
text = "사람 아닌듯"
# print(text)
api_url = f'{base_url}/sendMessage?chat_id={chat_id}&text={text}'
requests.get(api_url)
# if a text message was sent
elif response.get('message').get('text'):
# store the message the user sent in text, and the chat info in chat_id
text = response.get('message').get('text')
chat_id = response.get('message').get('chat').get('id')
if '/번역 ' == text[0:4]:
# The Papago NMT endpoint below is an assumed fix: the original reused `naver_url`,
# which is only defined in the photo branch above and would raise a NameError here.
naver_url = 'https://openapi.naver.com/v1/papago/n2mt'
headers = {'X-Naver-Client-Id': naver_client_id,
'X-Naver-Client-Secret': naver_client_secret
}
data = {
'source': 'ko',
'target': 'en',
'text': text[4:]
}
# data = {
# 'source': 'en',
# 'target': 'ko',
# 'text': 'War never again! Never again war!'
# }
response = requests.post(naver_url, headers=headers, data=data).json()
text = response.get('message').get('result').get('translatedText')
# if a greeting comes in, send back a custom greeting
elif '안녕' in text or 'hi' in text:
text = '간디'
elif '로또' in text:
text = sorted(random.sample(range(1,46), 6))
# build the final url and send the message
if text=='호우':
text = '장마임'
if text=='패드립':
text = '패드립 머신 가동'
api_url = f'{base_url}/sendMessage?chat_id={chat_id}&text={text}'
requests.get(api_url)
return 'OK', 200 # 200: response status code
if __name__ == '__main__':
import os
port = int(os.environ.get("PORT", 5000))
app.run(host='0.0.0.0', port=port)
"""
Stacked denoising auto-encoders (SdA) using Theano.
Denoising autoencoders are the building blocks for SdA.
They are based on auto-encoders as the ones used in Bengio et al. 2007.
An autoencoder takes an input x and first maps it to a hidden representation
y = f_{\theta}(x) = s(Wx+b), parameterized by \theta={W,b}. The resulting
latent representation y is then mapped back to a "reconstructed" vector
z \in [0,1]^d in input space z = g_{\theta'}(y) = s(W'y + b'). The weight
matrix W' can optionally be constrained such that W' = W^T, in which case
the autoencoder is said to have tied weights. The network is trained to
minimize the reconstruction error (the error between x and z).
For the denoising autoencoder, during training, first x is corrupted into
\tilde{x}, where \tilde{x} is a partially destroyed version of x by means
of a stochastic mapping. Afterwards y is computed as before (using
\tilde{x}), y = s(W\tilde{x} + b) and z as s(W'y + b'). The reconstruction
error is now measured between z and the uncorrupted input x, which is
computed as the cross-entropy :
- \sum_{k=1}^d[ x_k \log z_k + (1-x_k) \log( 1-z_k)]
References :
- P. Vincent, H. Larochelle, Y. Bengio, P.A. Manzagol: Extracting and
Composing Robust Features with Denoising Autoencoders, ICML'08, 1096-1103,
2008
- Y. Bengio, P. Lamblin, D. Popovici, H. Larochelle: Greedy Layer-Wise
Training of Deep Networks, Advances in Neural Information Processing
Systems 19, 2007
"""
import os
import sys
import timeit
import matplotlib.pyplot as plt
import numpy as np
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
from MultilayerPerceptron import HiddenLayer
from dAutoencoder import dA
from LogisticRegression import LogisticRegression
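# Worked example (added for exposition; not part of the original module): a plain
# numpy sketch of the corruption -> encode -> decode -> cross-entropy steps that the
# module docstring describes, so x, \tilde{x}, y, z and the loss become concrete.
# The function name, sizes and the ~30% corruption level are illustrative assumptions.
def _denoising_autoencoder_numpy_sketch():
    rng = np.random.RandomState(0)
    def sigmoid(a):
        return 1.0 / (1.0 + np.exp(-a))
    x = rng.rand(5, 8)                                 # batch of inputs in [0, 1]^d
    W = rng.normal(scale=0.1, size=(8, 4))             # encoder weights
    b, b_prime = np.zeros(4), np.zeros(8)              # hidden / visible biases
    x_tilde = x * rng.binomial(1, 0.7, size=x.shape)   # corrupt: zero out ~30% of x
    y = sigmoid(np.dot(x_tilde, W) + b)                # y = s(W \tilde{x} + b)
    z = sigmoid(np.dot(y, W.T) + b_prime)              # z = s(W'y + b') with W' = W^T
    # reconstruction error is the cross-entropy against the *uncorrupted* input x
    loss = -np.mean(np.sum(x * np.log(z) + (1 - x) * np.log(1 - z), axis=1))
    return loss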
# start-snippet-1
class SdA(object):
"""Stacked denoising auto-encoder class (SdA)
A stacked denoising autoencoder model is obtained by stacking several
dAs. The hidden layer of the dA at layer `i` becomes the input of
the dA at layer `i+1`. The first layer dA gets as input the input of
the SdA, and the hidden layer of the last dA represents the output.
Note that after pretraining, the SdA is dealt with as a normal MLP,
the dAs are only used to initialize the weights.
"""
def __init__(
self,
numpy_rng,
theano_rng=None,
n_ins=None,
hidden_layers_sizes=None,
corruption_levels=None,
n_outs=None
):
""" This class is made to support a variable number of layers.
:type numpy_rng: numpy.random.RandomState
:param numpy_rng: numpy random number generator used to draw initial
weights
:type theano_rng: theano.tensor.shared_randomstreams.RandomStreams
:param theano_rng: Theano random generator; if None is given one is
generated based on a seed drawn from `rng`
:type n_ins: int
:param n_ins: dimension of the input to the sdA
:type n_layers_sizes: list of ints
:param n_layers_sizes: intermediate layers size, must contain
at least one value
:type n_outs: int
:param n_outs: dimension of the output of the network
:type corruption_levels: list of float
:param corruption_levels: amount of corruption to use for each
layer
"""
self.sigmoid_layers = []
self.dA_layers = []
self.params = []
self.n_layers = len(hidden_layers_sizes)
assert self.n_layers > 0
if not theano_rng:
theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
# allocate symbolic variables for the data
self.x = T.matrix('x') # the data is presented as rasterized images
self.y = T.ivector('y') # the labels are presented as 1D vector of
# [int] labels
# The SdA is an MLP, for which all weights of intermediate layers
# are shared with a different denoising autoencoders
# We will first construct the SdA as a deep multilayer perceptron,
# and when constructing each sigmoidal layer we also construct a
# denoising autoencoder that shares weights with that layer
# During pretraining we will train these autoencoders (which will
        # lead to changing the weights of the MLP as well)
        # During finetuning we will finish training the SdA by doing
        # stochastic gradient descent on the MLP
for i in range(self.n_layers):
# construct the sigmoidal layer
# the size of the input is either the number of hidden units of
# the layer below or the input size if we are on the first layer
if i == 0:
input_size = n_ins
else:
input_size = hidden_layers_sizes[i - 1]
# the input to this layer is either the activation of the hidden
# layer below or the input of the SdA if you are on the first
# layer
if i == 0:
layer_input = self.x
else:
layer_input = self.sigmoid_layers[-1].output
sigmoid_layer = HiddenLayer(rng=numpy_rng,
input=layer_input,
n_in=input_size,
n_out=hidden_layers_sizes[i],
activation=T.nnet.sigmoid)
# add the layer to our list of layers
self.sigmoid_layers.append(sigmoid_layer)
            # it's arguably a philosophical question...
# but we are going to only declare that the parameters of the
# sigmoid_layers are parameters of the StackedDAA
# the visible biases in the dA are parameters of those
# dA, but not the SdA
self.params.extend(sigmoid_layer.params)
            # Construct a denoising autoencoder that shares weights with this
# layer
dA_layer = dA(numpy_rng=numpy_rng,
theano_rng=theano_rng,
input=layer_input,
n_visible=input_size,
n_hidden=hidden_layers_sizes[i],
W=sigmoid_layer.W,
bhid=sigmoid_layer.b)
self.dA_layers.append(dA_layer)
# We now need to add a logistic layer on top of the MLP
self.logLayer = LogisticRegression(
input=self.sigmoid_layers[-1].output,
n_in=hidden_layers_sizes[-1],
n_out=n_outs
)
self.params.extend(self.logLayer.params)
# construct a function that implements one step of finetunining
# compute the cost for second phase of training,
# defined as the negative log likelihood
self.finetune_cost = self.logLayer.negative_log_likelihood(self.y)
# compute the gradients with respect to the model parameters
# symbolic variable that points to the number of errors made on the
# minibatch given by self.x and self.y
self.errors = self.logLayer.errors(self.y)
def pretraining_functions(self, train_set_x, np_train_y, batch_size):
''' Generates a list of functions, each of them implementing one
        step in training the dA corresponding to the layer with the same index.
The function will require as input the minibatch index, and to train
a dA you just need to iterate, calling the corresponding function on
all minibatch indexes.
:type train_set_x: theano.tensor.TensorType
:param train_set_x: Shared variable that contains all datapoints used
for training the dA
:type batch_size: int
:param batch_size: size of a [mini]batch
:type learning_rate: float
:param learning_rate: learning rate used during training for any of
the dA layers
'''
# index to a [mini]batch
index = T.lscalar('index') # index to a minibatch
corruption_level = T.scalar('corruption') # % of corruption to use
learning_rate = T.scalar('lr') # learning rate to use
        # beginning of a batch, given `index`
batch_begin = index * batch_size
# ending of a batch given `index`
batch_end = batch_begin + batch_size
pretrain_fns = []
        for kdA, dAuto in enumerate(self.dA_layers):
            print(kdA, dAuto)
# get the cost and the updates list
cost, updates = dAuto.get_cost_updates(corruption_level,
learning_rate)
# compile the theano function
fn = theano.function(
inputs=[
index,
theano.In(corruption_level, value=0.2),
theano.In(learning_rate, value=0.1)
],
outputs=cost,
updates=updates,
givens={
self.x: train_set_x[batch_begin: batch_end]
}
)
# append `fn` to the list of functions
pretrain_fns.append(fn)
return pretrain_fns
def build_finetune_functions(self, datasets, batch_size, learning_rate):
'''Generates a function `train` that implements one step of
finetuning, a function `validate` that computes the error on
a batch from the validation set, and a function `test` that
computes the error on a batch from the testing set
:type datasets: list of pairs of theano.tensor.TensorType
        :param datasets: It is a list that contains all the datasets;
                         it has to contain three pairs, `train`,
`valid`, `test` in this order, where each pair
is formed of two Theano variables, one for the
datapoints, the other for the labels
:type batch_size: int
:param batch_size: size of a minibatch
:type learning_rate: float
:param learning_rate: learning rate used during finetune stage
'''
(train_set_x, train_set_y) = datasets[0]
(valid_set_x, valid_set_y) = datasets[1]
(test_set_x, test_set_y) = datasets[2]
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
n_valid_batches //= batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0]
n_test_batches //= batch_size
# compute number of minibatches for training, validation and testing
index = T.lscalar('index') # index to a [mini]batch
# compute the gradients with respect to the model parameters
gparams = T.grad(self.finetune_cost, self.params)
# compute list of fine-tuning updates
updates = [
(param, param - gparam * learning_rate)
for param, gparam in zip(self.params, gparams)
]
train_fn = theano.function(
inputs=[index],
outputs=self.finetune_cost,
updates=updates,
givens={
self.x: train_set_x[
index * batch_size: (index + 1) * batch_size
],
self.y: train_set_y[
index * batch_size: (index + 1) * batch_size
]
},
name='train'
)
test_score_i = theano.function(
[index],
self.errors,
givens={
self.x: test_set_x[
index * batch_size: (index + 1) * batch_size
],
self.y: test_set_y[
index * batch_size: (index + 1) * batch_size
]
},
name='test'
)
valid_score_i = theano.function(
[index],
self.errors,
givens={
self.x: valid_set_x[
index * batch_size: (index + 1) * batch_size
],
self.y: valid_set_y[
index * batch_size: (index + 1) * batch_size
]
},
name='valid'
)
# Create a function that scans the entire validation set
def valid_score():
return [valid_score_i(i) for i in range(n_valid_batches)]
# Create a function that scans the entire test set
def test_score():
return [test_score_i(i) for i in range(n_test_batches)]
return train_fn, valid_score, test_score
    def sigmoid_activate(self, Xtest, W, b):
        # numpy forward pass through one hidden layer; the logistic sigmoid is
        # used so that prediction matches the T.nnet.sigmoid activation the
        # hidden layers were trained with
        pre_activation = np.dot(Xtest, W.get_value(borrow=True)) + b.get_value(borrow=True)
        sigmoid_output = 1.0 / (1.0 + np.exp(-pre_activation))
        return sigmoid_output
    def softmax_activate(self, Xtest, logLayer):
        # numpy softmax over the logistic-regression layer; normalizing per row
        # keeps batched (2D) inputs valid probability distributions as well
        v = np.exp(np.dot(Xtest, logLayer.W.get_value(borrow=True)) + logLayer.b.get_value(borrow=True))
        softmax_output = v / np.sum(v, axis=-1, keepdims=True)
        return softmax_output
def predict_functions(self, Xtest):
''' Given a set_x of examples produce a vector y' of predictions by the sDA.
'''
tmp = Xtest
for L in self.sigmoid_layers:
tmp = self.sigmoid_activate( tmp, L.W, L.b )
# finalize with log layer
tmp = self.softmax_activate( tmp, self.logLayer )
return tmp
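# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module). It wires an SdA to a shared-variable training set and runs a few
# layer-wise pre-training updates. The input dimensionality, layer sizes,
# corruption levels and learning rate below are made-up placeholders, not
# values from any original experiment; fine-tuning is omitted.
if __name__ == '__main__':
    rng = np.random.RandomState(123)
    fake_x = np.asarray(rng.rand(500, 784), dtype=theano.config.floatX)
    train_set_x = theano.shared(fake_x, borrow=True)
    sda = SdA(numpy_rng=rng,
              n_ins=784,
              hidden_layers_sizes=[256, 128],
              corruption_levels=[0.1, 0.2],
              n_outs=10)
    batch_size = 20
    n_train_batches = fake_x.shape[0] // batch_size
    pretraining_fns = sda.pretraining_functions(train_set_x, None, batch_size)
    for layer_idx, pretrain_fn in enumerate(pretraining_fns):
        for batch_idx in range(n_train_batches):
            cost = pretrain_fn(index=batch_idx, corruption=0.1, lr=0.01)
        print('layer %d, last minibatch cost %f' % (layer_idx, cost))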
| [
"[email protected]"
] | |
9ca3d949f4eba7c4f5c4434c364d62be9b136a99 | aa4024b6a846d2f6032a9b79a89d2e29b67d0e49 | /UMLRT2Kiltera_MM/graph_MT_post__Model.py | 3f264f3c35aea6264d6efa85f991b713f54237a9 | [
"MIT"
] | permissive | levilucio/SyVOLT | 41311743d23fdb0b569300df464709c4954b8300 | 0f88827a653f2e9d3bb7b839a5253e74d48379dc | refs/heads/master | 2023-08-11T22:14:01.998341 | 2023-07-21T13:33:36 | 2023-07-21T13:33:36 | 36,246,850 | 3 | 2 | MIT | 2023-07-21T13:33:39 | 2015-05-25T18:15:26 | Python | UTF-8 | Python | false | false | 2,610 | py | """
__graph_MT_post__Model.py___________________________________________________________
Automatically generated graphical appearance ---> MODIFY DIRECTLY WITH CAUTION
___________________________________________________________________________
"""
import tkFont
from graphEntity import *
from GraphicalForm import *
from ATOM3Constraint import *
class graph_MT_post__Model(graphEntity):
def __init__(self, x, y, semObject = None):
self.semanticObject = semObject
self.sizeX, self.sizeY = 172, 82
graphEntity.__init__(self, x, y)
self.ChangesAtRunTime = 0
self.constraintList = []
if self.semanticObject: atribs = self.semanticObject.attributesToDraw()
else: atribs = None
self.graphForms = []
self.imageDict = self.getImageDict()
def DrawObject(self, drawing, showGG = 0):
self.dc = drawing
if showGG and self.semanticObject: self.drawGGLabel(drawing)
h = drawing.create_oval(self.translate([189.0, 62.0, 189.0, 62.0]), tags = (self.tag, 'connector'), outline = '', fill = '' )
self.connectors.append( h )
h = drawing.create_rectangle(self.translate([20.0, 20.0, 190.0, 100.0]), tags = self.tag, stipple = '', width = 1, outline = 'black', fill = 'moccasin')
self.gf4 = GraphicalForm(drawing, h, "gf4")
self.graphForms.append(self.gf4)
font = tkFont.Font( family='Arial', size=12, weight='normal', slant='roman', underline=0)
h = drawing.create_text(self.translate([110.0, 41.0, 110.0, 12.0])[:2], tags = self.tag, font=font, fill = 'black', anchor = 'center', text = 'MT_post__Model_S', width = '0', justify= 'left', stipple='' )
self.gf66 = GraphicalForm(drawing, h, 'gf66', fontObject=font)
self.graphForms.append(self.gf66)
helv12 = tkFont.Font ( family="Helvetica", size=12, weight="bold" )
h = drawing.create_text(self.translate([-3, -3]), font=helv12,
tags = (self.tag, self.semanticObject.getClass()),
fill = "black",
text=self.semanticObject.MT_label__.toString())
self.attr_display["MT_label__"] = h
self.gf_label = GraphicalForm(drawing, h, 'gf_label', fontObject=helv12)
self.graphForms.append(self.gf_label)
def postCondition( self, actionID, * params):
return None
def preCondition( self, actionID, * params):
return None
def getImageDict( self ):
imageDict = dict()
return imageDict
new_class = graph_MT_post__Model
| [
"levi"
] | levi |
e6a2a28a5d17ffa3424d45048710a8687df2c863 | 9256eeff108787245a1d9a8e27f80c04377ba10f | /src/datasets/mnist.py | 49071693a70659a10514560cc67cff58309b79cf | [
"MIT"
] | permissive | martinhavlicek/meta-inference-public | 99a22daef937921deb9f677f68aa1c954e456e55 | 3cad0b84acd407f3d790f3d75d3045f62bdbf250 | refs/heads/master | 2022-04-12T14:15:42.514426 | 2020-03-31T21:39:50 | 2020-03-31T21:39:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,748 | py | import math
import numpy as np
from PIL import Image
from torchvision import datasets
from torchvision import transforms
# ----- ROTATED MNIST -----
ROTATIONS = np.arange(-180, 180, 20)
DEFAULT_ROTATIONS = ROTATIONS[0::2]
UNSEEN_ROTATIONS = ROTATIONS[1::2]
DEFAULT_ROTATIONS_SPARSE = np.array([-160, -80, 0, 80, 160])
UNSEEN_ROTATIONS_SPARSE = np.array([-180, -140, -120, -100, -60, -40, -20, 20, 40, 60, 100, 120, 140])
DEFAULT_ROTATIONS_DISJOINT = ROTATIONS[:len(ROTATIONS) // 2 + 1]
UNSEEN_ROTATIONS_DISJOINT = ROTATIONS[len(ROTATIONS) // 2 + 1:]
ALL_ROTATIONS = ROTATIONS
DEFAULT_ROTATIONS_DICT = {
'standard': DEFAULT_ROTATIONS,
'sparse': DEFAULT_ROTATIONS_SPARSE,
'disjoint': DEFAULT_ROTATIONS_DISJOINT
}
UNSEEN_ROTATIONS_DICT = {
'standard': UNSEEN_ROTATIONS,
'sparse': UNSEEN_ROTATIONS_SPARSE,
'disjoint': UNSEEN_ROTATIONS_DISJOINT
}
def load_many_rotated_mnist(data_dir, image_size=32, train=True,
rotations=DEFAULT_ROTATIONS):
"""
    Load multiple copies of MNIST, one per angle in `rotations`, where every
    image in a given copy is rotated by that angle.
"""
return [
load_rotated_mnist( data_dir, image_size=image_size,
train=train, rotation=rotation)
for rotation in rotations
]
def load_rotated_mnist(data_dir, image_size=32, train=True, rotation=0):
"""
    Load a MNIST dataset where each image is rotated by `rotation` degrees.
"""
rotate_image = rotate_transform(rotation)
image_transforms = [
transforms.Resize(image_size),
transforms.CenterCrop(image_size),
rotate_image,
transforms.ToTensor(),
]
image_transforms = transforms.Compose(image_transforms)
dset = datasets.MNIST(data_dir, train=train, download=True,
transform=image_transforms)
return dset
def rotate_transform(angle):
def f(img):
return transforms.functional.rotate(img, angle)
return f
# ----- SCALED MNIST -----
SCALES = np.arange(0.5, 2.0, 0.1)
DEFAULT_SCALES = SCALES[0::2]
UNSEEN_SCALES = SCALES[1::2]
DEFAULT_SCALES_SPARSE = np.array([0.6, 1.0 ,1.4, 1.8])
UNSEEN_SCALES_SPARSE = np.array([0.5, 0.7, 0.8, 0.9, 1.1, 1.2, 1.3, 1.5, 1.6, 1.7, 1.9])
DEFAULT_SCALES_DISJOINT = SCALES[:len(SCALES) // 2 + 1]
UNSEEN_SCALES_DISJOINT = SCALES[len(SCALES) // 2 + 1:]
ALL_SCALES = SCALES
DEFAULT_SCALES_DICT = {
'standard': DEFAULT_SCALES,
'sparse': DEFAULT_SCALES_SPARSE,
'disjoint': DEFAULT_SCALES_DISJOINT
}
UNSEEN_SCALES_DICT = {
'standard': UNSEEN_SCALES,
'sparse': UNSEEN_SCALES_SPARSE,
'disjoint': UNSEEN_SCALES_DISJOINT
}
def load_many_scaled_mnist( data_dir, image_size=32, train=True,
scales=DEFAULT_SCALES):
"""
    Load multiple copies of MNIST, one per factor in `scales`, where every
    image in a given copy is rescaled by that factor.
"""
return [
load_scaled_mnist( data_dir, image_size=image_size,
train=train, scale=scale)
for scale in scales
]
def load_scaled_mnist(data_dir, image_size=32, train=True, scale=1):
"""
    Load a MNIST dataset where each image is scaled by `scale`.
"""
scale_image = scale_transform(scale)
image_transforms = [
transforms.Resize(image_size),
transforms.CenterCrop(image_size),
scale_image,
transforms.ToTensor(),
]
image_transforms = transforms.Compose(image_transforms)
dset = datasets.MNIST(data_dir, train=train, download=True,
transform=image_transforms)
return dset
def scale_transform(scale):
def f(img):
size = img.size
i, j, h, w = get_crop_params(img, scale, ratio=1)
return transforms.functional.resized_crop(
img, i, j, h, w, size, Image.BILINEAR)
return f
def get_crop_params(img, scale, ratio=1):
w = img.size[0] * scale
h = img.size[1] * scale
i = (img.size[1] - h) // 2
j = (img.size[0] - w) // 2
return i, j, h, w
# ----- SHEARED MNIST -----
SHEARS = np.arange(-180, 180, 20)
DEFAULT_SHEARS = SHEARS[0::2]
UNSEEN_SHEARS = SHEARS[1::2]
DEFAULT_SHEARS_SPARSE = np.array([-160, -80, 0, 80, 160])
UNSEEN_SHEARS_SPARSE = np.array([-180, -140, -120, -100, -60, -40, -20, 20, 40, 60, 100, 120, 140])
DEFAULT_SHEARS_DISJOINT = SHEARS[:len(SHEARS) // 2 + 1]
UNSEEN_SHEARS_DISJOINT = SHEARS[len(SHEARS) // 2 + 1:]
ALL_SHEARS = SHEARS
DEFAULT_SHEARS_DICT = {
'standard': DEFAULT_SHEARS,
'sparse': DEFAULT_SHEARS_SPARSE,
'disjoint': DEFAULT_SHEARS_DISJOINT
}
UNSEEN_SHEARS_DICT = {
'standard': UNSEEN_SHEARS,
'sparse': UNSEEN_SHEARS_SPARSE,
'disjoint': UNSEEN_SHEARS_DISJOINT
}
def load_many_sheared_mnist(data_dir, image_size=32, train=True,
shears=DEFAULT_SHEARS):
"""
    Load multiple copies of MNIST, one per value in `shears`, where every
    image in a given copy is sheared by that value.
"""
return [
load_sheared_mnist( data_dir, image_size=image_size,
train=train, shear=shear)
for shear in shears
]
def load_sheared_mnist(data_dir, image_size=32, train=True, shear=0):
"""
    Load a MNIST dataset where each image is sheared by `shear`.
"""
shear_image = shear_transform(shear)
image_transforms = [
transforms.Resize(image_size),
transforms.CenterCrop(image_size),
shear_image,
transforms.ToTensor(),
]
image_transforms = transforms.Compose(image_transforms)
dset = datasets.MNIST(data_dir, train=train, download=True,
transform=image_transforms)
return dset
def shear_transform(shear):
def f(img):
return transforms.functional.affine(img, 0, (0, 0), 1, shear)
return f
| [
"[email protected]"
] | |
ac81e7a4a5a4e1eec99fc4dd938031a42d326728 | 1064db5dfd154c4bc600e0e03841b0f73f0eefbc | /home/migrations/0008_auto_20200529_0800.py | 55f78b9f74855b21f14e8caf061dee753c0981a6 | [] | no_license | crowdbotics-apps/web-29-dev-5196 | 3303921a0e5c8794e8e67f55c9841f3ec7610c16 | 7beda8f7d57ce9b9858a46f7e3940d6eed4b5725 | refs/heads/master | 2023-05-26T23:00:23.271209 | 2020-05-29T12:47:07 | 2020-05-29T12:47:07 | 267,768,914 | 0 | 0 | null | 2021-06-13T04:08:30 | 2020-05-29T04:59:18 | Python | UTF-8 | Python | false | false | 342 | py | # Generated by Django 2.2.12 on 2020-05-29 08:00
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("home", "0007_customtext_kjhkh"),
]
operations = [
migrations.RenameField(
model_name="customtext", old_name="kjhkh", new_name="ghfnhgfgjh",
),
]
| [
"[email protected]"
] | |
b5c1fff82ac0901d1ae985cd1826ca4b47c6f5af | 5b93930ce8280b3cbc7d6b955df0bfc5504ee99c | /nodes/Bisong19Building/I_PartVIII/C_Chapter47/index.py | cce9e2225cec24eabc5302e3a2817b1a5b9cd72f | [] | no_license | nimra/module_gen | 8749c8d29beb700cac57132232861eba4eb82331 | 2e0a4452548af4fefd4cb30ab9d08d7662122cf4 | refs/heads/master | 2022-03-04T09:35:12.443651 | 2019-10-26T04:40:49 | 2019-10-26T04:40:49 | 213,980,247 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,830 | py | # Lawrence McAfee
# ~~~~~~~~ import ~~~~~~~~
from modules.node.HierNode import HierNode
from modules.node.LeafNode import LeafNode
from modules.node.Stage import Stage
from modules.node.block.CodeBlock import CodeBlock as cbk
from modules.node.block.ImageBlock import ImageBlock as ibk
from modules.node.block.MarkdownBlock import MarkdownBlock as mbk
from .A_Overviewof.index import Overviewof as A_Overviewof
from .B_Createa.index import Createa as B_Createa
from .C_BuildContainers.index import BuildContainers as C_BuildContainers
from .D_Compilethe.index import Compilethe as D_Compilethe
from .E_Uploadand.index import Uploadand as E_Uploadand
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# CHAPTER 47
#
#
#
# Deploying
# an End-to-End Machine
# Learning Solution
# on Kubeflow Pipelines
# A Kubeflow pipeline component is an implementation of a pipeline task. A component
# is a step in the workflow. Each task takes one or more artifacts as input and may produce
# one or more artifacts as output.
# Each component usually includes two parts:
#
# • Client code: The code that talks to endpoints to submit jobs, for
# example, code to connect with the Google Cloud Machine Learning
# Engine.
#
# • Runtime code: The code that does the actual job and usually runs in
# the cluster, for example, the code that prepares the model for training
# on Cloud MLE.
# A component consists of an interface (inputs/outputs), the implementation
# (a Docker container image and command-line arguments), and metadata (name,
# description).
#
#
#
#
# 687
# © Ekaba Bisong 2019
# E. Bisong, Building Machine Learning and Deep Learning Models on Google Cloud Platform,
# https://doi.org/10.1007/978-1-4842-4470-8_47
#
# Chapter 47 Deploying an End-to-End Machine Learning Solution on Kubeflow Pipelines
#
#
# Overview of a Simple End-to-End Solution Pipeline
# In this simple example, we will implement a deep neural regressor network to predict the
# closing prices of Bitcoin crypto-currency. The machine learning code itself is pretty basic
# as it is not the focus of this article. The goal here is to orchestrate a machine learning
# engineering solution using microservice architectures on Kubernetes with Kubeflow
# Pipelines. The code for this chapter is in the book code repository. Clone the repository
# from the GCP Cloud Shell.
# The pipeline consists of the following components:
#
# 1. Move raw data hosted on GitHub to a storage bucket.
#
# 2. Transform the dataset using Google Dataflow.
#
# 3. Carry out hyper-parameter training on Cloud Machine
# Learning Engine.
#
# 4. Train the model with the optimized hyper-parameters.
#
# 5. Deploy the model for serving on Cloud MLE.
#
#
#
# Create a Container Image for Each Component
# First, we’ll package the client and runtime code into a Docker image. This image
# also contains the secure service account key to authenticate against GCP. For example,
# the component to transform the dataset using Dataflow has the following files built into
# its image:
# • __ Dockerfile: Dockerfile to build the Docker image.
#
# • __ build.sh: Script to initiate the container build and upload to
# Google Container Registry.
#
# • __ dataflow_transform.py: Code to run the beam pipeline on
# Cloud Dataflow.
#
# • __ service_account.json: Secure key to authenticate container
# on GCP.
#
# • __ local_test.sh: Script to run the image pipeline component
# locally.
#
#
# 688
#
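#
# The sketch below (added for illustration; it is not from the book text)
# shows how such container components might be chained with the Kubeflow
# Pipelines DSL. It assumes the older `kfp` v1 SDK; the pipeline name, bucket
# and image URIs are hypothetical placeholders.
#
#   import kfp.dsl as dsl
#
#   @dsl.pipeline(name='crypto-regressor',
#                 description='Bitcoin close-price model')
#   def crypto_pipeline(bucket='gs://my-hypothetical-bucket'):
#       ingest = dsl.ContainerOp(
#           name='move-raw-data',
#           image='gcr.io/my-project/ingest:latest',
#           arguments=['--bucket', bucket])
#       transform = dsl.ContainerOp(
#           name='dataflow-transform',
#           image='gcr.io/my-project/dataflow-transform:latest',
#           arguments=['--bucket', bucket])
#       transform.after(ingest)
#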
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Content(LeafNode):
def __init__(self):
super().__init__(
"Chapter 47: Deploying an End-to-End Machine Learning Solution on Kubeflow Pipelines",
# Stage.REMOVE_EXTRANEOUS,
# Stage.ORIG_BLOCKS,
# Stage.CUSTOM_BLOCKS,
# Stage.ORIG_FIGURES,
# Stage.CUSTOM_FIGURES,
# Stage.CUSTOM_EXERCISES,
)
self.add(mbk("# Chapter 47: Deploying an End-to-End Machine Learning Solution on Kubeflow Pipelines"))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Chapter47(HierNode):
def __init__(self):
super().__init__("Chapter 47: Deploying an End-to-End Machine Learning Solution on Kubeflow Pipelines")
self.add(Content())
self.add(A_Overviewof())
self.add(B_Createa())
self.add(C_BuildContainers())
self.add(D_Compilethe())
self.add(E_Uploadand())
# eof
| [
"[email protected]"
] | |
efbfb275ecf4ddfacfe040a07abe20a304942382 | 866dee1b3d01b863c31332ec81330d1b5ef5c6fa | /openquake.hazardlib/openquake/hazardlib/gsim/campbell_bozorgnia_2003.py | d16f63883e1925478903cecb7406eb1dbb6030d7 | [
"MIT",
"AGPL-3.0-only"
] | permissive | rainzhop/ConvNetQuake | 3e2e1a040952bd5d6346905b83f39889c6a2e51a | a3e6de3f7992eac72f1b9883fec36b8c7fdefd48 | refs/heads/master | 2020-08-07T16:41:03.778293 | 2019-11-01T01:49:00 | 2019-11-01T01:49:00 | 213,527,701 | 0 | 0 | MIT | 2019-10-08T02:08:00 | 2019-10-08T02:08:00 | null | UTF-8 | Python | false | false | 8,419 | py | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2014-2016 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Module exports :class:`CampbellBozorgnia2003NSHMP2007`.
"""
from __future__ import division
import numpy as np
from openquake.hazardlib.gsim.base import GMPE, CoeffsTable
from openquake.hazardlib import const
from openquake.hazardlib.imt import PGA, SA
class CampbellBozorgnia2003NSHMP2007(GMPE):
"""
Implements GMPE developed by Kenneth W. Campbell and Yousef Bozorgnia and
published as "Updated Near-Source Ground-Motion (Attenuation) Relations for
the Horizontal and Vertical Components of Peak Ground Acceleration and
Acceleration Responce Spectra", Bulletin of the Seismological Society of
America, Vol. 93, No. 1, pp. 314-331, 2003.
    The class implements the equations as modified by the United States
Geological Survey - National Seismic Hazard Mapping Project (USGS-NSHMP)
for the 2007 Alaska model
(http://earthquake.usgs.gov/hazards/products/ak/2007/).
The class replicates the equation as coded in ``subroutine getCamp2000``
in ``hazFXv7.f`` available from
http://earthquake.usgs.gov/hazards/products/ak/2007/software/.
    The equations compute the mean value for the 'firm rock' condition.
"""
#: Supported tectonic region type is 'active shallow crust' (see Abstract)
DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.ACTIVE_SHALLOW_CRUST
#: Supported intensity measure types are PGA and SA (see Abstract)
DEFINED_FOR_INTENSITY_MEASURE_TYPES = set([
PGA,
SA
])
#: Supported intensity measure component is the geometric mean of two
#: horizontal components (see paragraph 'Strong-Motion Database', page 316)
DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = const.IMC.AVERAGE_HORIZONTAL
#: Supported standard deviation type is Total (see equations 11, 12 pp. 319
#: 320)
DEFINED_FOR_STANDARD_DEVIATION_TYPES = set([
const.StdDev.TOTAL
])
#: No sites parameters are required. Mean value is computed for
#: 'firm rock'.
REQUIRES_SITES_PARAMETERS = set(())
#: Required rupture parameters are magnitude, rake and dip (eq. 1 and
#: following, page 319).
REQUIRES_RUPTURE_PARAMETERS = set(('mag', 'rake', 'dip'))
#: Required distance measure are RRup and Rjb (eq. 1 and following,
#: page 319).
REQUIRES_DISTANCES = set(('rrup', 'rjb'))
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
for stddev_type in stddev_types)
C = self.COEFFS[imt]
mean = self._get_mean(
C, rup.mag, rup.rake, rup.dip, dists.rrup, dists.rjb
)
stddevs = self._get_stddevs(C, rup.mag, stddev_types, dists.rrup.size)
return mean, stddevs
def _get_mean(self, C, mag, rake, dip, rrup, rjb):
"""
Return mean value (eq. 1, page 319).
"""
f1 = self._compute_magnitude_scaling(C, mag)
f2 = self._compute_distance_scaling(C, mag, rrup)
f3 = self._compute_faulting_mechanism(C, rake, dip)
f4 = self._compute_far_source_soil_effect(C)
f5 = self._compute_hanging_wall_effect(C, rjb, rrup, dip, mag)
mean = (
C['c1'] + f1 + C['c4'] * np.log(np.sqrt(f2)) + f3 + f4 + f5
)
return mean
def _get_stddevs(self, C, mag, stddev_types, num_sites):
"""
Return standard deviation as defined in eq.11 page 319.
"""
std = C['c16'] + np.zeros(num_sites)
if mag < 7.4:
std -= 0.07 * mag
else:
std -= 0.518
# only the 'total' standard deviation is supported, therefore the
# std is always the same for all types
stddevs = [std for _ in stddev_types]
return stddevs
def _compute_magnitude_scaling(self, C, mag):
"""
Compute and return magnitude scaling term (eq.2, page 319)
"""
return C['c2'] * mag + C['c3'] * (8.5 - mag) ** 2
def _compute_distance_scaling(self, C, mag, rrup):
"""
Compute distance scaling term (eq.3, page 319).
The distance scaling assumes the near-source effect of local site
conditions due to 50% very firm soil and soft rock and 50% firm rock.
"""
g = C['c5'] + C['c6'] * 0.5 + C['c7'] * 0.5
return (
rrup ** 2 +
(np.exp(C['c8'] * mag + C['c9'] * (8.5 - mag) ** 2) * g) ** 2
)
def _compute_faulting_mechanism(self, C, rake, dip):
"""
Compute faulting mechanism term (see eq. 5, page 319).
Reverse faulting is defined as occurring on steep faults (dip > 45)
and rake in (22.5, 157.5).
Thrust faulting is defined as occurring on shallow dipping faults
(dip <=45) and rake in (22.5, 157.5)
"""
# flag for reverse faulting
frv = float((dip > 45) and (22.5 <= rake <= 157.5))
# flag for thrust faulting
fth = float((dip <= 45) and (22.5 <= rake <= 157.5))
return C['c10'] * frv + C['c11'] * fth
def _compute_far_source_soil_effect(self, C):
"""
Compute far-source effect of local site conditions (see eq. 6,
page 319) assuming 'firm rock' conditions.
"""
return C['c14']
def _compute_hanging_wall_effect(self, C, rjb, rrup, dip, mag):
"""
Compute hanging-wall effect (see eq. 7, 8, 9 and 10 page 319).
        Considers the correct version of equation 8, as given in the erratum and not
in the original paper.
"""
# eq. 8 (to be noticed that the USGS-NSHMP implementation defines
# the hanging-wall term for all rjb distances, while in the original
# manuscript, hw is computed only for rjb < 5). Again the 'firm rock'
# is considered
hw = np.zeros_like(rjb)
if dip <= 70.:
hw = (5. - rjb) / 5.
# eq. 9
f_m = 1 if mag > 6.5 else mag - 5.5
        # eq. 10
f_rrup = C['c15'] + np.zeros_like(rrup)
idx = rrup < 8
f_rrup[idx] *= rrup[idx] / 8
# eq. 7 (to be noticed that the f3 factor is not included
# while this is defined in the original manuscript)
f_hw = hw * f_m * f_rrup
return f_hw
#: Coefficient table (table 4, page 321. Coefficients for horizontal
#: component and for corrected PGA)
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT c1 c2 c3 c4 c5 c6 c7 c8 c9 c10 c11 c12 c13 c14 c15 c16
pga -4.033 0.812 0.036 -1.061 0.041 -0.005 -0.018 0.766 0.034 0.343 0.351 -0.123 -0.138 -0.289 0.370 0.920
0.10 -2.661 0.812 0.060 -1.308 0.166 -0.009 -0.068 0.621 0.046 0.224 0.313 -0.146 -0.253 -0.299 0.370 0.958
0.20 -2.771 0.812 0.030 -1.153 0.098 -0.014 -0.038 0.704 0.026 0.296 0.342 -0.148 -0.183 -0.330 0.370 0.981
0.30 -2.999 0.812 0.007 -1.080 0.059 -0.007 -0.022 0.752 0.007 0.359 0.385 -0.162 -0.157 -0.453 0.370 0.984
0.50 -3.556 0.812 -0.035 -0.964 0.023 -0.002 -0.004 0.842 -0.036 0.406 0.479 -0.122 -0.130 -0.528 0.370 0.990
1.0 -3.867 0.812 -0.101 -0.964 0.019 0 0 0.842 -0.105 0.329 0.338 -0.073 -0.072 -0.607 0.281 1.021
2.0 -4.311 0.812 -0.180 -0.964 0.019 0 0 0.842 -0.187 0.060 0.064 -0.124 -0.116 -0.649 0.160 1.021
""")
| [
"[email protected]"
] | |
de3acc9720419a15a1a42835f76a34d6293154c3 | 16c77266859989d156fe3f4d0ce3a37a1898ad38 | /dacc/xls/write.py | 1fad19e6f9792761fed509ba748792ebd263a457 | [
"MIT"
] | permissive | SRHerzog/ut | 92620e66be2ea9707d9cd3cf390179326ed2eefe | 894bd5607eb76676aaea7a37ed8a91b5fb5e805e | refs/heads/master | 2021-06-30T19:15:46.131299 | 2017-09-15T20:47:35 | 2017-09-15T20:47:35 | 103,696,926 | 0 | 0 | null | 2017-09-15T20:08:10 | 2017-09-15T20:08:10 | null | UTF-8 | Python | false | false | 3,367 | py | __author__ = 'thor'
import os
import pandas as pd
from pandas import ExcelWriter
from openpyxl import load_workbook
from openpyxl.reader.excel import InvalidFileException
try:
from xlwings import Workbook, Sheet
except ImportError as e:
print(e)
def multiple_dfs_to_multiple_sheets(df_list, xls_filepath, sheet_list=None, **kwargs):
"""
Writes multiple dataframes in different excel sheets.
Input:
* xls_filepath: The excel file to write into
* And then there's several choices:
* df_list (a list of dataframes) and sheet_list (a list of corresponding names)
* df_list = a list of {sheet_name: dataframe}
* df_list = a list of (sheet_name, dataframe) tuples, when the order of the sheets matters)
--> If no sheet names are given, the function either gives the name of the dataframe (if any), or
simply iterates over sheet numbers...
"""
if sheet_list is None:
if isinstance(df_list, dict):
# df_list, sheet_list = zip(df_list.values(), df_list.keys())
df_list, sheet_list = df_list.values(), df_list.keys()
elif isinstance(df_list[0], tuple):
sheet_list = map(lambda x: x[0], df_list)
df_list = map(lambda x: x[1], df_list)
else:
sheet_list = []
for i, df in enumerate(df_list):
name = df.name
if not name:
name = "sheet {}".format(i)
sheet_list.append(name)
writer = ExcelWriter(xls_filepath)
for df, sheet_name in zip(df_list, sheet_list):
df.to_excel(writer, sheet_name, **kwargs)
writer.save()
def df_to_excel_without_overwriting_it(df, xls_filepath, sheet_name, **kwargs):
"""
write df to an excel sheet without overwriting the whole excel file if it exists
(may need to create the excel with some data in it already for this to work)
"""
try:
book = load_workbook(xls_filepath)
writer = pd.ExcelWriter(xls_filepath, engine='openpyxl')
writer.book = book
writer.sheets = dict((ws.title, ws) for ws in book.worksheets)
try:
df.to_excel(excel_writer=writer, sheet_name=sheet_name, **kwargs)
except TypeError:
df = _replace_non_numeric_non_strings_with_strings(df)
df.to_excel(excel_writer=writer, sheet_name=sheet_name, **kwargs)
writer.save()
except InvalidFileException:
try:
df.to_excel(excel_writer=xls_filepath, sheet_name=sheet_name, **kwargs)
except TypeError:
df = _replace_non_numeric_non_strings_with_strings(df)
df.to_excel(excel_writer=writer, sheet_name=sheet_name, **kwargs)
def clear_sheet_contents_without_changing_formatting(xls_filepath, sheet_name):
    if os.path.exists(xls_filepath):  # else do nothing
with Workbook(fullname=xls_filepath, app_visible=False) as wkb:
Sheet(sheet=sheet_name, wkb=wkb).clear_contents()
def _replace_non_numeric_non_strings_with_strings(df):
index_names = df.index.names
df = df.reset_index(drop=False, inplace=False)
for c in df.columns:
if df[c].dtype.name == 'object':
if not isinstance(df[c].iloc[0], basestring):
df[c] = df[c].apply(str)
df = df.set_index(index_names)
return df
| [
"[email protected]"
] |