Column | Type | Observed range | Nulls
---|---|---|---
blob_id | string | length 40–40 |
directory_id | string | length 40–40 |
path | string | length 3–616 |
content_id | string | length 40–40 |
detected_licenses | list | length 0–112 |
license_type | string | 2 classes |
repo_name | string | length 5–115 |
snapshot_id | string | length 40–40 |
revision_id | string | length 40–40 |
branch_name | string | 777 classes |
visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
github_id | int64 | 4.92k – 681M | ⌀
star_events_count | int64 | 0 – 209k |
fork_events_count | int64 | 0 – 110k |
gha_license_id | string | 22 classes |
gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 | ⌀
gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 | ⌀
gha_language | string | 149 classes |
src_encoding | string | 26 classes |
language | string | 1 class |
is_vendor | bool | 2 classes |
is_generated | bool | 2 classes |
length_bytes | int64 | 3 – 10.2M |
extension | string | 188 classes |
content | string | length 3 – 10.2M |
authors | list | length 1–1 |
author_id | string | length 1–132 |
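
As an illustration of how these columns might be consumed, the sketch below streams the dataset with the `datasets` library and keeps small, permissively licensed files that are neither vendored nor generated. The dataset ID `org/code-dataset` is a placeholder rather than the real Hub path, and the size threshold is an arbitrary assumption; only column names shown in the schema above are used.

```python
from itertools import islice

from datasets import load_dataset  # pip install datasets

# Placeholder ID -- replace with the actual Hub path of this dataset.
rows = load_dataset("org/code-dataset", split="train", streaming=True)

def keep(row):
    # Small, permissively licensed files that are neither vendored nor generated.
    return (
        row["license_type"] == "permissive"
        and not row["is_vendor"]
        and not row["is_generated"]
        and row["length_bytes"] < 100_000
    )

# Stream lazily so the large `content` strings are never all in memory at once.
for row in islice(filter(keep, rows), 5):
    print(row["repo_name"], row["path"], row["length_bytes"])
```

The preview rows follow in the table below.
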
blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1444fabc87c6268c4d00f05ceae669c89e1d8f14 | a2ac73af04a07bb070cd85c88778608b561dd3e4 | /addons/account_analytic_default/__init__.py | 2572f5f1572ae78ee423ba7fc1e6ff5727c8bdb2 | []
| no_license | sannareddy/openerp-heimai | c849586d6099cc7548dec8b3f1cc7ba8be49594a | 58255ecbcea7bf9780948287cf4551ed6494832a | refs/heads/master | 2021-01-15T21:34:46.162550 | 2014-05-13T09:20:37 | 2014-05-13T09:20:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71 | py | /usr/share/pyshared/openerp/addons/account_analytic_default/__init__.py | [
"[email protected]"
]
| |
9f68602bf7b15c30e7a42b0271be92782200d2a9 | 1f7b5698dfa57481d88b0cb04f1d2169b06e4970 | /tensorflow/contrib/eager/python/checkpointable_utils.py | 1fa150f3c6d20bcb6a2c0e387091796131d4f323 | [
"Apache-2.0"
]
| permissive | hejie/tensorflow | 8fbc5c753078be923c0d054a2d33a1514f6f5229 | 3a499ab4483faa486332ce0b613fe207db08ceca | refs/heads/master | 2020-04-11T06:15:53.430384 | 2018-03-08T01:15:21 | 2018-03-08T01:15:21 | 124,330,100 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36,751 | py | """Utilities for working with Checkpointable objects."""
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import weakref
from tensorflow.contrib.eager.proto import checkpointable_object_graph_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import session as session_lib
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import checkpointable as core_checkpointable
from tensorflow.python.training import checkpointable_utils as core_checkpointable_utils
from tensorflow.python.training import optimizer as optimizer_lib
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.util import deprecation
_ESCAPE_CHAR = "." # For avoiding conflicts with user-specified names.
# Keyword for identifying that the next bit of a checkpoint variable name is a
# slot name. Checkpoint names for slot variables look like:
#
# <path to variable>/<_OPTIMIZER_SLOTS_NAME>/<path to optimizer>/<slot name>
#
# Where <path to variable> is a full path from the checkpoint root to the
# variable being slotted for.
_OPTIMIZER_SLOTS_NAME = _ESCAPE_CHAR + "OPTIMIZER_SLOT"
# Keyword for separating the path to an object from the name of an
# attribute in checkpoint names. Used like:
# <path to variable>/<_OBJECT_ATTRIBUTES_NAME>/<name of attribute>
_OBJECT_ATTRIBUTES_NAME = _ESCAPE_CHAR + "ATTRIBUTES"
# Key where the object graph proto is saved in a TensorBundle
_OBJECT_GRAPH_PROTO_KEY = "_CHECKPOINTABLE_OBJECT_GRAPH"
# TODO(allenl): If this ends up in a public API, consider adding LINT.IfChange
# or consolidating the implementation with get_variable.
def _default_getter(name, shape, dtype, initializer=None,
partition_info=None, **kwargs):
"""A pared-down version of get_variable which does not reuse variables."""
dtype = dtypes.as_dtype(dtype)
shape_object = tensor_shape.as_shape(shape)
with ops.init_scope():
if initializer is None:
initializer, initializing_from_value = (
variable_scope._get_default_variable_store()._get_default_initializer( # pylint: disable=protected-access
name=name, shape=shape_object, dtype=dtype))
else:
initializing_from_value = not callable(initializer)
# Same logic as get_variable
variable_dtype = dtype.base_dtype
if initializing_from_value:
if shape is not None:
raise ValueError("If initializer is a constant, do not specify shape.")
initial_value = initializer
else:
# Instantiate initializer if provided initializer is a type object.
if isinstance(initializer, type(init_ops.Initializer)):
initializer = initializer(dtype=dtype)
def initial_value():
return initializer(
shape_object.as_list(), dtype=dtype, partition_info=partition_info)
return resource_variable_ops.ResourceVariable(
initial_value=initial_value,
name=name,
dtype=variable_dtype,
**kwargs
)
def add_variable(checkpointable, name, shape=None, dtype=dtypes.float32,
initializer=None):
"""Add a variable to a Checkpointable with no scope influence."""
return checkpointable._add_variable_with_custom_getter( # pylint: disable=protected-access
name=name, shape=shape, dtype=dtype,
initializer=initializer, getter=_default_getter)
def _breadth_first_checkpointable_traversal(root_checkpointable):
"""Find shortest paths to all variables owned by dependencies of root."""
bfs_sorted = []
to_visit = collections.deque([root_checkpointable])
path_to_root = {root_checkpointable: ()}
while to_visit:
current_checkpointable = to_visit.popleft()
current_checkpointable._maybe_initialize_checkpointable() # pylint: disable=protected-access
bfs_sorted.append(current_checkpointable)
for child_checkpointable in (
current_checkpointable._checkpoint_dependencies): # pylint: disable=protected-access
if child_checkpointable.ref not in path_to_root:
path_to_root[child_checkpointable.ref] = (
path_to_root[current_checkpointable] + (child_checkpointable,))
to_visit.append(child_checkpointable.ref)
return bfs_sorted, path_to_root
def _escape_local_name(name):
# We need to support slashes in local names for compatibility, since this
# naming scheme is being patched in to things like Layer.add_variable where
# slashes were previously accepted. We also want to use slashes to indicate
# edges traversed to reach the variable, so we escape forward slashes in
# names.
return (name.replace(_ESCAPE_CHAR, _ESCAPE_CHAR + _ESCAPE_CHAR)
.replace(r"/", _ESCAPE_CHAR + "S"))
def _object_prefix_from_path(path_to_root):
return "/".join(
(_escape_local_name(checkpointable.name)
for checkpointable in path_to_root))
def _slot_variable_naming_for_optimizer(optimizer_path):
"""Make a function for naming slot variables in an optimizer."""
# Name slot variables:
#
# <variable name>/<_OPTIMIZER_SLOTS_NAME>/<optimizer path>/<slot name>
#
# where <variable name> is exactly the checkpoint name used for the original
# variable, including the path from the checkpoint root and the local name in
# the object which owns it. Note that we only save slot variables if the
# variable it's slotting for is also being saved.
optimizer_identifier = "/%s/%s/" % (_OPTIMIZER_SLOTS_NAME, optimizer_path)
def _name_slot_variable(variable_path, slot_name):
"""With an optimizer specified, name a slot variable."""
return (variable_path
+ optimizer_identifier
+ _escape_local_name(slot_name))
return _name_slot_variable
def _serialize_slot_variables(checkpointable_objects, node_ids, object_names):
"""Gather and name slot variables."""
non_slot_objects = list(checkpointable_objects)
slot_variables = {}
for checkpointable in non_slot_objects:
if isinstance(checkpointable, optimizer_lib.Optimizer):
naming_scheme = _slot_variable_naming_for_optimizer(
optimizer_path=object_names[checkpointable])
slot_names = checkpointable.get_slot_names()
for slot_name in slot_names:
for original_variable_node_id, original_variable in enumerate(
non_slot_objects):
try:
slot_variable = checkpointable.get_slot(
original_variable, slot_name)
except AttributeError:
slot_variable = None
if slot_variable is None:
continue
slot_variable._maybe_initialize_checkpointable() # pylint: disable=protected-access
if slot_variable._checkpoint_dependencies: # pylint: disable=protected-access
# TODO(allenl): Gather dependencies of slot variables.
raise NotImplementedError(
"Currently only variables with no dependencies can be saved as "
"slot variables. File a feature request if this limitation "
"bothers you.")
if slot_variable in node_ids:
raise NotImplementedError(
"A slot variable was re-used as a dependency of a "
"Checkpointable object. This is not currently allowed. File a "
"feature request if this limitation bothers you.")
checkpoint_name = naming_scheme(
variable_path=object_names[original_variable],
slot_name=slot_name)
object_names[slot_variable] = checkpoint_name
slot_variable_node_id = len(checkpointable_objects)
node_ids[slot_variable] = slot_variable_node_id
checkpointable_objects.append(slot_variable)
slot_variable_proto = (
checkpointable_object_graph_pb2.CheckpointableObjectGraph
.Object.SlotVariableReference(
slot_name=slot_name,
original_variable_node_id=original_variable_node_id,
slot_variable_node_id=slot_variable_node_id))
slot_variables.setdefault(checkpointable, []).append(
slot_variable_proto)
return slot_variables
def _serialize_checkpointables(
checkpointable_objects, node_ids, object_names, slot_variables):
"""Name non-slot `Checkpointable`s and add them to `object_graph_proto`."""
object_graph_proto = (
checkpointable_object_graph_pb2.CheckpointableObjectGraph())
named_saveables = {}
for checkpoint_id, checkpointable in enumerate(checkpointable_objects):
assert node_ids[checkpointable] == checkpoint_id
object_proto = object_graph_proto.nodes.add()
object_proto.slot_variables.extend(slot_variables.get(checkpointable, ()))
object_name = object_names[checkpointable]
for name, saveable in (
checkpointable._gather_saveables_for_checkpoint().items()): # pylint: disable=protected-access
attribute = object_proto.attributes.add()
attribute.name = name
attribute.checkpoint_key = "%s/%s/%s" % (
object_name, _OBJECT_ATTRIBUTES_NAME, _escape_local_name(name))
# Figure out the name-based Saver's name for this variable.
saver_dict = saver_lib.BaseSaverBuilder.OpListToDict(
[saveable], convert_variable_to_tensor=False)
attribute.full_name, = saver_dict.keys()
named_saveables[attribute.checkpoint_key] = saveable
for child in checkpointable._checkpoint_dependencies: # pylint: disable=protected-access
child_proto = object_proto.children.add()
child_proto.node_id = node_ids[child.ref]
child_proto.local_name = child.name
return named_saveables, object_graph_proto
def _serialize_object_graph(root_checkpointable):
"""Determine checkpoint keys for variables and build a serialized graph.
Non-slot variables are keyed based on a shortest path from the root saveable
to the object which owns the variable (i.e. the one which called
`Checkpointable._add_variable` to create it).
Slot variables are keyed based on a shortest path to the variable being
slotted for, a shortest path to their optimizer, and the slot name.
Args:
root_checkpointable: A `Checkpointable` object whose variables (including
the variables of dependencies, recursively) should be saved.
Returns:
A tuple of (named_variables, object_graph_proto):
named_variables: A dictionary mapping names to variable objects.
object_graph_proto: A CheckpointableObjectGraph protocol buffer containing
the serialized object graph and variable references.
Raises:
ValueError: If there are invalid characters in an optimizer's slot names.
"""
checkpointable_objects, path_to_root = (
_breadth_first_checkpointable_traversal(root_checkpointable))
object_names = {
obj: _object_prefix_from_path(path)
for obj, path in path_to_root.items()}
node_ids = {node: node_id for node_id, node
in enumerate(checkpointable_objects)}
slot_variables = _serialize_slot_variables(
checkpointable_objects=checkpointable_objects,
node_ids=node_ids,
object_names=object_names)
return _serialize_checkpointables(
checkpointable_objects=checkpointable_objects,
node_ids=node_ids,
object_names=object_names,
slot_variables=slot_variables)
def gather_initializers(root_checkpointable):
"""Traverse the object graph and find initialization ops.
Looks for `Checkpointable` objects which are dependencies of
`root_checkpointable` and which have an `initializer` property. Includes
initializers for slot variables only if the variable they are slotting for and
the optimizer are dependencies of `root_checkpointable` (i.e. if they would be
saved with a checkpoint).
Args:
root_checkpointable: A `Checkpointable` object to gather initializers for.
Returns:
A list of initialization ops.
"""
# TODO(allenl): Extract out gathering logic so the naming logic doesn't have
# to run.
checkpointable_objects, path_to_root = (
_breadth_first_checkpointable_traversal(root_checkpointable))
object_names = {
obj: _object_prefix_from_path(path)
for obj, path in path_to_root.items()}
node_ids = {node: node_id for node_id, node
in enumerate(checkpointable_objects)}
_serialize_slot_variables(
checkpointable_objects=checkpointable_objects,
node_ids=node_ids,
object_names=object_names)
return [c.initializer for c in checkpointable_objects
if hasattr(c, "initializer") and c.initializer is not None]
class _NoRestoreSaveable(saver_lib.BaseSaverBuilder.SaveableObject):
def __init__(self, tensor, name):
spec = saver_lib.BaseSaverBuilder.SaveSpec(tensor, "", name)
super(_NoRestoreSaveable, self).__init__(tensor, [spec], name)
def restore(self, restored_tensors, restored_shapes):
return control_flow_ops.no_op()
class _LoadStatus(object):
"""Abstract base for load status callbacks."""
@abc.abstractmethod
def assert_consumed(self):
"""Raises an exception unless a non-trivial restoration has completed."""
pass
@abc.abstractmethod
def run_restore_ops(self, session=None):
"""Runs restore ops from the checkpoint. Requires a valid checkpoint."""
pass
@abc.abstractmethod
def initialize_or_restore(self, session=None):
"""Runs restore ops from the checkpoint, or initializes variables."""
pass
class CheckpointLoadStatus(_LoadStatus):
"""Checks the status of checkpoint loading and manages restore ops.
Returned from `Saver.restore`. Since `restore` may defer the loading of values
in the checkpoint which don't yet have corresponding Python objects,
`CheckpointLoadStatus` provides a callback to verify that checkpoint loading
is complete (`assert_consumed`).
When graph building, `restore` does not run restore ops itself since their
creation may be deferred. The `run_restore_ops` method must be called once all
Python objects with values to restore have been created and added to the
dependency graph (this does not necessarily have to be the whole checkpoint;
calling `run_restore_ops` while `assert_consumed` fails is supported and will
partially restore the checkpoint).
See `Saver.restore` for usage examples.
"""
def __init__(self, checkpoint, feed_dict):
self._checkpoint = checkpoint
self._feed_dict = feed_dict
def assert_consumed(self):
"""Asserts that all objects in the checkpoint have been created/matched.
Returns:
`self` for chaining.
Raises:
AssertionError: If there are any Python objects in the dependency graph
which have not been restored from this checkpoint or a later `restore`,
or if there are any checkpointed values which have not been matched to
Python objects.
"""
for node_id, node in enumerate(self._checkpoint.object_graph_proto.nodes):
checkpointable = self._checkpoint.object_by_proto_id.get(node_id, None)
if checkpointable is None:
raise AssertionError("Unresolved object in checkpoint: %s" % (node,))
if checkpointable._update_uid < self._checkpoint.restore_uid: # pylint: disable=protected-access
raise AssertionError(
"Object not assigned a value from checkpoint: %s" % (node,))
if self._checkpoint.slot_restorations:
# Sanity check; this collection should be clear if everything has been
# restored.
raise AssertionError("Unresolved slot restorations: %s" % (
self._checkpoint.slot_restorations,))
if self._checkpoint.unused_attributes:
raise AssertionError(
("Unused attributes in these objects (the attributes exist in the "
"checkpoint but not in the objects): %s") % (
self._checkpoint.unused_attributes.items(),))
return self
def run_restore_ops(self, session=None):
"""Run operations to restore objects in the dependency graph."""
if context.executing_eagerly():
return # Run eagerly
if session is None:
session = ops.get_default_session()
session.run(self._checkpoint.restore_ops, feed_dict=self._feed_dict)
def initialize_or_restore(self, session=None):
"""Alias for `run_restore_ops`.
This method has a sibling in `InitializationOnlyStatus` which instead
initializes variables. That type is returned if no checkpoint is specified
in `Saver.restore`.
Args:
session: The session to run restore ops in. If `None`, uses the default
session.
"""
self.run_restore_ops(session=session)
class InitializationOnlyStatus(_LoadStatus):
"""Returned from `Saver.restore` when no checkpoint has been specified.
Objects of this type have the same `assert_consumed` method as
`CheckpointLoadStatus`, but it always fails. However,
`initialize_or_restore` works on objects of both types, and will
initialize variables in `InitializationOnlyStatus` objects or restore them
otherwise.
"""
def __init__(self, root_checkpointable):
self._root_checkpointable = root_checkpointable
def assert_consumed(self):
"""Assertion for consistency with `CheckpointLoadStatus`. Always fails."""
raise AssertionError(
"No checkpoint specified (save_path=None); nothing is being restored.")
def run_restore_ops(self, session=None):
"""For consistency with `CheckpointLoadStatus`.
Use `initialize_or_restore` for initializing if no checkpoint was passed
to `Saver.restore` and restoring otherwise.
Args:
session: Not used.
"""
raise AssertionError(
"No checkpoint specified, so no restore ops are available "
"(save_path=None to Saver.restore).")
def initialize_or_restore(self, session=None):
"""Runs initialization ops for variables.
Only objects which would be saved by `Saver.save` will be initialized. See
`gather_initializers` for details.
This method does nothing when executing eagerly (initializers get run
eagerly).
Args:
session: The session to run initialization ops in. If `None`, uses the
default session.
"""
if context.executing_eagerly():
return # run eagerly
if session is None:
session = ops.get_default_session()
session.run(gather_initializers(self._root_checkpointable))
_DEPRECATED_RESTORE_INSTRUCTIONS = (
"Restoring a name-based tf.train.Saver checkpoint using the object-based "
"restore API. This mode uses global names to match variables, and so is "
"somewhat fragile. It also adds new restore ops to the graph each time it "
"is called. Prefer re-encoding training checkpoints in the object-based "
"format: run save() on the object-based saver (the same one this message "
"is coming from) and use that checkpoint in the future.")
class NameBasedSaverStatus(_LoadStatus):
"""Status for loading a name-based training checkpoint."""
def __init__(self, object_saver, save_path):
self._object_saver = object_saver
self._save_path = save_path
def assert_consumed(self):
"""Assertion for consistency with `CheckpointLoadStatus`. Always fails."""
raise AssertionError(
"Restoring a name-based checkpoint. No load status is available.")
@deprecation.deprecated(
date=None, instructions=_DEPRECATED_RESTORE_INSTRUCTIONS)
def run_restore_ops(self, session=None):
"""Load the name-based training checkpoint using a new `tf.train.Saver`."""
if session is None and not context.executing_eagerly():
session = ops.get_default_session()
saver_lib.Saver(self._object_saver._global_variable_names()).restore( # pylint: disable=protected-access
sess=session, save_path=self._save_path)
def initialize_or_restore(self, session=None):
"""Alias for `run_restore_ops`."""
self.run_restore_ops(session=session)
class _SessionWithFeedDictAdditions(session_lib.SessionInterface):
"""Pretends to be a session, inserts extra feeds on run()."""
def __init__(self, session, feed_additions):
self._wrapped_session = session
self._feed_additions = feed_additions
def run(self, fetches, feed_dict=None, **kwargs):
if feed_dict is None:
feed_dict = {}
else:
feed_dict = feed_dict.copy()
feed_dict.update(self._feed_additions)
return self._wrapped_session.run(
fetches=fetches, feed_dict=feed_dict, **kwargs)
class CheckpointableSaver(object):
"""Saves and restores a `Checkpointable` object and its dependencies.
See `Checkpointable` for details of dependency management. `Saver` wraps
`tf.train.Saver` for saving, including extra information about the graph of
dependencies between Python objects. When restoring, it uses this information
about the save-time dependency graph to more robustly match objects with their
checkpointed values. When executing eagerly, it supports restoring variables
on object creation (see `Saver.restore`).
Values in a checkpoint are mapped to `Checkpointable` Python objects
(`Variable`s, `Optimizer`s, `Layer`s) based on the names provided when the
checkpoint was written. To avoid breaking existing checkpoints when modifying
a class, dependency names (the names of attributes to which `Checkpointable`
objects are assigned) may not change. These names are local to objects, in
contrast to the `Variable.name`-based save/restore from `tf.train.Saver`, and
so allow additional program transformations.
"""
def __init__(self, root_checkpointable):
"""Configure saving.
Args:
root_checkpointable: The root of the object graph to save/restore. This
object and all of its dependencies are saved in the checkpoint. When
restoring, objects are matched and restored starting from this root.
"""
# Allow passing in a weak reference to avoid reference cycles when
# `Checkpointable` objects save themselves.
self._root_checkpointable_ref = root_checkpointable
if not context.executing_eagerly():
with ops.device("/cpu:0"):
self._file_prefix_placeholder = constant_op.constant("model")
else:
self._file_prefix_placeholder = None
# Op caching for save
self._object_graph_feed_tensor = None
self._last_save_object_graph = None
self._last_save_saver = None
# Op caching for restore
self._object_graph_restore_tensor = None
self._last_restore_object_graph = None
self._last_restore_checkpoint = None
@property
def _root_checkpointable(self):
if isinstance(self._root_checkpointable_ref, weakref.ref):
derefed = self._root_checkpointable_ref()
assert derefed is not None
return derefed
else:
return self._root_checkpointable_ref
def save(self, file_prefix, checkpoint_number=None, session=None):
"""Save a training checkpoint.
The saved checkpoint includes variables created by this object and any
Checkpointable objects it depends on at the time `Saver.save()` is called.
Args:
file_prefix: A prefix to use for the checkpoint filenames
(/path/to/directory/and_a_prefix). Names are generated based on this
prefix and `checkpoint_number`, if provided.
checkpoint_number: An integer variable or Tensor, used to number
checkpoints. Typically this value is saved along with other variables in
training checkpoints, which will happen automatically if it was created
by `root_checkpointable` or one of its dependencies (via
`Checkpointable._add_variable`).
session: The session to evaluate variables in. Ignored when executing
eagerly. If not provided when graph building, the default session is
used.
Returns:
The full path to the checkpoint.
"""
named_variables, graph_proto = _serialize_object_graph(
self._root_checkpointable)
in_graph_mode = not context.executing_eagerly()
if in_graph_mode:
if session is None:
session = ops.get_default_session()
if self._object_graph_feed_tensor is None:
with ops.device("/cpu:0"):
self._object_graph_feed_tensor = constant_op.constant(
"", dtype=dtypes.string)
object_graph_tensor = self._object_graph_feed_tensor
feed_additions = {object_graph_tensor: graph_proto.SerializeToString()}
else:
session = None
with ops.device("/cpu:0"):
object_graph_tensor = constant_op.constant(
graph_proto.SerializeToString(), dtype=dtypes.string)
feed_additions = None
assert _OBJECT_GRAPH_PROTO_KEY not in named_variables
named_variables[_OBJECT_GRAPH_PROTO_KEY] = _NoRestoreSaveable(
tensor=object_graph_tensor,
name=_OBJECT_GRAPH_PROTO_KEY)
if not in_graph_mode or self._last_save_object_graph != graph_proto:
if self._last_save_object_graph is not None and in_graph_mode:
raise NotImplementedError(
"Using a single Saver to save a mutated object graph is not "
"currently supported when graph building. Use a different Saver "
"when the object graph changes (save ops will be duplicated), or "
"file a feature request if this limitation bothers you.")
saver = saver_lib.Saver(var_list=named_variables)
if in_graph_mode:
self._last_save_saver = saver
self._last_save_object_graph = graph_proto
else:
saver = self._last_save_saver
with ops.device("/cpu:0"):
save_path = saver.save(
sess=_SessionWithFeedDictAdditions(
session=session, feed_additions=feed_additions),
save_path=file_prefix,
write_meta_graph=False,
global_step=checkpoint_number)
return save_path
def _global_variable_names(self):
"""Generate a `tf.train.Saver`-style `var_list` using `variable.name`s."""
named_saveables, graph_proto = _serialize_object_graph(
self._root_checkpointable)
saver_names = {}
for object_proto in graph_proto.nodes:
for attribute_proto in object_proto.attributes:
saver_names[attribute_proto.full_name] = named_saveables[
attribute_proto.checkpoint_key]
return saver_names
def restore(self, save_path, session=None):
"""Restore a training checkpoint.
Restores `root_checkpointable` and any objects that it tracks
(transitive). Either assigns values immediately if variables to restore have
been created already, or defers restoration until the variables are
created. Dependencies added to the `root_checkpointable` passed to the
constructor after this call will be matched if they have a corresponding
object in the checkpoint.
When building a graph, restorations are added to the graph but not run. A
session is required to retrieve checkpoint metadata.
To disallow deferred loading, assert immediately that all checkpointed
variables have been matched to variable objects:
```python
saver = Saver(root)
saver.restore(path).assert_consumed()
```
An exception will be raised unless every object was matched and its
variables already exist.
When graph building, `assert_consumed()` indicates that all of the restore
ops which will be created for this checkpoint have been created. They can be
run via the `run_restore_ops()` function of the status object:
```python
saver.restore(path).assert_consumed().run_restore_ops()
```
If the checkpoint has not been consumed completely, then the list of restore
ops will grow as more objects are added to the dependency graph.
Name-based `tf.train.Saver` checkpoints can be loaded using this
method. There is no deferred loading, and names are used to match
variables. No restore ops are created/run until `run_restore_ops()` or
`initialize_or_restore()` are called on the returned status object, even
when executing eagerly. Re-encode name-based checkpoints using this
object-based `Saver.save` as soon as possible.
Args:
save_path: The path to the checkpoint, as returned by `save` or
`tf.train.latest_checkpoint`. If None (as when there is no latest
checkpoint for `tf.train.latest_checkpoint` to return), returns an
object which may run initializers for objects in the dependency
graph. If the checkpoint was written by the name-based `tf.train.Saver`,
names are used to match variables.
session: The session to retrieve metadata with. Ignored when executing
eagerly. If not provided when graph building, the default session is
used.
Returns:
A load status object, which can be used to make assertions about the
status of checkpoint restoration and run initialization/restore ops
(of type `CheckpointLoadStatus`, or `InitializationOnlyStatus` if
`save_path` is `None`).
If `save_path` points to a name-based checkpoint, a `NameBasedSaverStatus`
object is returned which runs restore ops from a name-based saver.
"""
if save_path is None:
return InitializationOnlyStatus(self._root_checkpointable)
in_graph_mode = not context.executing_eagerly()
if in_graph_mode:
if session is None:
session = ops.get_default_session()
file_prefix_tensor = self._file_prefix_placeholder
file_prefix_feed_dict = {self._file_prefix_placeholder: save_path}
else:
session = None
with ops.device("/cpu:0"):
file_prefix_tensor = constant_op.constant(save_path)
file_prefix_feed_dict = None
try:
if not in_graph_mode or self._object_graph_restore_tensor is None:
with ops.device("/cpu:0"):
object_graph_string, = io_ops.restore_v2(
prefix=file_prefix_tensor,
tensor_names=[_OBJECT_GRAPH_PROTO_KEY],
shape_and_slices=[""],
dtypes=[dtypes.string],
name="object_graph_proto_read")
if in_graph_mode:
self._object_graph_restore_tensor = object_graph_string
if in_graph_mode:
object_graph_string = session.run(
self._object_graph_restore_tensor,
feed_dict=file_prefix_feed_dict)
else:
object_graph_string = object_graph_string.numpy()
except errors_impl.NotFoundError:
# The object graph proto does not exist in this checkpoint. Try again with
# name-based saving.
return NameBasedSaverStatus(self, save_path)
object_graph_proto = (
checkpointable_object_graph_pb2.CheckpointableObjectGraph())
object_graph_proto.ParseFromString(object_graph_string)
if in_graph_mode and object_graph_proto == self._last_restore_object_graph:
checkpoint = self._last_restore_checkpoint
else:
if in_graph_mode:
dtype_map = None
else:
reader = pywrap_tensorflow.NewCheckpointReader(save_path)
dtype_map = reader.get_variable_to_dtype_map()
checkpoint = core_checkpointable_utils._Checkpoint( # pylint: disable=protected-access
object_graph_proto=object_graph_proto,
save_path=file_prefix_tensor,
dtype_map=dtype_map)
if in_graph_mode:
if self._last_restore_object_graph is not None:
raise NotImplementedError(
"Using a single Saver to restore different object graphs is not "
"currently supported when graph building. Use a different Saver "
"for each object graph (restore ops will be duplicated), or "
"file a feature request if this limitation bothers you.")
self._last_restore_checkpoint = checkpoint
self._last_restore_object_graph = object_graph_proto
core_checkpointable._CheckpointPosition( # pylint: disable=protected-access
checkpoint=checkpoint, proto_id=0).restore(self._root_checkpointable)
load_status = CheckpointLoadStatus(
checkpoint, feed_dict=file_prefix_feed_dict)
return load_status
class Checkpoint(core_checkpointable.Checkpointable):
"""A utility class which groups `Checkpointable` objects.
Accepts arbitrary keyword arguments to its constructor and saves those values
with a checkpoint. Maintains a `save_counter` for numbering checkpoints.
Example usage:
```python
import tensorflow as tf
import tensorflow.contrib.eager as tfe
import os
checkpoint_directory = "/tmp/training_checkpoints"
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
root = tfe.Checkpoint(optimizer=optimizer, model=model)
root.restore(tf.train.latest_checkpoint(checkpoint_directory))
for _ in range(num_training_steps):
optimizer.minimize( ... )
root.save(file_prefix=checkpoint_prefix)
```
For more manual control over saving, use `tfe.CheckpointableSaver` directly.
Attributes:
save_counter: Incremented when `save()` is called. Used to number
checkpoints.
"""
def __init__(self, **kwargs):
"""Group objects into a training checkpoint.
Args:
**kwargs: Keyword arguments are set as attributes of this object, and are
saved with the checkpoint. Attribute values must derive from
`CheckpointableBase`.
Raises:
ValueError: If objects in `kwargs` are not Checkpointable.
"""
super(Checkpoint, self).__init__()
for k, v in sorted(kwargs.items(), key=lambda item: item[0]):
if not isinstance(v, core_checkpointable.CheckpointableBase):
raise ValueError(
("`Checkpoint` was expecting an object derived from "
"`CheckpointableBase`, got %s.") % (v,))
setattr(self, k, v)
self._save_counter = None # Created lazily for restore-on-create.
self._saver = CheckpointableSaver(weakref.ref(self))
def _maybe_create_save_counter(self):
"""Create a save counter if it does not yet exist."""
if self._save_counter is None:
# Initialized to 0 and incremented before saving.
with ops.device("/cpu:0"):
self._save_counter = add_variable(
self, name="save_counter", initializer=0, dtype=dtypes.int64)
@property
def save_counter(self):
"""An integer variable which starts at zero and is incremented on save.
Used to number checkpoints.
Returns:
The save counter variable.
"""
self._maybe_create_save_counter()
return self._save_counter
def save(self, file_prefix, session=None):
"""Save a checkpoint. Wraps `tfe.CheckpointableSaver.save`."""
in_graph_mode = not context.executing_eagerly()
if in_graph_mode:
if session is None:
session = ops.get_default_session()
if self._save_counter is None:
# When graph building, if this is a new save counter variable then it
# needs to be initialized before assign_add. This is only an issue if
# restore() has not been called first.
session.run(self.save_counter.initializer)
with ops.colocate_with(self.save_counter):
assign_op = self.save_counter.assign_add(1)
if in_graph_mode:
session.run(assign_op)
return self._saver.save(
file_prefix=file_prefix,
checkpoint_number=self.save_counter,
session=session)
def restore(self, save_path):
"""Restore a checkpoint. Wraps `tfe.CheckpointableSaver.restore`."""
status = self._saver.restore(save_path=save_path)
# Create the save counter now so it gets initialized with other variables
# when graph building. Creating it earlier would lead to double
# initialization when executing eagerly.
self._maybe_create_save_counter()
return status
| [
"[email protected]"
]
| |
9ee45cef427b92dc2368563e25db132f1046a80f | d08cf46d3e16ab8e6a958731168469ba38daf069 | /sandbox/kdv.py | 7c7536cbb6e3c7b1398b0a73cc5dac09cb962490 | [
"BSD-2-Clause"
]
| permissive | spectralDNS/shenfun | ce808edc5258c896f2cccfbd88e67153e3f621c9 | bcda39d8d8e4741df1cafe719d81733cc1024def | refs/heads/master | 2023-07-27T20:29:57.075970 | 2023-07-11T12:33:04 | 2023-07-11T12:33:04 | 79,914,066 | 190 | 46 | BSD-2-Clause | 2022-05-11T19:10:33 | 2017-01-24T13:29:02 | Python | UTF-8 | Python | false | false | 2,876 | py | import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import PolyCollection
from shenfun import *
from mpl_toolkits.mplot3d import axes3d
N = 256
T = FunctionSpace(N, 'F', dtype='d')
#Tp = T
Tp = T.get_dealiased()
x = T.points_and_weights()[0]
u = TrialFunction(T)
v = TestFunction(T)
k = T.wavenumbers(scaled=True, eliminate_highest_freq=True)
u_ = Array(T)
Up = Array(Tp)
u_hat = Function(T)
def LinearRHS(self, u, **params):
return -Dx(u, 0, 3)
def NonlinearRHS(self, u, u_hat, rhs, **params):
rhs.fill(0)
Up[:] = Tp.backward(u_hat, Up)
rhs = Tp.forward(-0.5*Up**2, rhs)
rhs *= 1j*k
return rhs
# initialize
A = 25.
B = 16.
u_[:] = 3*A**2/np.cosh(0.5*A*(x-np.pi+2))**2 + 3*B**2/np.cosh(0.5*B*(x-np.pi+1))**2
u_hat = T.forward(u_, u_hat)
data = []
tdata = []
plt.figure()
def update(self, u, u_hat, t, tstep, plot_step, **params):
if tstep % plot_step == 0 and plot_step > 0:
u = T.backward(u_hat, u)
plt.plot(x, u)
plt.draw()
plt.pause(1e-6)
data.append(u.copy())
dt = 0.01/N**2
end_time = 0.006
par = {'plot_step': int(end_time/25/dt)}
integrator = ETDRK4(T, L=LinearRHS, N=NonlinearRHS, update=update, **par)
integrator.setup(dt)
u_hat = integrator.solve(u_, u_hat, dt, (0, end_time))
t = end_time
s = []
for d in data:
s.append(np.vstack((x, d)).T)
N = len(data)
tdata = np.linspace(0, end_time, N)
ddata = np.array(data)
fig = plt.figure(figsize=(8, 3))
#ax = axes3d.Axes3D(fig)
ax = fig.add_subplot(projection='3d')
X, Y = np.meshgrid(x, tdata)
ax.plot_wireframe(X, Y, ddata, cstride=1000)
ax.set_xlim(0, 2*np.pi)
ax.set_ylim(0, t)
ax.set_zlim(0, 2000)
ax.view_init(65, -105)
ax.set_zticks([0, 2000])
ax.grid()
fig2 = plt.figure(figsize=(8,3))
ax2 = fig2.add_subplot(projection='3d')
poly = PolyCollection(s, facecolors=(1, 1, 1, 1), edgecolors='b')
ax2.add_collection3d(poly, zs=tdata, zdir='y')
ax2.set_xlim3d(0, 2*np.pi)
ax2.set_ylim3d(0, t)
ax2.set_zlim3d(0, 2000)
ax2.view_init(65, -105)
ax2.set_zticks([0, 2000])
ax2.grid()
fig3 = plt.figure(figsize=(8, 3))
ax3 = fig3.add_subplot(projection='3d')
X, Y = np.meshgrid(x, tdata)
ax3.plot_surface(X, Y, ddata, cstride=1000, rstride=1, color='w')
ax3.set_xlim(0, 2*np.pi)
ax3.set_ylim(0, t)
ax3.set_zlim(0, 2000)
ax3.view_init(65, -105)
ax3.set_zticks([0, 2000])
ax3.grid()
fig4 = plt.figure(figsize=(8,3))
ax4 = fig4.add_subplot(projection='3d')
for i in range(len(tdata)):
ax4.plot(x, ddata[i], tdata[i])
ax4.view_init(65, -105)
ax4.set_zticks([0, 2000])
ax4.grid()
fig5 = plt.figure(facecolor='k')
ax5 = fig5.add_subplot(111, facecolor='k')
N = len(tdata)
for i in range(N):
offset = (N-i-1)*200
ax5.plot(x, ddata[N-i-1]+offset, 'w', lw=2, zorder=(i+1)*2)
ax5.fill_between(x, ddata[N-i-1]+offset, offset, facecolor='k', lw=0, zorder=(i+1)*2-1)
fig5.savefig('KdV.png')
plt.show()
| [
"[email protected]"
]
| |
93051938722fe70f250b9d683fc85b4c1c61b3e5 | 596e92d0d484b6e7eee6d322e72e52748fdeaa5d | /sportsdata/soccer_scores/models/soccer_scores_season.py | 7a99fa0863fbaa4720a5ca2f39d40b300cc293c7 | []
| no_license | scottypate/sportsdata | f5f61ddc7eb482883f93737c6ce73dd814ed4336 | a07955ab50bf4fff1ce114ed9895095ff770c473 | refs/heads/main | 2023-08-18T16:51:56.452678 | 2021-10-22T12:44:08 | 2021-10-22T12:44:08 | 420,062,350 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,952 | py | # coding: utf-8
"""
Soccer v3 Scores
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class SoccerScoresSeason(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'season_id': 'int',
'competition_id': 'int',
'season': 'int',
'name': 'str',
'competition_name': 'str',
'start_date': 'str',
'end_date': 'str',
'current_season': 'bool',
'rounds': 'list[SoccerScoresRound]'
}
attribute_map = {
'season_id': 'SeasonId',
'competition_id': 'CompetitionId',
'season': 'Season',
'name': 'Name',
'competition_name': 'CompetitionName',
'start_date': 'StartDate',
'end_date': 'EndDate',
'current_season': 'CurrentSeason',
'rounds': 'Rounds'
}
def __init__(self, season_id=None, competition_id=None, season=None, name=None, competition_name=None, start_date=None, end_date=None, current_season=None, rounds=None): # noqa: E501
"""SoccerScoresSeason - a model defined in Swagger""" # noqa: E501
self._season_id = None
self._competition_id = None
self._season = None
self._name = None
self._competition_name = None
self._start_date = None
self._end_date = None
self._current_season = None
self._rounds = None
self.discriminator = None
if season_id is not None:
self.season_id = season_id
if competition_id is not None:
self.competition_id = competition_id
if season is not None:
self.season = season
if name is not None:
self.name = name
if competition_name is not None:
self.competition_name = competition_name
if start_date is not None:
self.start_date = start_date
if end_date is not None:
self.end_date = end_date
if current_season is not None:
self.current_season = current_season
if rounds is not None:
self.rounds = rounds
@property
def season_id(self):
"""Gets the season_id of this SoccerScoresSeason. # noqa: E501
:return: The season_id of this SoccerScoresSeason. # noqa: E501
:rtype: int
"""
return self._season_id
@season_id.setter
def season_id(self, season_id):
"""Sets the season_id of this SoccerScoresSeason.
:param season_id: The season_id of this SoccerScoresSeason. # noqa: E501
:type: int
"""
self._season_id = season_id
@property
def competition_id(self):
"""Gets the competition_id of this SoccerScoresSeason. # noqa: E501
:return: The competition_id of this SoccerScoresSeason. # noqa: E501
:rtype: int
"""
return self._competition_id
@competition_id.setter
def competition_id(self, competition_id):
"""Sets the competition_id of this SoccerScoresSeason.
:param competition_id: The competition_id of this SoccerScoresSeason. # noqa: E501
:type: int
"""
self._competition_id = competition_id
@property
def season(self):
"""Gets the season of this SoccerScoresSeason. # noqa: E501
:return: The season of this SoccerScoresSeason. # noqa: E501
:rtype: int
"""
return self._season
@season.setter
def season(self, season):
"""Sets the season of this SoccerScoresSeason.
:param season: The season of this SoccerScoresSeason. # noqa: E501
:type: int
"""
self._season = season
@property
def name(self):
"""Gets the name of this SoccerScoresSeason. # noqa: E501
:return: The name of this SoccerScoresSeason. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this SoccerScoresSeason.
:param name: The name of this SoccerScoresSeason. # noqa: E501
:type: str
"""
self._name = name
@property
def competition_name(self):
"""Gets the competition_name of this SoccerScoresSeason. # noqa: E501
:return: The competition_name of this SoccerScoresSeason. # noqa: E501
:rtype: str
"""
return self._competition_name
@competition_name.setter
def competition_name(self, competition_name):
"""Sets the competition_name of this SoccerScoresSeason.
:param competition_name: The competition_name of this SoccerScoresSeason. # noqa: E501
:type: str
"""
self._competition_name = competition_name
@property
def start_date(self):
"""Gets the start_date of this SoccerScoresSeason. # noqa: E501
:return: The start_date of this SoccerScoresSeason. # noqa: E501
:rtype: str
"""
return self._start_date
@start_date.setter
def start_date(self, start_date):
"""Sets the start_date of this SoccerScoresSeason.
:param start_date: The start_date of this SoccerScoresSeason. # noqa: E501
:type: str
"""
self._start_date = start_date
@property
def end_date(self):
"""Gets the end_date of this SoccerScoresSeason. # noqa: E501
:return: The end_date of this SoccerScoresSeason. # noqa: E501
:rtype: str
"""
return self._end_date
@end_date.setter
def end_date(self, end_date):
"""Sets the end_date of this SoccerScoresSeason.
:param end_date: The end_date of this SoccerScoresSeason. # noqa: E501
:type: str
"""
self._end_date = end_date
@property
def current_season(self):
"""Gets the current_season of this SoccerScoresSeason. # noqa: E501
:return: The current_season of this SoccerScoresSeason. # noqa: E501
:rtype: bool
"""
return self._current_season
@current_season.setter
def current_season(self, current_season):
"""Sets the current_season of this SoccerScoresSeason.
:param current_season: The current_season of this SoccerScoresSeason. # noqa: E501
:type: bool
"""
self._current_season = current_season
@property
def rounds(self):
"""Gets the rounds of this SoccerScoresSeason. # noqa: E501
:return: The rounds of this SoccerScoresSeason. # noqa: E501
:rtype: list[SoccerScoresRound]
"""
return self._rounds
@rounds.setter
def rounds(self, rounds):
"""Sets the rounds of this SoccerScoresSeason.
:param rounds: The rounds of this SoccerScoresSeason. # noqa: E501
:type: list[SoccerScoresRound]
"""
self._rounds = rounds
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(SoccerScoresSeason, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SoccerScoresSeason):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
]
| |
e2c2d430af0dbd4f42ca76c107638345f45fd6b9 | 90a7efad0e02634fe46602cf6a9c42ce72af1823 | /udify/dataset_readers/universal_dependencies.py | cbf693061fe0bcae8de295ecbaadcd082e7efca9 | [
"MIT"
]
| permissive | foxik/udify | 563237686d1b0833ed636e48ba5cb20dced49be6 | 99f9a8d220edf808c7f2d7e32112227f21c7084a | refs/heads/master | 2020-09-02T16:26:05.757090 | 2019-11-02T23:21:03 | 2019-11-02T23:21:03 | 219,258,945 | 0 | 0 | MIT | 2019-11-03T06:03:58 | 2019-11-03T06:03:58 | null | UTF-8 | Python | false | false | 6,063 | py | """
A Dataset Reader for Universal Dependencies, with support for multiword tokens and special handling for NULL "_" tokens
"""
from typing import Dict, Tuple, List, Any, Callable
from overrides import overrides
from udify.dataset_readers.parser import parse_line, DEFAULT_FIELDS
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import Field, TextField, SequenceLabelField, MetadataField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.tokenizers import Token
from udify.dataset_readers.lemma_edit import gen_lemma_rule
import logging
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def lazy_parse(text: str, fields: Tuple[str, ...]=DEFAULT_FIELDS):
for sentence in text.split("\n\n"):
if sentence:
# TODO: upgrade conllu library
yield [parse_line(line, fields)
for line in sentence.split("\n")
if line and not line.strip().startswith("#")]
@DatasetReader.register("udify_universal_dependencies")
class UniversalDependenciesDatasetReader(DatasetReader):
def __init__(self,
token_indexers: Dict[str, TokenIndexer] = None,
lazy: bool = False) -> None:
super().__init__(lazy)
self._token_indexers = token_indexers or {'tokens': SingleIdTokenIndexer()}
@overrides
def _read(self, file_path: str):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
with open(file_path, 'r') as conllu_file:
logger.info("Reading UD instances from conllu dataset at: %s", file_path)
for annotation in lazy_parse(conllu_file.read()):
# CoNLLU annotations sometimes add back in words that have been elided
# in the original sentence; we remove these, as we're just predicting
# dependencies for the original sentence.
# We filter by None here as elided words have a non-integer word id,
# and are replaced with None by the conllu python library.
multiword_tokens = [x for x in annotation if x["multi_id"] is not None]
annotation = [x for x in annotation if x["id"] is not None]
if len(annotation) == 0:
continue
def get_field(tag: str, map_fn: Callable[[Any], Any] = None) -> List[Any]:
map_fn = map_fn if map_fn is not None else lambda x: x
return [map_fn(x[tag]) if x[tag] is not None else "_" for x in annotation if tag in x]
# Extract multiword token rows (not used for prediction, purely for evaluation)
ids = [x["id"] for x in annotation]
multiword_ids = [x["multi_id"] for x in multiword_tokens]
multiword_forms = [x["form"] for x in multiword_tokens]
words = get_field("form")
lemmas = get_field("lemma")
lemma_rules = [gen_lemma_rule(word, lemma)
if lemma != "_" else "_"
for word, lemma in zip(words, lemmas)]
upos_tags = get_field("upostag")
xpos_tags = get_field("xpostag")
feats = get_field("feats", lambda x: "|".join(k + "=" + v for k, v in x.items())
if hasattr(x, "items") else "_")
heads = get_field("head")
dep_rels = get_field("deprel")
dependencies = list(zip(dep_rels, heads))
yield self.text_to_instance(words, lemmas, lemma_rules, upos_tags, xpos_tags,
feats, dependencies, ids, multiword_ids, multiword_forms)
@overrides
def text_to_instance(self, # type: ignore
words: List[str],
lemmas: List[str] = None,
lemma_rules: List[str] = None,
upos_tags: List[str] = None,
xpos_tags: List[str] = None,
feats: List[str] = None,
dependencies: List[Tuple[str, int]] = None,
ids: List[str] = None,
multiword_ids: List[str] = None,
multiword_forms: List[str] = None) -> Instance:
fields: Dict[str, Field] = {}
tokens = TextField([Token(w) for w in words], self._token_indexers)
fields["tokens"] = tokens
names = ["upos", "xpos", "feats", "lemmas"]
all_tags = [upos_tags, xpos_tags, feats, lemma_rules]
for name, field in zip(names, all_tags):
if field:
fields[name] = SequenceLabelField(field, tokens, label_namespace=name)
if dependencies is not None:
# We don't want to expand the label namespace with an additional dummy token, so we'll
# always give the 'ROOT_HEAD' token a label of 'root'.
fields["head_tags"] = SequenceLabelField([x[0] for x in dependencies],
tokens,
label_namespace="head_tags")
fields["head_indices"] = SequenceLabelField([int(x[1]) for x in dependencies],
tokens,
label_namespace="head_index_tags")
fields["metadata"] = MetadataField({
"words": words,
"upos_tags": upos_tags,
"xpos_tags": xpos_tags,
"feats": feats,
"lemmas": lemmas,
"lemma_rules": lemma_rules,
"ids": ids,
"multiword_ids": multiword_ids,
"multiword_forms": multiword_forms
})
return Instance(fields)
| [
"[email protected]"
]
| |
89e171105b6e4fd444900f215d2561e8181b50c7 | 5a628296aea2e3b908e634f8ad7f0d9d49750cf2 | /3dod/configs/car_cfg20_eval_ebm3_test_seq0012.py | 4a7f9f54fd90c0ce2f278918004038ba74020d10 | [
"MIT"
]
| permissive | TianhaoFu/ebms_3dod | 521cf16946abaef77d005e7ee2e5b0a86d1a36fd | b8a33577c079d9a587bca289a707a8b1b3cb4834 | refs/heads/main | 2023-06-03T18:34:00.670739 | 2021-06-21T13:51:53 | 2021-06-21T13:51:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,720 | py | model = dict(
type='SingleStageDetector20',
backbone=dict(
type='SimpleVoxel',
num_input_features=4,
use_norm=True,
num_filters=[32, 64],
with_distance=False),
neck=dict(
type='SpMiddleFHD',
output_shape=[40, 1600, 1408],
num_input_features=4,
num_hidden_features=64 * 5,),
bbox_head=dict(
type='SSDRotateHead',
num_class=1,
num_output_filters=256,
num_anchor_per_loc=2,
use_sigmoid_cls=True,
encode_rad_error_by_sin=True,
use_direction_classifier=True,
box_code_size=7,),
extra_head=dict(
type='PSWarpHead',
grid_offsets = (0., 40.),
featmap_stride=.4,
in_channels=256,
num_class=1,
num_parts=28,)
)
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
pos_iou_thr=0.6,
neg_iou_thr=0.45,
min_pos_iou=0.45, # this one is to limit the force assignment
ignore_iof_thr=-1,
similarity_fn ='NearestIouSimilarity'
),
nms=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
nms_thr=0.7,
min_bbox_size=0
),
allowed_border=0,
pos_weight=-1,
smoothl1_beta=1 / 9.0,
debug=False),
extra=dict(
assigner=dict(
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.7,
ignore_iof_thr=-1,
similarity_fn ='RotateIou3dSimilarity'
)
)
)
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=100,
nms_thr=0.7,
min_bbox_size=0
),
extra=dict(
score_thr=0.3, nms=dict(type='nms', iou_thr=0.1), max_per_img=100, EBM_guided=False, EBM_refine=True, EBM_refine_steps=10)
)
# # dataset settings
# dataset_type = 'KittiLiDAR'
# data_root = '/root/ebms_3dod/3dod/data/KITTI/'
# img_norm_cfg = dict(
# mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# data = dict(
# imgs_per_gpu=2,
# # workers_per_gpu=4,
# workers_per_gpu=1,
# train=dict(
# type=dataset_type,
# root=data_root + 'object/training/',
# ann_file=data_root + 'ImageSets/train.txt',
# img_prefix=None,
# img_scale=(1242, 375),
# img_norm_cfg=img_norm_cfg,
# size_divisor=32,
# flip_ratio=0.5,
# with_mask=False,
# with_label=True,
# with_point=True,
# class_names = ['Car', 'Van'],
# augmentor=dict(
# type='PointAugmentor',
# root_path=data_root,
# info_path=data_root + 'kitti_dbinfos_trainval.pkl',
# sample_classes=['Car'],
# min_num_points=5,
# sample_max_num=15,
# removed_difficulties=[-1],
# global_rot_range=[-0.78539816, 0.78539816],
# gt_rot_range=[-0.78539816, 0.78539816],
# center_noise_std=[1., 1., .5],
# scale_range=[0.95, 1.05]
# ),
# generator=dict(
# type='VoxelGenerator',
# voxel_size=[0.05, 0.05, 0.1],
# point_cloud_range=[0, -40., -3., 70.4, 40., 1.],
# max_num_points=5,
# max_voxels=20000
# ),
# anchor_generator=dict(
# type='AnchorGeneratorStride',
# sizes=[1.6, 3.9, 1.56],
# anchor_strides=[0.4, 0.4, 1.0],
# anchor_offsets=[0.2, -39.8, -1.78],
# rotations=[0, 1.57],
# ),
# anchor_area_threshold=1,
# out_size_factor=8,
# test_mode=False),
#
# val=dict(
# type=dataset_type,
# root=data_root + 'object/testing/',
# ann_file=data_root + 'ImageSets/test.txt',
# img_prefix=None,
# img_scale=(1242, 375),
# img_norm_cfg=img_norm_cfg,
# size_divisor=32,
# flip_ratio=0,
# with_mask=False,
# with_label=False,
# with_point=True,
# class_names = ['Car'],
# generator=dict(
# type='VoxelGenerator',
# voxel_size=[0.05, 0.05, 0.1],
# point_cloud_range=[0., -40., -3., 70.4, 40., 1.],
# max_num_points=5,
# max_voxels=20000
# ),
# anchor_generator=dict(
# type='AnchorGeneratorStride',
# sizes=[1.6, 3.9, 1.56],
# anchor_strides=[0.4, 0.4, 1.0],
# anchor_offsets=[0.2, -39.8, -1.78],
# rotations=[0, 1.57],
# ),
# anchor_area_threshold=1,
# out_size_factor=8,
# test_mode=True),
# )
# dataset settings
dataset_type = 'KittiVideo'
data_root = '/root/ebms_3dod/3dod/data/KITTI/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
data = dict(
imgs_per_gpu=2,
# workers_per_gpu=4,
workers_per_gpu=1,
val=dict(
type=dataset_type,
root=data_root + 'tracking/testing/',
calib_dir = 'calib/0012.txt',
img_dir = 'image_02/0012',
lidar_dir = 'velodyne/0012',
ann_file=data_root + 'ImageSets/test.txt',
img_prefix=None,
img_scale=(1242, 375),
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0,
with_mask=False,
with_label=False,
with_point=True,
class_names = ['Car'],
generator=dict(
type='VoxelGenerator',
voxel_size=[0.05, 0.05, 0.1],
point_cloud_range=[0., -40., -3., 70.4, 40., 1.],
max_num_points=5,
max_voxels=20000
),
anchor_generator=dict(
type='AnchorGeneratorStride',
sizes=[1.6, 3.9, 1.56],
anchor_strides=[0.4, 0.4, 1.0],
anchor_offsets=[0.2, -39.8, -1.78],
rotations=[0, 1.57],
),
anchor_area_threshold=1,
out_size_factor=8,
test_mode=True),
)
# optimizer
optimizer = dict(
type='adam_onecycle', lr=0.003, weight_decay=0.01,
grad_clip=dict(max_norm=10, norm_type=2)
)
# learning policy
lr_config = dict(
policy='onecycle',
moms = [0.95, 0.85],
div_factor = 10,
pct_start = 0.4
)
checkpoint_config = dict(interval=5)
log_config = dict(interval=50)
total_epochs = 80
dist_params = dict(backend='nccl')
log_level = 'INFO'
# work_dir = '../saved_model_vehicle
work_dir = '/root/ebms_3dod/3dod/saved_model_vehicle20'
load_from = None
resume_from = None
workflow = [('train', 1)]
SA_SSD_pretrained = True
SA_SSD_fixed = True
USE_EBM = True
| [
"[email protected]"
]
| |
754a312303ebd319014000b3257ab320ff38a7ee | 57c13a2500561e72e382489c23e9c0b8347be605 | /concurrency/simple_interval_sum_example.py | c4ccc0e400215264a4ef1daac9bc80a5f4004a70 | []
| no_license | linheimx/python_master | 7403d7af639e31810c90b2fba14972a6d3dcfcec | 7fb7c467bedaff1515975807552a0ba05e30f15e | refs/heads/master | 2021-01-21T21:54:55.537994 | 2016-12-23T15:05:14 | 2016-12-23T15:05:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | """
Sum from 1 to 200000000
real 0m16.303s
user 0m16.271s
sys 0m0.019s
"""
TOTAL_VALUE = 0
def interval_sum(start_num, last_num):
global TOTAL_VALUE
total = 0
for x in range(start_num, last_num + 1):
total += x
TOTAL_VALUE += total
if __name__ == "__main__":
interval_sum(1, 200000000)
print(TOTAL_VALUE)
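    # For reference, the closed-form result is 200000000 * 200000001 // 2,
    # so this script should print: 20000000100000000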
| [
"[email protected]"
]
| |
7c9abd888ec8c97b3f20c8e59ec550f5a3f2fd02 | 1362bc36e86f8216d405b547f5f45874ac332b1e | /Google/wordBreak2.py | c134aff4c0d863141d8216c688860f6d1559f892 | []
| no_license | zhuolikevin/Algorithm-Practices-Python | ed5ca06758e35d910ffbea011b414b3c57fd6c7a | 1df8d93a8ecb8627899aadddb5dd5c5d0b144cdf | refs/heads/master | 2021-01-22T01:04:31.536327 | 2016-01-15T13:31:07 | 2016-01-15T13:31:07 | 32,602,632 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,160 | py | class Solution(object):
def wordBreak(self, s, wordDict):
if not wordDict:
return []
# self.find = False
words = self.helper(wordDict, s)
res = []
for word in words:
temp = ' '.join(word)
res.append(temp)
return res
    def helper(self, dic, s):
        # Base cases: an empty string has exactly one (empty) segmentation,
        # while an exhausted dictionary with text still left to match has none.
        if not s:
            return [[]]
        if not dic:
            return []
res = []
words = []
for i in dic:
words.append(i)
for word in words:
# if self.find:
# break
i = 0
while i < len(word):
if i >= len(s) or s[i] != word[i]:
break
i += 1
if i == len(word):
temp = [word]
dic.remove(word)
remain = self.helper(dic, s[i:])
for solu in remain:
res.append(temp + solu)
dic.append(word)
return res
solution = Solution()
s = 'catsanddog'
wordDict = ["cat", "cats", "and", "sand", "dog"]
print solution.wordBreak(s, wordDict)
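# For the inputs above this prints: ['cat sand dog', 'cats and dog']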
| [
"[email protected]"
]
| |
5b32f01d57e592dc5d14905ec3b40ccd35e0ed5d | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_caretakers.py | 74fcd5b95fc2e398572ed29030a5ed02a77a231b | [
"MIT"
]
| permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | py |
from xai.brain.wordbase.nouns._caretaker import _CARETAKER
#calss header
class _CARETAKERS(_CARETAKER, ):
def __init__(self,):
_CARETAKER.__init__(self)
self.name = "CARETAKERS"
self.specie = 'nouns'
self.basic = "caretaker"
self.jsondata = {}
| [
"[email protected]"
]
| |
e425aeca0e134ac3a5dfe1c8a309f07d4ca01e29 | a2ac73af04a07bb070cd85c88778608b561dd3e4 | /addons/mail/__openerp__.py | 6570b2638ba325740370d4c66b6283b5a31c28d0 | []
| no_license | sannareddy/openerp-heimai | c849586d6099cc7548dec8b3f1cc7ba8be49594a | 58255ecbcea7bf9780948287cf4551ed6494832a | refs/heads/master | 2021-01-15T21:34:46.162550 | 2014-05-13T09:20:37 | 2014-05-13T09:20:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54 | py | /usr/share/pyshared/openerp/addons/mail/__openerp__.py | [
"[email protected]"
]
| |
a6687b56092e9d3c2f38e3fbb4e8ddac55c5f439 | 40ba3112a116b361673732efc10402a067322ad1 | /PycharmProjects/untitled/OO/newClass.py | 939b5ed323117618000643bb46f2be85ab8e298b | []
| no_license | oumingwang/----Python | 622be90adffefcab1696bb145b171fa9a8bff5b7 | 003c0b7880de2b9e0737120bc15bf6eaeb7a644f | refs/heads/master | 2020-07-03T03:16:13.687118 | 2017-04-08T16:55:01 | 2017-04-08T16:55:01 | 74,200,264 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | class MyClass (object):
"hello world "
version = 1.1
def MyVersion(self):
pass
c = MyClass()
print c.__class__.__name__
print c.__doc__
print c.__dict__
print c.__module__
print c.__class__.__base__
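# Expected output of the prints above (Python 2):
#   MyClass
#   hello world
#   {}
#   __main__
#   <type 'object'>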
| [
"[email protected]"
]
| |
808087af7bfe146f810c4487b617e92f6a2db462 | 5237e7939a668261d573c56d300101742b4dfe0d | /38-Abstraction adn Encapsulation.py | 9bc9763b8fa0371e64f2c0c938e11137f6f15bca | []
| no_license | imAtulSharma/Python-Tutorial-Files | fa5dcf855c4fc6008028e680bfe4c7691bd13f25 | 2245a171b9d7146f349f84027f413d796fc99a89 | refs/heads/master | 2022-12-02T00:13:53.231523 | 2020-07-23T05:27:15 | 2020-07-23T05:27:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 721 | py |
class Employee:
no_of_leaves = 8
def __init__(self, aname, asalary, arole):
self.name = aname
self.salary = asalary
self.role = arole
def printdetails(self):
return f"The Name is {self.name}. Salary is {self.salary} and role is {self.role}"
@classmethod
def change_leaves(cls, newleaves):
cls.no_of_leaves = newleaves
@classmethod
def from_dash(cls, string):
return cls(*string.split("-"))
@staticmethod
def printgood(string):
print("This is good " + string)
atul = Employee("atul", 255, "Instructor")
rohan = Employee("Rohan", 455, "Student")
karan = Employee.from_dash("Karan-480-Student")
Employee.printgood("Rohan")
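# For illustration, the calls above give:
#   karan.printdetails() -> 'The Name is Karan. Salary is 480 and role is Student'
#   Employee.printgood("Rohan") prints 'This is good Rohan'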
| [
"[email protected]"
]
| |
67334df6315dcded9d30edef4d02cb7d9a0f739c | b509ef07d752e987f4cb84d1abd4c3a98488a6c7 | /resources/lib/streamlink/plugins/tamago.py | 0b6dc7197643d4c8de27269ff87d6ea19785b867 | [
"BSD-2-Clause"
]
| permissive | Twilight0/script.module.streamlink.base | d91245d1a43d6b3191b62a6eb4b1cf70598ed23e | c1e4628715a81806586b10323b8cb01424bbb6fc | refs/heads/master | 2021-01-21T04:32:41.658823 | 2020-09-07T20:56:29 | 2020-09-07T20:56:29 | 101,915,967 | 6 | 4 | BSD-2-Clause | 2018-01-14T15:20:47 | 2017-08-30T18:31:47 | Python | UTF-8 | Python | false | false | 1,571 | py | import re
from streamlink.plugin import Plugin
from streamlink.plugin.api import validate
from streamlink.stream import HTTPStream
from streamlink import NoStreamsError
class Tamago(Plugin):
_url_re = re.compile(r"https?://(?:player\.)?tamago\.live/w/(?P<id>\d+)")
_api_url_base = "https://player.tamago.live/api/rooms/{id}"
_api_response_schema = validate.Schema({
u"status": 200,
u"message": u"Success",
u"data": {
u"room_number": validate.text,
u"stream": {validate.text: validate.url()}
}
})
_stream_qualities = {
u"150": "144p",
u"350": "360p",
u"550": "540p",
u"900": "720p",
}
@classmethod
def can_handle_url(cls, url):
return cls._url_re.match(url) is not None
def _get_streams(self):
user_id = self._url_re.match(self.url).group('id')
try:
api_response = self.session.http.get(self._api_url_base.format(id=user_id))
streams = self.session.http.json(api_response, schema=self._api_response_schema)['data']['stream']
except Exception:
raise NoStreamsError(self.url)
unique_stream_urls = []
for stream in streams.keys():
if streams[stream] not in unique_stream_urls:
unique_stream_urls.append(streams[stream])
quality = self._stream_qualities[stream] if stream in self._stream_qualities.keys() else "720p+"
yield quality, HTTPStream(self.session, streams[stream])
__plugin__ = Tamago
| [
"[email protected]"
]
| |
95932558356e481f28e177b43d77b26fe17a4990 | 8f3336bbf7cd12485a4c52daa831b5d39749cf9b | /Python/remove-duplicates-from-sorted-list.py | 720a0711be4121ce4774e70948432116eff69861 | []
| no_license | black-shadows/LeetCode-Topicwise-Solutions | 9487de1f9a1da79558287b2bc2c6b28d3d27db07 | b1692583f7b710943ffb19b392b8bf64845b5d7a | refs/heads/master | 2022-05-30T22:16:38.536678 | 2022-05-18T09:18:32 | 2022-05-18T09:18:32 | 188,701,704 | 240 | 110 | null | 2020-05-08T13:04:36 | 2019-05-26T15:41:03 | C++ | UTF-8 | Python | false | false | 912 | py | # Time: O(n)
# Space: O(1)
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def deleteDuplicates(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
cur = head
while cur:
runner = cur.next
while runner and runner.val == cur.val:
runner = runner.next
cur.next = runner
cur = runner
return head
def deleteDuplicates2(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if not head: return head
if head.next:
if head.val == head.next.val:
head = self.deleteDuplicates2(head.next)
else:
head.next = self.deleteDuplicates2(head.next)
return head
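# Example: for the sorted list 1 -> 1 -> 2 -> 3 -> 3, both methods return 1 -> 2 -> 3.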
| [
"[email protected]"
]
| |
013dba1a446b2feb94b19ec9b2abcba9d5432b7e | 002c14cd622b4890cce1c243065cebe39e2302ec | /LeetCode/105-Construct-Binary-Tree-from-Preorder-and-Inorder-Traversal/Construct-Binary-Tree-from-Preorder-and-Inorder-Traversal.py | e8b16cfce36d0ee1affa25b7da3c6475768bcd33 | [
"MIT"
]
| permissive | hscspring/The-DataStructure-and-Algorithms | 6200eba031eac51b13e320e1fc9f204644933e00 | e704a92e091f2fdf5f27ec433e0e516ccc787ebb | refs/heads/master | 2022-08-29T18:47:52.378884 | 2022-08-25T16:22:44 | 2022-08-25T16:22:44 | 201,743,910 | 11 | 3 | MIT | 2021-04-20T18:28:47 | 2019-08-11T09:26:34 | Python | UTF-8 | Python | false | false | 597 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
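# Example: preorder [3, 9, 20, 15, 7] with inorder [9, 3, 15, 20, 7] rebuilds
# 3 as the root, 9 as its left child, and 20 on the right with children 15 and 7.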
class Solution:
def buildTree(self, preorder: List[int], inorder: List[int]) -> TreeNode:
if not preorder or not inorder:
return None
root_index = inorder.index(preorder[0])
root = TreeNode(preorder[0])
root.left = self.buildTree(preorder[1: root_index+1], inorder[: root_index])
root.right = self.buildTree(preorder[root_index+1: ], inorder[root_index+1: ])
return root | [
"[email protected]"
]
| |
ed9b903ef6ff142ea75af2d4c6f31beb3fee10d2 | b0eef0efd10556a4b054574fdd2d43124cb0856b | /npbench/benchmarks/polybench/durbin/durbin_cupy.py | 1a84a9ed2fc45b03004a56129ddb6dfd0aa73f2a | [
"BSD-3-Clause"
]
| permissive | learning-chip/npbench | 140d38be2095b54393de6e0008264b54b7cf686b | f2f545afe3603d5c8f1771f26d660f25ce4a3cda | refs/heads/main | 2023-05-10T09:54:52.719759 | 2021-05-31T12:09:48 | 2021-05-31T12:09:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | import cupy as np
def kernel(r):
y = np.empty_like(r)
alpha = -r[0]
beta = 1.0
y[0] = -r[0]
for k in range(1, r.shape[0]):
beta *= 1.0 - alpha * alpha
alpha = -(r[k] + np.dot(np.flip(r[:k]), y[:k])) / beta
y[:k] += alpha * np.flip(y[:k])
y[k] = alpha
return y
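# Minimal usage sketch (not part of the original benchmark harness; the input
# length below is an arbitrary assumption chosen only for illustration):
if __name__ == "__main__":
    r = np.random.rand(1000)  # np is cupy here, so this runs on the GPU
    y = kernel(r)
    print(y.shape)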
| [
"[email protected]"
]
| |
fbdbcf9c89f5f3f1b99414a21d346ac275bb88aa | 32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd | /benchmark/omninote/testcase/firstcases/testcase6_022.py | c47b59270d60bcc1b7d1a8506d1cc9cb435a6e82 | []
| no_license | Prefest2018/Prefest | c374d0441d714fb90fca40226fe2875b41cf37fc | ac236987512889e822ea6686c5d2e5b66b295648 | refs/heads/master | 2021-12-09T19:36:24.554864 | 2021-12-06T12:46:14 | 2021-12-06T12:46:14 | 173,225,161 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,354 | py | #coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'it.feio.android.omninotes',
'appActivity' : 'it.feio.android.omninotes.MainActivity',
'resetKeyboard' : True,
'androidCoverage' : 'it.feio.android.omninotes/it.feio.android.omninotes.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
time.sleep(timeout)
p.terminate()
return
def getElememt(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str)
return element
def getElememtBack(driver, str1, str2) :
for i in range(0, 2, 1):
try:
element = driver.find_element_by_android_uiautomator(str1)
except NoSuchElementException:
time.sleep(1)
else:
return element
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str2)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str2)
return element
def swipe(driver, startxper, startyper, endxper, endyper) :
size = driver.get_window_size()
width = size["width"]
height = size["height"]
try:
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
except WebDriverException:
time.sleep(1)
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
return
# testcase022
try :
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
element = getElememt(driver, "new UiSelector().resourceId(\"it.feio.android.omninotes:id/fab_expand_menu_button\").className(\"android.widget.ImageButton\")")
TouchAction(driver).long_press(element).release().perform()
element = getElememt(driver, "new UiSelector().resourceId(\"it.feio.android.omninotes:id/menu_attachment\").className(\"android.widget.TextView\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Camera\")", "new UiSelector().className(\"android.widget.TextView\")")
TouchAction(driver).tap(element).perform()
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"6_022\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
if (cpackage != 'it.feio.android.omninotes'):
cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage) | [
"[email protected]"
]
| |
668170131eec628085d6cb8f3052a36353d5dd84 | 64bf39b96a014b5d3f69b3311430185c64a7ff0e | /intro-ansible/venv2/lib/python3.8/site-packages/ansible/plugins/lookup/password.py | e2f24616f25844c59c04002bc4b5e3e51cc5a0e3 | [
"MIT"
]
| permissive | SimonFangCisco/dne-dna-code | 7072eba7da0389e37507b7a2aa5f7d0c0735a220 | 2ea7d4f00212f502bc684ac257371ada73da1ca9 | refs/heads/master | 2023-03-10T23:10:31.392558 | 2021-02-25T15:04:36 | 2021-02-25T15:04:36 | 342,274,373 | 0 | 0 | MIT | 2021-02-25T14:39:22 | 2021-02-25T14:39:22 | null | UTF-8 | Python | false | false | 12,098 | py | # (c) 2012, Daniel Hokka Zakrisson <[email protected]>
# (c) 2013, Javier Candeira <[email protected]>
# (c) 2013, Maykel Moya <[email protected]>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: password
version_added: "1.1"
author:
- Daniel Hokka Zakrisson <[email protected]>
- Javier Candeira <[email protected]>
- Maykel Moya <[email protected]>
short_description: retrieve or generate a random password, stored in a file
description:
- generates a random plaintext password and stores it in a file at a given filepath.
- If the file exists previously, it will retrieve its contents, behaving just like with_file.
- 'Usage of variables like C("{{ inventory_hostname }}") in the filepath can be used to set up random passwords per host,
which simplifies password management in C("host_vars") variables.'
- A special case is using /dev/null as a path. The password lookup will generate a new random password each time,
but will not write it to /dev/null. This can be used when you need a password without storing it on the controller.
options:
_terms:
description:
- path to the file that stores/will store the passwords
required: True
encrypt:
description:
- Whether the user requests that this password is returned encrypted or in plain text.
- Note that the password is always stored as plain text.
- Encrypt also forces saving the salt value for idempotence.
type: boolean
default: True
chars:
version_added: "1.4"
description:
        - Define a comma-separated list of names that compose a custom character set used in the generated passwords.
- 'By default generated passwords contain a random mix of upper and lowercase ASCII letters, the numbers 0-9 and punctuation (". , : - _").'
- "They can be either parts of Python's string module attributes (ascii_letters,digits, etc) or are used literally ( :, -)."
- "To enter comma use two commas ',,' somewhere - preferably at the end. Quotes and double quotes are not supported."
type: string
length:
description: The length of the generated password.
default: 20
type: integer
notes:
- A great alternative to the password lookup plugin,
if you don't need to generate random passwords on a per-host basis,
would be to use Vault in playbooks.
Read the documentation there and consider using it first,
it will be more desirable for most applications.
- If the file already exists, no data will be written to it.
If the file has contents, those contents will be read in as the password.
Empty files cause the password to return as an empty string.
- 'As all lookups, this runs on the Ansible host as the user running the playbook, and "become" does not apply,
the target file must be readable by the playbook user, or, if it does not exist,
the playbook user must have sufficient privileges to create it.
(So, for example, attempts to write into areas such as /etc will fail unless the entire playbook is being run as root).'
"""
EXAMPLES = """
- name: create a mysql user with a random password
mysql_user:
name: "{{ client }}"
password: "{{ lookup('password', 'credentials/' + client + '/' + tier + '/' + role + '/mysqlpassword length=15') }}"
priv: "{{ client }}_{{ tier }}_{{ role }}.*:ALL"
- name: create a mysql user with a random password using only ascii letters
mysql_user: name={{ client }} password="{{ lookup('password', '/tmp/passwordfile chars=ascii_letters') }}" priv='{{ client }}_{{ tier }}_{{ role }}.*:ALL'
- name: create a mysql user with a random password using only digits
mysql_user:
name: "{{ client }}"
password: "{{ lookup('password', '/tmp/passwordfile chars=digits') }}"
priv: "{{ client }}_{{ tier }}_{{ role }}.*:ALL"
- name: create a mysql user with a random password using many different char sets
mysql_user:
name: "{{ client }}"
password" "{{ lookup('password', '/tmp/passwordfile chars=ascii_letters,digits,hexdigits,punctuation') }}"
priv: "{{ client }}_{{ tier }}_{{ role }}.*:ALL"
"""
RETURN = """
_raw:
description:
- a password
"""
import os
import string
from ansible.errors import AnsibleError, AnsibleAssertionError
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.parsing.splitter import parse_kv
from ansible.plugins.lookup import LookupBase
from ansible.utils.encrypt import do_encrypt, random_password
from ansible.utils.path import makedirs_safe
DEFAULT_LENGTH = 20
VALID_PARAMS = frozenset(('length', 'encrypt', 'chars'))
def _parse_parameters(term):
"""Hacky parsing of params
See https://github.com/ansible/ansible-modules-core/issues/1968#issuecomment-136842156
and the first_found lookup For how we want to fix this later
"""
first_split = term.split(' ', 1)
if len(first_split) <= 1:
# Only a single argument given, therefore it's a path
relpath = term
params = dict()
else:
relpath = first_split[0]
params = parse_kv(first_split[1])
if '_raw_params' in params:
# Spaces in the path?
relpath = u' '.join((relpath, params['_raw_params']))
del params['_raw_params']
# Check that we parsed the params correctly
if not term.startswith(relpath):
# Likely, the user had a non parameter following a parameter.
# Reject this as a user typo
raise AnsibleError('Unrecognized value after key=value parameters given to password lookup')
# No _raw_params means we already found the complete path when
# we split it initially
# Check for invalid parameters. Probably a user typo
invalid_params = frozenset(params.keys()).difference(VALID_PARAMS)
if invalid_params:
raise AnsibleError('Unrecognized parameter(s) given to password lookup: %s' % ', '.join(invalid_params))
# Set defaults
params['length'] = int(params.get('length', DEFAULT_LENGTH))
params['encrypt'] = params.get('encrypt', None)
params['chars'] = params.get('chars', None)
if params['chars']:
tmp_chars = []
if u',,' in params['chars']:
tmp_chars.append(u',')
tmp_chars.extend(c for c in params['chars'].replace(u',,', u',').split(u',') if c)
params['chars'] = tmp_chars
else:
# Default chars for password
params['chars'] = [u'ascii_letters', u'digits', u".,:-_"]
return relpath, params
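# Illustration (not part of the original module): a term such as
#   '/tmp/passwordfile length=12 chars=digits'
# parses to ('/tmp/passwordfile', {'length': 12, 'encrypt': None, 'chars': ['digits']}).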
def _read_password_file(b_path):
"""Read the contents of a password file and return it
:arg b_path: A byte string containing the path to the password file
:returns: a text string containing the contents of the password file or
None if no password file was present.
"""
content = None
if os.path.exists(b_path):
with open(b_path, 'rb') as f:
b_content = f.read().rstrip()
content = to_text(b_content, errors='surrogate_or_strict')
return content
def _gen_candidate_chars(characters):
'''Generate a string containing all valid chars as defined by ``characters``
:arg characters: A list of character specs. The character specs are
shorthand names for sets of characters like 'digits', 'ascii_letters',
or 'punctuation' or a string to be included verbatim.
The values of each char spec can be:
* a name of an attribute in the 'strings' module ('digits' for example).
The value of the attribute will be added to the candidate chars.
* a string of characters. If the string isn't an attribute in 'string'
module, the string will be directly added to the candidate chars.
For example::
characters=['digits', '?|']``
will match ``string.digits`` and add all ascii digits. ``'?|'`` will add
the question mark and pipe characters directly. Return will be the string::
u'0123456789?|'
'''
chars = []
for chars_spec in characters:
# getattr from string expands things like "ascii_letters" and "digits"
# into a set of characters.
chars.append(to_text(getattr(string, to_native(chars_spec), chars_spec),
errors='strict'))
chars = u''.join(chars).replace(u'"', u'').replace(u"'", u'')
return chars
def _random_salt():
"""Return a text string suitable for use as a salt for the hash functions we use to encrypt passwords.
"""
# Note passlib salt values must be pure ascii so we can't let the user
# configure this
salt_chars = _gen_candidate_chars(['ascii_letters', 'digits', './'])
return random_password(length=8, chars=salt_chars)
def _parse_content(content):
'''parse our password data format into password and salt
:arg content: The data read from the file
:returns: password and salt
'''
password = content
salt = None
salt_slug = u' salt='
try:
sep = content.rindex(salt_slug)
except ValueError:
# No salt
pass
else:
salt = password[sep + len(salt_slug):]
password = content[:sep]
return password, salt
def _format_content(password, salt, encrypt=True):
"""Format the password and salt for saving
:arg password: the plaintext password to save
:arg salt: the salt to use when encrypting a password
:arg encrypt: Whether the user requests that this password is encrypted.
Note that the password is saved in clear. Encrypt just tells us if we
must save the salt value for idempotence. Defaults to True.
:returns: a text string containing the formatted information
.. warning:: Passwords are saved in clear. This is because the playbooks
expect to get cleartext passwords from this lookup.
"""
if not encrypt and not salt:
return password
# At this point, the calling code should have assured us that there is a salt value.
if not salt:
raise AnsibleAssertionError('_format_content was called with encryption requested but no salt value')
return u'%s salt=%s' % (password, salt)
def _write_password_file(b_path, content):
b_pathdir = os.path.dirname(b_path)
makedirs_safe(b_pathdir, mode=0o700)
with open(b_path, 'wb') as f:
os.chmod(b_path, 0o600)
b_content = to_bytes(content, errors='surrogate_or_strict') + b'\n'
f.write(b_content)
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
ret = []
for term in terms:
relpath, params = _parse_parameters(term)
path = self._loader.path_dwim(relpath)
b_path = to_bytes(path, errors='surrogate_or_strict')
chars = _gen_candidate_chars(params['chars'])
changed = False
content = _read_password_file(b_path)
if content is None or b_path == to_bytes('/dev/null'):
plaintext_password = random_password(params['length'], chars)
salt = None
changed = True
else:
plaintext_password, salt = _parse_content(content)
if params['encrypt'] and not salt:
changed = True
salt = _random_salt()
if changed and b_path != to_bytes('/dev/null'):
content = _format_content(plaintext_password, salt, encrypt=params['encrypt'])
_write_password_file(b_path, content)
if params['encrypt']:
password = do_encrypt(plaintext_password, params['encrypt'], salt=salt)
ret.append(password)
else:
ret.append(plaintext_password)
return ret
| [
"[email protected]"
]
| |
bff42ee4b49a59c4a1c91ef65285fd2eafdf4ea4 | 1b8d162160f5ab6d6a6b8940b8ab83b482abb409 | /tests/query/test_wildcard.py | 3613314b733134f56f4e05918918bba4a6c1ca75 | [
"Apache-2.0"
]
| permissive | jlinn/pylastica | f81e438a109dfe06adc7e9b70fdf794c5d01a53f | 0fbf68ed3e17d665e3cdf1913444ebf1f72693dd | refs/heads/master | 2020-05-19T14:07:38.794717 | 2014-07-23T23:43:00 | 2014-07-23T23:43:00 | 10,442,284 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,650 | py | __author__ = 'Joe Linn'
import unittest
import pylastica
from tests.base import Base
class WildcardTest(unittest.TestCase, Base):
def test_search_with_analyzer(self):
client = self._get_client()
index = client.get_index('test')
index_params = {
'analysis': {
'analyzer': {
'lw': {
'type': 'custom',
'tokenizer': 'keyword',
'filter': ['lowercase']
}
}
}
}
index.create(index_params, True)
doc_type = index.get_doc_type('test')
mapping = pylastica.doc_type.Mapping(doc_type, {
            'name': {'type': 'string', 'store': 'no', 'analyzer': 'lw'}
})
doc_type.mapping = mapping
doc_type.add_document(pylastica.Document(1, {'name': 'San Diego'}))
doc_type.add_document(pylastica.Document(2, {'name': 'San Luis Obispo'}))
doc_type.add_document(pylastica.Document(3, {'name': 'San Francisco'}))
doc_type.add_document(pylastica.Document(4, {'name': 'Chicago'}))
doc_type.add_document(pylastica.Document(5, {'name': 'London'}))
index.refresh()
query = pylastica.query.Wildcard()
query.set_value('name', 'sa*')
result_set = doc_type.search(query)
self.assertEqual(3, len(result_set))
query = pylastica.query.Wildcard()
query.set_value('name', 'ch*')
result_set = doc_type.search(query)
self.assertEqual(1, len(result_set))
index.delete()
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
20eb97d8a227b49c674e29cf693eee401713bdc7 | 05263538c3ad0f577cdbbdb9bac87dcf450230ce | /alexa/ask-sdk/ask_sdk_model/services/directive/error.py | 5bd50a4d2a1931a4237389af00b0942f568d9058 | []
| no_license | blairharper/ISS-GoogleMap-project | cea027324fc675a9a309b5277de99fc0265dcb80 | 3df119036b454a0bb219af2d703195f4154a2471 | refs/heads/master | 2020-03-21T16:47:21.046174 | 2018-10-24T08:05:57 | 2018-10-24T08:05:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,520 | py | # coding: utf-8
#
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional
from datetime import datetime
class Error(object):
"""
NOTE: This class is auto generated.
Do not edit the class manually.
:param code: error code to find more information in developer.amazon.com. # noqa: E501
:type code: (optional) int
:param message: Readable description of error. # noqa: E501
:type message: (optional) str
"""
deserialized_types = {
'code': 'int',
'message': 'str'
}
attribute_map = {
'code': 'code',
'message': 'message'
}
def __init__(self, code=None, message=None): # noqa: E501
# type: (Optional[int], Optional[str]) -> None
"""
:param code: error code to find more information in developer.amazon.com. # noqa: E501
:type code: (optional) int
:param message: Readable description of error. # noqa: E501
:type message: (optional) str
"""
self.__discriminator_value = None
self.code = code
self.message = message
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, Error):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
]
| |
36246774e46c9b6cd0ae0f29d7f7be2713617944 | b500996a0b29829fde6afe8b23178ca9df4a239d | /rydinfap/src/apps/assetpartpurch.py | 8aaafda1429311c01fc8ec79204b265b934722ab | []
| no_license | eocampo2000/test-code | 48c4d444e323eef5e6fe7e61b018952ef3cd4134 | 49328664243e1a9daf9c567d1aaaa19fd4654c02 | refs/heads/master | 2016-08-11T07:35:31.346464 | 2016-02-13T12:33:55 | 2016-02-13T12:33:55 | 51,642,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,303 | py | '''
Created on Jan 6, 2015
@author: eocampo
'''
'''
Created on Aug 20, 2014
@author: eocampo
'''
__version__ = '20150102'
import sys
import utils.strutils as su
import procdata.procinfa as pi
import procjobs.procsched as psc
import utils.fileutils as fu
from apps.infbaseapp import _InfaBaseApp
# Mandatory to define self.cmdStep
# method _getNextRunDate is sensitive to schedule changes !
RUN_PER_DAY = 1 # Daily runs.
DP_LEN = len('YYYYMM')
# Schedules
SCH_FREQ = 'Mthly'
sch = ()
cur_dayr = su.getTodayDtStr('%Y%m')
class AssetPartPurch(_InfaBaseApp):
exitOnError = True
def __init__(self):
super(AssetPartPurch,self).__init__()
self.landDir = ''
self.incFileSet = [] # Incoming Files. Contains full path name.
self.incFiles = []
self.workFiles = [] # Files that were moved to the working dir (ideally same than incSetFile).
self.trigFiles = [] # Incoming Trigger File.
self.fileDate = ''
self.FILE_SET_LEN = 1
self.ts = su.getTimeSTamp()
# Allowable commands for this application. Make sure to Set
self.cmdStep = { 'A' : self.getLock ,
'B' : self.isWorkDayWarn ,
'C' : self.chkNextRunFlg ,
'D' : self.procAssetPartPurch ,
}
# Infa Environmental variables/
self.infaEnvVar = {
'PMCMD' : 'mg.pmcmd' ,
'INFA_USER' : 'self.ib.rep_user' ,
'INFA_XPWD' : 'self.ib.rep_xpwd' ,
'DOMAIN' : 'self.ib.dom_name' ,
'INT_SERV' : 'self.ib.IS' ,
'INFA_SHARE' : 'self.ib.shareDir' ,
'INFA_APP_CFG' : 'self.ib.cfgDir' ,
'INFA_APP_LCK' : 'self.ib.lckDir' ,
'INFA_APP_CTL' : 'self.ib.ctlDir' ,
}
def _setDataDir(self) : return 0
def _wkf_asst_part_purch(self):
self.ib.fld = 'Asset'
self.ib.wkf = 'wkf_part_purchasing_dim_monthly'
rc = pi.runWkflWait(self.ib,self.log)
if rc != 0 :
self.log.error('Running %s.%s rc = %s' % (self.ib.fld,self.ib.wkf,rc))
else :
self.log.info('Running %s.%s rc = %s' % (self.ib.fld,self.ib.wkf,rc))
return rc
def procAssetPartPurch(self):
ctlFile = '%s/%s.ctl' % (self.ib.ctlDir,self.appName)
self.log.debug('self.checkNextRunFlg is %s' % self.checkNextRunFlg)
prev_dayr = self._getCtlFile()
if self.checkNextRunFlg is True:
if prev_dayr is None or prev_dayr.strip() == '':
self.log.error("Could not find control file or No Data")
return -1
rc = psc.getNextRunDate(prev_dayr, cur_dayr, SCH_FREQ, self.log,sch)
if rc != 0 :
self.log.error("self._chkNextRun rc = %s" % rc)
return rc
# Run workflows
if self._wkf_asst_part_purch() != 0 : return 1
# Loading Staging Succeeded. Update the control file.
rc = fu.updFile(ctlFile,cur_dayr)
if rc == 0 :
if self.checkNextRunFlg: self.log.info('Updated Cur Load Date from %s to %s , Control File %s' % (prev_dayr,cur_dayr, ctlFile))
else : self.log.info('Overwriting Cur Load Date from %s to %s , Control File %s' % (prev_dayr,cur_dayr, ctlFile))
else :
self.log.error('Could not Update Load Date %s, Control File %s rc = %s' % (cur_dayr,ctlFile,rc))
return rc
def main(Args):
a = AssetPartPurch()
rc = a.main(Args)
return rc
if __name__ == '__main__':
from setwinenv import setEnvVars # Remove in UX
setEnvVars() # Remove in UX
rc= main(sys.argv)
| [
"[email protected]"
]
| |
d72f03c6696ae620de2f185352ac0ee64c52ce40 | 8bd6b0784de9a1e6a39d0f5f23f2d8fb50c73d49 | /MethodRefine/logistics/MethodRefine/logistics_benchmark-high/validating/validating_33.py | 6bd1c737b7c18109ca58e18e42baaa3f5f355f85 | []
| no_license | sysulic/MethodRefine | a483d74e65337dff4bc2539ce3caa3bf83748b48 | adbb22d4663041d853d3132f75032b7561bf605c | refs/heads/master | 2020-09-14T10:45:55.948174 | 2020-05-01T09:13:59 | 2020-05-01T09:13:59 | 223,104,986 | 3 | 2 | null | 2020-04-27T11:01:36 | 2019-11-21T06:33:16 | Python | UTF-8 | Python | false | false | 1,654 | py | #!/usr/bin/env python
# coding=utf-8
import sys
sys.path.insert(0, './')
from logistic import *
import new_tihtn_planner
state0 = new_tihtn_planner.State('state0')
allow = False
state0.loc = {'truck1':('city1','loc1'),'truck2':('city2','loc1'),'truck3':('city3','loc2'),'truck4':('city4','loc1'),'truck5':('city5','loc1'),'plane1':('city3','loc1'),'pkg1':('city3','loc1'),'pkg2':('city5','loc1'),'pkg3':('city2','loc1'),'pkg4':('city2','loc1'),}
state0.load = {'truck1':False,'truck2':False,'truck3':False,'truck4':False,'truck5':False,'plane1':False,}
state0.plane_nums = 1
new_tihtn_planner.declare_types({'location':[('city1','loc1'),('city1','loc2'),('city2','loc1'),('city2','loc2'),('city3','loc1'),('city3','loc2'),('city4','loc1'),('city4','loc2'),('city5','loc1'),('city5','loc2'),],'truck':['truck1','truck2','truck3','truck4','truck5',],'plane':['plane1',],'pkg':['pkg1','pkg2','pkg3','pkg4',]})
new_tihtn_planner.declare_funs({load_plane:['pkg', 'location', 'plane'],load_truck:['pkg', 'location', 'truck'],by_plane:['plane', 'location'],drive_truck:['truck', 'location'], unload_truck:['pkg', 'location', 'truck'],unload_plane:['pkg', 'location', 'plane']})
new_tihtn_planner.instance()
def execute(completable):
return new_tihtn_planner.pyhop(completable, allow, state0,[('delievery','pkg1',('city5','loc2')),('delievery','pkg2',('city5','loc2')),('delievery','pkg3',('city5','loc2')),('delievery','pkg4',('city4','loc1')),],[[0, 1],[1, 2],[2, 3],], 9)
def add_methods(fun_obj_list):
for fun in fun_obj_list:
new_tihtn_planner.add_method(fun.func_name.split('__')[0], fun)
def reverse_methods():
new_tihtn_planner.reverse_methods() | [
"[email protected]"
]
| |
a6bb68f32efef496538748681b5a0a3d34d2fd67 | 4d7d2d44410ce1787ce3255dff2be9e5317535a7 | /apps/users/api/api.py | f0d5bf10e43dbea45b24f37110811e50a65fc02e | []
| no_license | Noeuclides/palindrome_api | 931533148cc2a2d4a5155d717ecb9559c1c30d12 | cb5bc8873f953121d4785fe62ef6b49ec2fdd996 | refs/heads/master | 2023-03-17T01:37:59.429450 | 2021-03-17T22:17:20 | 2021-03-17T22:17:20 | 348,148,575 | 0 | 0 | null | 2021-03-17T15:23:41 | 2021-03-15T23:10:58 | Python | UTF-8 | Python | false | false | 2,849 | py | from django.http import Http404, response
from django.contrib.auth import authenticate
from rest_framework import status, generics, permissions
from rest_framework.response import Response
from rest_framework.request import Request
from rest_framework.views import APIView
from rest_framework_jwt.settings import api_settings
from apps.users.models import User
from apps.users.api.serializers import UserSerializer, UserListSerializer, LoginSerializer
VALUES = ['id', 'name', 'last_name', 'username', 'email', 'password']
class UserAPIView(APIView):
def get(self, request: Request) -> Response:
users = User.objects.all().values(*VALUES)
        users_serializer = UserListSerializer(users, many=True)
        return Response(users_serializer.data)
def post(self, request: Request) -> Response:
serializer = UserSerializer(data=request.data)
if serializer.is_valid():
user = serializer.save()
response = {
"user": serializer.data,
}
return Response(response, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class UserDetailAPIView(APIView):
def get_object(self, pk: int) -> User:
try:
return User.objects.get(pk=pk)
except User.DoesNotExist:
raise Http404
def get(self, request: Request, pk: int) -> Response:
user = self.get_object(pk)
        user_serializer = UserSerializer(user)
        return Response(user_serializer.data)
def put(self, request: Request, pk: int) -> Response:
user = self.get_object(pk)
serializer = UserSerializer(user, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request: Request, pk: int) -> Response:
user = self.get_object(pk)
user.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class LoginAPIView(generics.GenericAPIView):
serializer_class = LoginSerializer
def post(self, request):
serializer = self.serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
response = {
'success' : 'True',
'status code' : status.HTTP_200_OK,
'message': 'User logged in successfully',
'token' : serializer.data['token'],
}
status_code = status.HTTP_200_OK
return Response(response, status=status_code)
class UserRetrieveView(generics.RetrieveAPIView):
permission_classes = [
permissions.IsAuthenticated,
]
serializer_class = UserSerializer
def get_object(self):
return self.request.user
| [
"[email protected]"
]
| |
6bb0be91de37638b0ab74754c09135d1a4a7d0a3 | 78137d5e4e688749399bbb386b26536e4ac6d9fa | /pytorch3d/transforms/so3.py | 59c499e328bf3de02883d211d1a61e9895cb56c5 | [
"MIT",
"BSD-3-Clause"
]
| permissive | bruinxiong/pytorch3d | 4235681c6356f7e69fa506d8474a3c7cf83d9fe6 | 18a3c5cbb9055bcda44590d39db65bb0c74db799 | refs/heads/master | 2022-06-18T16:28:39.589229 | 2022-05-18T20:11:36 | 2022-05-18T20:11:36 | 238,892,798 | 0 | 0 | NOASSERTION | 2022-05-18T20:11:37 | 2020-02-07T10:04:39 | Python | UTF-8 | Python | false | false | 10,029 | py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from typing import Tuple
import torch
from ..transforms import acos_linear_extrapolation
def so3_relative_angle(
R1: torch.Tensor,
R2: torch.Tensor,
cos_angle: bool = False,
cos_bound: float = 1e-4,
eps: float = 1e-4,
) -> torch.Tensor:
"""
Calculates the relative angle (in radians) between pairs of
rotation matrices `R1` and `R2` with `angle = acos(0.5 * (Trace(R1 R2^T)-1))`
.. note::
This corresponds to a geodesic distance on the 3D manifold of rotation
matrices.
Args:
R1: Batch of rotation matrices of shape `(minibatch, 3, 3)`.
R2: Batch of rotation matrices of shape `(minibatch, 3, 3)`.
cos_angle: If==True return cosine of the relative angle rather than
the angle itself. This can avoid the unstable calculation of `acos`.
cos_bound: Clamps the cosine of the relative rotation angle to
[-1 + cos_bound, 1 - cos_bound] to avoid non-finite outputs/gradients
of the `acos` call. Note that the non-finite outputs/gradients
are returned when the angle is requested (i.e. `cos_angle==False`)
and the rotation angle is close to 0 or π.
eps: Tolerance for the valid trace check of the relative rotation matrix
in `so3_rotation_angle`.
Returns:
Corresponding rotation angles of shape `(minibatch,)`.
If `cos_angle==True`, returns the cosine of the angles.
Raises:
ValueError if `R1` or `R2` is of incorrect shape.
ValueError if `R1` or `R2` has an unexpected trace.
"""
R12 = torch.bmm(R1, R2.permute(0, 2, 1))
return so3_rotation_angle(R12, cos_angle=cos_angle, cos_bound=cos_bound, eps=eps)
def so3_rotation_angle(
R: torch.Tensor,
eps: float = 1e-4,
cos_angle: bool = False,
cos_bound: float = 1e-4,
) -> torch.Tensor:
"""
Calculates angles (in radians) of a batch of rotation matrices `R` with
`angle = acos(0.5 * (Trace(R)-1))`. The trace of the
input matrices is checked to be in the valid range `[-1-eps,3+eps]`.
The `eps` argument is a small constant that allows for small errors
caused by limited machine precision.
Args:
R: Batch of rotation matrices of shape `(minibatch, 3, 3)`.
eps: Tolerance for the valid trace check.
cos_angle: If==True return cosine of the rotation angles rather than
the angle itself. This can avoid the unstable
calculation of `acos`.
cos_bound: Clamps the cosine of the rotation angle to
[-1 + cos_bound, 1 - cos_bound] to avoid non-finite outputs/gradients
of the `acos` call. Note that the non-finite outputs/gradients
are returned when the angle is requested (i.e. `cos_angle==False`)
and the rotation angle is close to 0 or π.
Returns:
Corresponding rotation angles of shape `(minibatch,)`.
If `cos_angle==True`, returns the cosine of the angles.
Raises:
ValueError if `R` is of incorrect shape.
ValueError if `R` has an unexpected trace.
"""
N, dim1, dim2 = R.shape
if dim1 != 3 or dim2 != 3:
raise ValueError("Input has to be a batch of 3x3 Tensors.")
rot_trace = R[:, 0, 0] + R[:, 1, 1] + R[:, 2, 2]
if ((rot_trace < -1.0 - eps) + (rot_trace > 3.0 + eps)).any():
raise ValueError("A matrix has trace outside valid range [-1-eps,3+eps].")
# phi ... rotation angle
phi_cos = (rot_trace - 1.0) * 0.5
if cos_angle:
return phi_cos
else:
if cos_bound > 0.0:
bound = 1.0 - cos_bound
return acos_linear_extrapolation(phi_cos, (-bound, bound))
else:
return torch.acos(phi_cos)
def so3_exp_map(log_rot: torch.Tensor, eps: float = 0.0001) -> torch.Tensor:
"""
Convert a batch of logarithmic representations of rotation matrices `log_rot`
to a batch of 3x3 rotation matrices using Rodrigues formula [1].
In the logarithmic representation, each rotation matrix is represented as
a 3-dimensional vector (`log_rot`) who's l2-norm and direction correspond
to the magnitude of the rotation angle and the axis of rotation respectively.
The conversion has a singularity around `log(R) = 0`
which is handled by clamping controlled with the `eps` argument.
Args:
log_rot: Batch of vectors of shape `(minibatch, 3)`.
eps: A float constant handling the conversion singularity.
Returns:
Batch of rotation matrices of shape `(minibatch, 3, 3)`.
Raises:
ValueError if `log_rot` is of incorrect shape.
[1] https://en.wikipedia.org/wiki/Rodrigues%27_rotation_formula
"""
return _so3_exp_map(log_rot, eps=eps)[0]
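# Illustration (not part of the original module): a log-rotation of [0, 0, pi/2]
# maps to a 90-degree rotation about the z-axis, i.e.
#   so3_exp_map(torch.tensor([[0.0, 0.0, math.pi / 2]]))
# returns, approximately, [[0, -1, 0], [1, 0, 0], [0, 0, 1]].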
def so3_exponential_map(log_rot: torch.Tensor, eps: float = 0.0001) -> torch.Tensor:
warnings.warn(
"""so3_exponential_map is deprecated,
Use so3_exp_map instead.
so3_exponential_map will be removed in future releases.""",
PendingDeprecationWarning,
)
return so3_exp_map(log_rot, eps)
def _so3_exp_map(
log_rot: torch.Tensor, eps: float = 0.0001
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""
A helper function that computes the so3 exponential map and,
apart from the rotation matrix, also returns intermediate variables
that can be re-used in other functions.
"""
_, dim = log_rot.shape
if dim != 3:
raise ValueError("Input tensor shape has to be Nx3.")
nrms = (log_rot * log_rot).sum(1)
# phis ... rotation angles
rot_angles = torch.clamp(nrms, eps).sqrt()
rot_angles_inv = 1.0 / rot_angles
fac1 = rot_angles_inv * rot_angles.sin()
fac2 = rot_angles_inv * rot_angles_inv * (1.0 - rot_angles.cos())
skews = hat(log_rot)
skews_square = torch.bmm(skews, skews)
R = (
# pyre-fixme[16]: `float` has no attribute `__getitem__`.
fac1[:, None, None] * skews
+ fac2[:, None, None] * skews_square
+ torch.eye(3, dtype=log_rot.dtype, device=log_rot.device)[None]
)
return R, rot_angles, skews, skews_square
def so3_log_map(
R: torch.Tensor, eps: float = 0.0001, cos_bound: float = 1e-4
) -> torch.Tensor:
"""
Convert a batch of 3x3 rotation matrices `R`
to a batch of 3-dimensional matrix logarithms of rotation matrices
The conversion has a singularity around `(R=I)` which is handled
by clamping controlled with the `eps` and `cos_bound` arguments.
Args:
R: batch of rotation matrices of shape `(minibatch, 3, 3)`.
eps: A float constant handling the conversion singularity.
cos_bound: Clamps the cosine of the rotation angle to
[-1 + cos_bound, 1 - cos_bound] to avoid non-finite outputs/gradients
of the `acos` call when computing `so3_rotation_angle`.
Note that the non-finite outputs/gradients are returned when
the rotation angle is close to 0 or π.
Returns:
Batch of logarithms of input rotation matrices
of shape `(minibatch, 3)`.
Raises:
ValueError if `R` is of incorrect shape.
ValueError if `R` has an unexpected trace.
"""
N, dim1, dim2 = R.shape
if dim1 != 3 or dim2 != 3:
raise ValueError("Input has to be a batch of 3x3 Tensors.")
phi = so3_rotation_angle(R, cos_bound=cos_bound, eps=eps)
phi_sin = torch.sin(phi)
# We want to avoid a tiny denominator of phi_factor = phi / (2.0 * phi_sin).
# Hence, for phi_sin.abs() <= 0.5 * eps, we approximate phi_factor with
# 2nd order Taylor expansion: phi_factor = 0.5 + (1.0 / 12) * phi**2
phi_factor = torch.empty_like(phi)
ok_denom = phi_sin.abs() > (0.5 * eps)
phi_factor[~ok_denom] = 0.5 + (phi[~ok_denom] ** 2) * (1.0 / 12)
phi_factor[ok_denom] = phi[ok_denom] / (2.0 * phi_sin[ok_denom])
log_rot_hat = phi_factor[:, None, None] * (R - R.permute(0, 2, 1))
log_rot = hat_inv(log_rot_hat)
return log_rot
def hat_inv(h: torch.Tensor) -> torch.Tensor:
"""
Compute the inverse Hat operator [1] of a batch of 3x3 matrices.
Args:
h: Batch of skew-symmetric matrices of shape `(minibatch, 3, 3)`.
Returns:
        Batch of 3d vectors of shape `(minibatch, 3)`.
Raises:
ValueError if `h` is of incorrect shape.
ValueError if `h` not skew-symmetric.
[1] https://en.wikipedia.org/wiki/Hat_operator
"""
N, dim1, dim2 = h.shape
if dim1 != 3 or dim2 != 3:
raise ValueError("Input has to be a batch of 3x3 Tensors.")
ss_diff = torch.abs(h + h.permute(0, 2, 1)).max()
HAT_INV_SKEW_SYMMETRIC_TOL = 1e-5
if float(ss_diff) > HAT_INV_SKEW_SYMMETRIC_TOL:
raise ValueError("One of input matrices is not skew-symmetric.")
x = h[:, 2, 1]
y = h[:, 0, 2]
z = h[:, 1, 0]
v = torch.stack((x, y, z), dim=1)
return v
def hat(v: torch.Tensor) -> torch.Tensor:
"""
Compute the Hat operator [1] of a batch of 3D vectors.
Args:
v: Batch of vectors of shape `(minibatch , 3)`.
Returns:
Batch of skew-symmetric matrices of shape
`(minibatch, 3 , 3)` where each matrix is of the form:
`[ 0 -v_z v_y ]
[ v_z 0 -v_x ]
[ -v_y v_x 0 ]`
Raises:
ValueError if `v` is of incorrect shape.
[1] https://en.wikipedia.org/wiki/Hat_operator
"""
N, dim = v.shape
if dim != 3:
raise ValueError("Input vectors have to be 3-dimensional.")
h = torch.zeros((N, 3, 3), dtype=v.dtype, device=v.device)
x, y, z = v.unbind(1)
h[:, 0, 1] = -z
h[:, 0, 2] = y
h[:, 1, 0] = z
h[:, 1, 2] = -x
h[:, 2, 0] = -y
h[:, 2, 1] = x
return h
| [
"[email protected]"
]
| |
8d733a6f9844f95ae270ebba18d3ce7204c182df | 7833e3f6e979dac7fd5f321ec8ba63fe1db188d6 | /srecanje2/matematika.py | d4dd8e48de6d8cee05392cd7c543a6da52caa95d | []
| no_license | jO-Osko/Krozek-python | 93865fd79d06ef5890e99c10f38bd94d308d4a70 | 787861fdeff625fc64b9ef0532a341992495713d | refs/heads/master | 2023-04-03T08:47:00.414804 | 2021-04-06T14:52:27 | 2021-04-06T14:52:27 | 305,398,098 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | # + - / * **
# // %
# // -> integer division
# % -> remainder of division (modulo, modulus)
# vpisano <- int(input())
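# For example: 7 // 2 == 3 and 7 % 2 == 1, so 7 % 2 != 0 means 7 is odd.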
vpisano = int(input("Enter a number: "))
if vpisano % 2 != 0:
print("Vnesel si liho število")
else:
print("Vnesel si sodo število")
| [
"[email protected]"
]
| |
8a16ba48d7f52c945a9074f8d6397b88610d3699 | 74984afb8ac988ad56cb887cf1ae76e0580ceaf4 | /transposition.py | 853d98622852161725859684d7a471b899718f99 | []
| no_license | eBLDR/Criptography | e440786f1a8d2c2bc5d24a1e6d7f005fae6fd28a | f08974d8d2dd95087afb3d2f1b91419df0959371 | refs/heads/master | 2020-03-26T06:19:30.809445 | 2018-12-27T11:33:29 | 2018-12-27T11:33:29 | 144,599,524 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,479 | py | """
Transposition cipher (permutation method) - by BLDR 2018
"""
from math import ceil
from cipher import Cipher
class TranspositionCipher(Cipher):
def __init__(self):
super().__init__()
self.possible_modes.update({'E': 'Encryption', 'D': 'Decryption'})
@staticmethod
def cipher_info():
print("Transposition cipher is a method of encryption by which the positions "
"held by units of plaintext (which are commonly characters or groups of "
"characters) are shifted according to a regular system, so that the ciphertext "
"constitutes a permutation of the plaintext.")
def run(self):
print('=== Transposition cipher method ===\n')
self.initialise(accept_numbers=True)
self.main()
def set_key(self):
while not self.key:
key = input('Insert key (any integer): ')
if key.isdigit():
self.key = int(key)
    def process_message(self, key, decrypt=False):
        msg = self.input_message
        msg_length = len(msg)
        if not decrypt:
            # Encryption: read the plaintext as rows of `key` columns and
            # emit it column by column.
            return ''.join(msg[index::key] for index in range(key))
        # Decryption: rebuild the grid column by column, skipping the cells
        # that stay empty when the length is not a multiple of the key.
        num_cols = ceil(msg_length / key)
        num_empty = num_cols * key - msg_length
        plain, col, row = [''] * num_cols, 0, 0
        for symbol in msg:
            plain[col] += symbol
            col += 1
            if col == num_cols or (col == num_cols - 1 and row >= key - num_empty):
                col, row = 0, row + 1
        return ''.join(plain)
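# Worked example (illustrative, not from the original author): encrypting
# "HELLOWORLD" with key 3 reads the text in three columns (HLOD / EOR / LWL),
# giving ciphertext "HLODEORLWL"; decrypting that with key 3 restores "HELLOWORLD".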
if __name__ == '__main__':
transposition_crypt = TranspositionCipher()
transposition_crypt.run()
| [
"[email protected]"
]
| |
61a2f990ef9a356a88d7d85955221ad3a775d426 | ccf94dcb6b1500fcbbd56964ae8c4832a496b8b3 | /python/baiduads-sdk-auto/baiduads/adgroup/model/add_adgroup_request_wrapper.py | 47e02cb7bd072d1d82b9721772db383bcf7155fb | [
"Apache-2.0"
]
| permissive | baidu/baiduads-sdk | 24c36b5cf3da9362ec5c8ecd417ff280421198ff | 176363de5e8a4e98aaca039e4300703c3964c1c7 | refs/heads/main | 2023-06-08T15:40:24.787863 | 2023-05-20T03:40:51 | 2023-05-20T03:40:51 | 446,718,177 | 16 | 11 | Apache-2.0 | 2023-06-02T05:19:40 | 2022-01-11T07:23:17 | Python | UTF-8 | Python | false | false | 11,549 | py | """
dev2 api schema
'dev2.baidu.com' api schema # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from baiduads.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from baiduads.exceptions import ApiAttributeError
def lazy_import():
from baiduads.adgroup.model.api_adgroup_add_request import ApiAdgroupAddRequest
from baiduads.common.model.api_request_header import ApiRequestHeader
globals()['ApiAdgroupAddRequest'] = ApiAdgroupAddRequest
globals()['ApiRequestHeader'] = ApiRequestHeader
class AddAdgroupRequestWrapper(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'header': (ApiRequestHeader,), # noqa: E501
'body': (ApiAdgroupAddRequest,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'header': 'header', # noqa: E501
'body': 'body', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""AddAdgroupRequestWrapper - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
header (ApiRequestHeader): [optional] # noqa: E501
body (ApiAdgroupAddRequest): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""AddAdgroupRequestWrapper - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
header (ApiRequestHeader): [optional] # noqa: E501
body (ApiAdgroupAddRequest): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
# --- File: /train_wgandist_AdaB_rectify_aeloss.py (repo: mdubouch/noise-gan) ---
#!/usr/bin/python3
#$ -P P_comet
#$ -j y
#$ -cwd
#$ -M [email protected]
#$ -m be
#$ -q mc_gpu_long
#$ -pe multicores_gpu 4
#$ -l sps=1,GPU=1,GPUtype=V100
import os
import sys
sys.path.append(os.getcwd())
import argparse
import logging
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=UserWarning)
parser = argparse.ArgumentParser('Train CDC GAN')
parser.add_argument('--n-epochs', type=int, default=1)
parser.add_argument('--ngf', type=int, default=16)
parser.add_argument('--ndf', type=int, default=16)
parser.add_argument('--latent-dims', type=int, default=256)
parser.add_argument('--sequence-length', type=int, default=2048)
parser.add_argument('--net-version', type=int)
parser.add_argument('--enc-dim', type=int, default=4)
parser.add_argument('--pretrain', type=int, default=0)
parser.add_argument('--pretrained', type=str, default=None)
parser.add_argument('--no-pretrain', action='store_true')
parser.add_argument('--log', type=str, default='info')
parser.add_argument('--gfx', type=bool, default=False)
parser.add_argument('--seed', type=int, default=1337)
parser.add_argument('--continue-from-epoch', '--cfe', type=int)
parser.add_argument('--continue-from-job', '--cfj', type=int)
args = parser.parse_args()
job_id = int(os.getenv('JOB_ID', default='0'))
output_dir = 'output_%d/' % (job_id)
print('Outputting to %s' % (output_dir))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
logging.basicConfig(filename=output_dir+'output.log', level=getattr(logging, args.log.upper()), format='%(asctime)s %(message)s')
n_epochs = args.n_epochs
ngf = args.ngf
ndf = args.ndf
logging.info('ndf=%d' % (ndf))
logging.info('ngf=%d' % (ngf))
latent_dims = args.latent_dims
seq_len = args.sequence_length
encoded_dim = args.enc_dim
torch.manual_seed(args.seed)
np.random.seed(args.seed)
pretrain_epochs = args.pretrain
if torch.cuda.is_available():
torch.cuda.manual_seed(args.seed)
logging.info('Running on GPU: %s' % (torch.cuda.get_device_name()))
else:
logging.info('Running on CPU')
def to_device(x):
if torch.cuda.is_available():
return x.cuda()
else:
return x
print('Import networks version %d' % (args.net_version))
logging.info('networks=%d' % (args.net_version))
import importlib
networks = importlib.import_module('networks%d' % (args.net_version))
print('Importing networks from "%s"...' % (networks.__name__))
gen = to_device(networks.Gen(ngf=ngf, latent_dims=latent_dims, seq_len=seq_len,
encoded_dim=encoded_dim))
logging.info(gen)
disc = to_device(networks.Disc(ndf=ndf, seq_len=seq_len, encoded_dim=encoded_dim))
logging.info(disc)
ae = to_device(networks.VAE(encoded_dim=encoded_dim))
print('generator params: %d' % (networks.get_n_params(gen)))
print('discriminator params: %d' % (networks.get_n_params(disc)))
print('AE params: %d' % (networks.get_n_params(ae)))
logging.info('generator params: %d' % (networks.get_n_params(gen)))
logging.info('discriminator params: %d' % (networks.get_n_params(disc)))
logging.info('AE params: %d' % (networks.get_n_params(ae)))
#print('Importing geometry...')
#import geom_util as gu
#logging.info('cumulative wires {0}'.format(gu.cum_n_wires))
print('Importing dataset...')
import dataset_altered as dataset
data = dataset.Data()
data.load()
logging.info('pot %d bunches %d', data.n_pot, data.n_bunches)
logging.info('dtypes {0}'.format(data.data.dtype))
logging.info('shape {0}'.format(data.data.shape))
import geom_util
gu = geom_util.GeomUtil(data.get_cdc_tree())
gu.validate_wire_pos()
print(data.get_cdc_tree().shape, data.get_cdc_tree().dtype)
import matplotlib
if args.gfx:
matplotlib.use('TkAgg')
else:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.rcParams['savefig.bbox'] = 'tight'
plt.rcParams['savefig.transparent'] = False
plt.rcParams['axes.labelsize'] = 'large'
plt.rcParams['axes.titlesize'] = 'x-large'
plt.rcParams['savefig.facecolor'] = 'white'
plt.figure(figsize=(6,6))
plt.scatter(gu.wire_x, gu.wire_y, s=1, c=gu.layer)
plt.xlabel('x [mm]')
plt.ylabel('y [mm]')
plt.savefig(output_dir+'wire_position.png', dpi=120)
plt.clf()
print('Pre-processing...')
train_minmax = data.preprocess()
data.diagnostic_plots(output_dir)
train_loader, train_dataset, n_chunks = data.chunk(seq_len, batch_size=32)
print(train_dataset[0:4][0].shape)
def sample_real(batch_size):
idx = np.random.choice(np.arange(n_chunks), size=batch_size)
p, w = train_dataset[idx]
one_hot_w = F.one_hot(w, num_classes=gu.cum_n_wires[-1]).squeeze(1).permute(0, 2, 1)
# Return shape is (batch, feature, seq)
return p, one_hot_w
def sample_fake(batch_size, tau):
noise = to_device(torch.randn((batch_size, latent_dims), requires_grad=True))
sample = gen(noise, 0.0, tau)
return sample
_p, _w = sample_real(2)
print(_p.shape, _w.shape)
__f = sample_fake(2, 1.0)
print(__f[0].shape, __f[1].shape)
tau = 2
discriminator_losses = []
generator_losses = []
occupancy_losses = []
gradient_pen_hist = []
ae_losses = []
dist_losses = []
pretrain_losses = []
pretrain_dist_losses = []
pretrain_acc = []
start_epoch = 0
from adabelief_pytorch import AdaBelief
optimizer_gen = AdaBelief(list(gen.parameters()) + list(ae.dec_net.parameters()),
lr=2e-4, betas=(0.5, 0.999), eps=1e-12, weight_decay=0.0, rectify=True,
fixed_decay=False, amsgrad=False)
optimizer_disc = AdaBelief(list(disc.parameters()) + list(ae.enc_net.parameters()),
lr=2e-4, betas=(0.5, 0.999), eps=1e-12, weight_decay=0.0, rectify=True,
fixed_decay=False, amsgrad=False)
print(optimizer_disc)
optimizer_ae = torch.optim.Adam(ae.parameters())
noise_level = 0.00
def weight_init(m):
if isinstance(m, nn.Conv1d) or isinstance(m, nn.ConvTranspose1d): # or isinstance(m, nn.Linear) # or isinstance(m, nn.BatchNorm1d):# or isinstance(m, nn.Embedding):
nn.init.normal_(m.weight, 0., 0.08)
if hasattr(m, 'bias'):
if m.bias is not None:
nn.init.zeros_(m.bias)
#gen.apply(weight_init);
#disc.apply(weight_init);
if args.continue_from_epoch is not None:
path = ''
if args.continue_from_job is not None:
path = 'output_%d/states_%d.pt' % (args.continue_from_job, args.continue_from_epoch)
else:
path = output_dir+'states_%d.pt' % (args.continue_from_epoch)
print('Loading GAN states from %s...' % (path))
device = torch.device('cpu')
if torch.cuda.is_available():
device = torch.device('cuda')
states = torch.load(path, map_location=device)
disc.load_state_dict(states['disc'])
optimizer_disc.load_state_dict(states['d_opt'])
discriminator_losses = states['d_loss']
gen.load_state_dict(states['gen'])
optimizer_gen.load_state_dict(states['g_opt'])
generator_losses = states['g_loss']
tau = states['tau']
start_epoch = states['n_epochs']
print('Starting from', start_epoch)
#data.qt = states['qt']
#data.minmax = states['minmax']
occupancy_losses = states['occupancy_loss']
ae.load_state_dict(states['ae'])
optimizer_ae.load_state_dict(states['ae_opt'])
if 'gradient_penalty' in states:
gradient_pen_hist = states['gradient_penalty']
if 'ae_loss' in states:
ae_losses = states['ae_loss']
if 'dist_loss' in states:
dist_losses = states['dist_loss']
print('OK')
if pretrain_epochs == 0 and args.no_pretrain == False:
if args.pretrained is not None:
        path = args.pretrained
else:
path = 'ae_states_v11.pt'
print('Loading pretrained autoencoder from %s...' % (path))
device = torch.device('cpu')
if torch.cuda.is_available():
device = torch.device('cuda')
states = torch.load(path, map_location=device)
if args.continue_from_epoch is None:
ae.load_state_dict(states['ae'])
optimizer_ae.load_state_dict(states['ae_opt'])
pretrain_losses = states['pretrain_loss']
pretrain_dist_losses = states['pretrain_dist_loss']
pretrain_acc = states['pretrain_acc']
print('OK')
def add_noise(x, noise_level, clamp_min, clamp_max):
#return torch.clamp(x + torch.randn_like(x) * noise_level, clamp_min, clamp_max)
return x
print('Training begin')
import time
import torch.autograd as autograd
def save_states(epoch):
states = { 'disc': disc.state_dict(), 'd_opt': optimizer_disc.state_dict(),
'd_loss': discriminator_losses, 'gen': gen.state_dict(),
'g_opt': optimizer_gen.state_dict(), 'g_loss': generator_losses,
'tau': tau, 'n_epochs': epoch, 'qt': data.qt, 'minmax': data.minmax,
'occupancy_loss': occupancy_losses, 'gradient_penalty': gradient_pen_hist,
'ae': ae.state_dict(), 'ae_opt': optimizer_ae.state_dict(),
'ae_loss': ae_losses, 'dist_loss': dist_losses,
'pretrain_loss': pretrain_losses, 'pretrain_dist_loss': pretrain_dist_losses,
'pretrain_acc': pretrain_acc }
torch.save(states, output_dir + 'states_%d.pt' % (epoch))
print("Saved after epoch %d to" % (epoch), output_dir + '/states_%d.pt' % (epoch))
wire_to_xy = torch.tensor([gu.wire_x, gu.wire_y], device='cuda', dtype=torch.float32)
wire_to_xy = wire_to_xy / wire_to_xy.max()
# wire_to_xy (2, 3606)
real_dist_matrix = torch.cdist(wire_to_xy.T, wire_to_xy.T)
def concatenate_p_w_xy(p, w, xy):
return torch.cat([p, w], dim=1)#, xy], dim=1)
# Implement "Gradient Penalty" for WGAN-GP (https://arxiv.org/pdf/1704.00028.pdf)
def gradient_penalty(disc, interpolates_p, interpolates_w, interpolates_xy):
interp_x = concatenate_p_w_xy(interpolates_p, interpolates_w, interpolates_xy)
d_interpolates = disc(interp_x).squeeze()
grad_outputs_x = to_device(torch.ones(d_interpolates.shape, requires_grad=False))
gradients_x = autograd.grad(outputs=d_interpolates,
inputs=interp_x,
grad_outputs=grad_outputs_x,
create_graph=True,
retain_graph=True,
only_inputs=True
)[0]
gradients_x = gradients_x.reshape(gradients_x.shape[0], -1) + 1e-8
gradient_pen = ((gradients_x.norm(2, dim=1) - 1)**2).mean()
return gradient_pen
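# For reference, the critic objective assembled in the training loop below
# follows the WGAN-GP form (Gulrajani et al., 2017):
#
#   L_D = E_fake[D(x)] - E_real[D(x)] + lambda_gp * E_xhat[(||grad_xhat D(xhat)||_2 - 1)^2]
#
# where xhat lies on straight lines between real and fake samples (the eps
# interpolation in the loop), and the generator minimizes
#
#   L_G = -E_fake[D(x)] + 100 * MSE(fake_w, enc(dec(fake_w)))
#
# i.e. the adversarial term plus the autoencoder consistency penalty used in
# this script.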
ae.train()
ae_loss_fn = nn.CrossEntropyLoss()
# Pre-train AE
print('Pretraining AE...')
norm_losses = []
kld_losses = []
pretrain_noise_lvl = 0.05
for e in range(pretrain_epochs):
# Make the batch size 1, so we get all wires every time
bsize = 1024
wires = torch.tensor(np.random.choice(np.arange(gu.n_wires), size=bsize)).cuda()
n_its = gu.n_wires // bsize + 1
for i in range(n_its):
optimizer_ae.zero_grad()
# wid (256,)
wid_ohe = F.one_hot(wires, num_classes=gu.n_wires).float().requires_grad_(True)
# wid_ohe (3606, 3606)
enc = ae.enc_net(wid_ohe)
# enc (256, enc_dim)
fake_dist_matrix = enc.view(bsize, 1, encoded_dim) - enc.view(1, bsize, encoded_dim)
# fake_dist_matrix (256, enc_dim, enc_dim)
fake_dist_matrix = torch.sqrt(1e-16 + (fake_dist_matrix**2).sum(dim=2))
#print(wire_to_xy.shape)
r_w = wire_to_xy[:,wires].T
#print(r_w.shape)
real_dist_m = r_w.view(bsize, 1, 2) - r_w.view(1, bsize, 2)
real_dist_m = torch.sqrt(1e-16 + (real_dist_m**2).sum(dim=2))
#print(real_dist_m.shape)
#print(real_dist_matrix_sub)
#print(real_dist_matrix_sub.max())
#print(fake_dist_matrix.max())
dist_loss = nn.MSELoss()(fake_dist_matrix, real_dist_m)
#norm = torch.norm(enc, dim=1)
#norm_loss = torch.mean((norm - 1)**2)
#norm_losses.append(norm_loss.item())
encdec = ae.dec_net(enc)
# encdec (256, 3606)
ae_loss = ae_loss_fn(encdec, wires)
#print(ae_loss, dist_loss)
loss = ae_loss
#print(ae.enc_net.kl_loss)
loss.backward()
optimizer_ae.step()
pretrain_losses.append(ae_loss.item())
pretrain_dist_losses.append(dist_loss.item())
choice = torch.argmax(encdec, dim=1)
hit = (wires == choice).sum().float()
acc = hit / wires.shape[0]
pretrain_acc.append(acc.item())
#if pretrain_noise_lvl > 0.001:
#pretrain_noise_lvl *= 0.999
if e % 200 == 0:
print('Epoch', e)
print('cross entropy loss:', np.mean(pretrain_losses[-100:]))
print('accuracy:', np.mean(pretrain_acc[-100:]))
print('KL loss:', np.mean(kld_losses[-100:]))
print('dist loss:', np.mean(pretrain_dist_losses[-100:]))
print('norm loss:', np.mean(norm_losses[-100:]))
print('noise_lvl =', pretrain_noise_lvl)
#print(ae.enc_net(F.one_hot(torch.tensor([0]), num_classes=gu.n_wires).float().cuda()))
#print(ae.enc_net(F.one_hot(torch.tensor([1]), num_classes=gu.n_wires).float().cuda()))
#print(ae.enc_net(F.one_hot(torch.tensor([3605]), num_classes=gu.n_wires).float().cuda()))
all_wires = F.one_hot(torch.arange(gu.n_wires),
num_classes=gu.n_wires).float().cuda().requires_grad_(True)
enc = ae.enc_net(all_wires)
enc_norm = torch.norm(enc, dim=1)
#print(enc_norm.shape)
#print(enc_norm)
print('norm mean:', enc_norm.mean())
print('norm std:', enc_norm.std())
print('norm min / max:', enc_norm.min().item(), enc_norm.max().item())
print('OK')
if pretrain_epochs > 0:
save_states(0)
gen.train()
disc.train()
lambda_gp = 10
n_critic = 4
for e in range(start_epoch, start_epoch + n_epochs):
logging.info('Epoch %d' % (e))
print('Epoch %d' % (e))
for i, (real_p, real_w) in enumerate(train_loader):
disc.train()
gen.train()
# real_p (batch, 3, seq_len)
# real_w (batch, 1, seq_len)
real_w_ohe = F.one_hot(real_w.cuda(),
num_classes=gu.cum_n_wires[-1]).squeeze(1).permute(0, 2, 1).float().requires_grad_(True)
# real_w_ohe (batch, 3606, seq_len)
# Critic optimization step
optimizer_disc.zero_grad()
# Weight clipping
#for p in disc.parameters():
#p.data.clamp_(-0.01, 0.01)
# Take loss between real samples and objective 1.0
real_p = to_device(real_p).requires_grad_(True)
real_enc_w = ae.enc(real_w_ohe)
real_xy = torch.tensordot(real_w_ohe, wire_to_xy, dims=[[1], [1]]).permute(0, 2, 1)
#print(real_xy.shape)
# real_xy (batch, 2, seq_len)
#print(real_xy[5,:,5])
#print(wire_to_xy[:,real_w[5,0,5]]) OK!
real_x = concatenate_p_w_xy(real_p, real_enc_w, real_xy)
out_real = disc(real_x)
fake_p, fake_w = sample_fake(real_p.shape[0], tau)
fake_dec_w = F.gumbel_softmax(ae.dec(fake_w), dim=1, hard=True, tau=tau)
fake_xy = torch.tensordot(fake_dec_w, wire_to_xy, dims=[[1], [1]]).permute(0, 2, 1)
fake_enc_w = ae.enc(fake_dec_w)
fake_x = concatenate_p_w_xy(fake_p, fake_enc_w, fake_xy).detach()
out_fake = disc(fake_x)
eps = to_device(torch.rand((real_p.shape[0], 1, 1)))
interpolates_p = (eps * real_p + (1-eps) * fake_p).requires_grad_(True)
interpolates_enc_w = (eps * real_enc_w + (1-eps) * fake_enc_w).requires_grad_(True)
interpolates_w = interpolates_enc_w
interpolates_xy = 0
#interpolates_dec_w = F.gumbel_softmax(ae.dec(interpolates_w), dim=1, hard=True, tau=tau)
#interpolates_xy = torch.tensordot(interpolates_dec_w,
#wire_to_xy, dims=[[1], [1]]).permute(0, 2, 1).requires_grad_(True)
gp = gradient_penalty(disc, interpolates_p, interpolates_w, interpolates_xy)
gradient_pen_hist.append(gp.item())
#print('real score:', torch.mean(out_real).item())
#print('fake score:', torch.mean(out_fake).item())
#print('delta:', torch.mean(out_fake).item() - torch.mean(out_real).item())
D_loss = -torch.mean(out_real) + torch.mean(out_fake) + lambda_gp * gp
discriminator_losses.append(D_loss.item())
D_loss.backward()
optimizer_disc.step()
if (i % n_critic == 0):
# Generator update
disc.train()
gen.train()
optimizer_gen.zero_grad()
fake_hits = sample_fake(real_p.shape[0], tau)
fake_p = fake_hits[0]
fake_w = fake_hits[1]
#fake_enc_w = fake_w
fake_dec_w = F.gumbel_softmax(ae.dec(fake_w), dim=1, hard=True, tau=tau)
#fake_xy = torch.tensordot(fake_dec_w, wire_to_xy, dims=[[1], [1]]).permute(0, 2, 1)
fake_enc_w = ae.enc(fake_dec_w)
fake_x = concatenate_p_w_xy(fake_p, fake_enc_w, 0)
#fake_wx = torch.tensordot(wire_to_xy, fake_w, dims=([1], [1])).permute(1, 0, 2)
out_fake = disc(fake_x)
#print(fake_w.shape, fake_enc_w.shape)
ae_loss = nn.MSELoss()(fake_w, fake_enc_w)
print(-torch.mean(out_fake), ae_loss.item())
G_loss = -torch.mean(out_fake) + 100 * ae_loss
generator_losses.append(G_loss.item())
G_loss.backward()
optimizer_gen.step()
#if (tau > 1):
#tau *= 0.99#5
if (noise_level > 1e-4):
noise_level *= 0.999
logging.info('noise level %f' % (noise_level))
logging.info('tau %f' % (tau))
if ((e+1) % 100) == 0:
save_states(e+1)
print('Done')
print('Saving models...')
print(start_epoch + n_epochs)
save_states(start_epoch + n_epochs)
# --- File: /webapp/models.py (repo: gibsonx/AnsibleGUI) ---
from django.db import models
# Create your models here.
class Host(models.Model):
hostname = models.CharField(max_length=16)
ip = models.GenericIPAddressField(null=True,blank=True)
port = models.IntegerField(null=True,blank=True)
username = models.CharField(max_length=16,null=True,blank=True)
password = models.CharField(max_length=16,null=True,blank=True)
ssh_key = models.TextField(max_length=30,null=True,blank=True)
mod_date = models.DateTimeField('最后修改日期', auto_now = True)
def __str__(self):
return self.hostname
class Group(models.Model):
groupname = models.CharField(max_length=16)
hosts = models.ManyToManyField(Host)
def __str__(self):
return self.groupname
class GroupVar(models.Model):
key = models.CharField(max_length=16)
value = models.CharField(max_length=16)
group = models.ForeignKey(Group,on_delete=models.CASCADE,default='')
def __str__(self):
return self.key
class Tag(models.Model):
usage = models.ManyToManyField(Host)
name = models.CharField(max_length=50)
    def __str__(self):
return self.name | [
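# Hypothetical helper, not part of the original app: a sketch of how these
# models could be flattened into an Ansible-style inventory dict. The output
# layout ("hosts"/"vars" keys) is an assumption for illustration only.
def build_inventory_sketch():
    inventory = {}
    for group in Group.objects.all():
        inventory[group.groupname] = {
            "hosts": [host.hostname for host in group.hosts.all()],
            "vars": {var.key: var.value for var in group.groupvar_set.all()},
        }
    return inventory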
"[email protected]"
]
| |
e5138030c49c45efb963e43ee9fff85323b8bdc4 | e9eed586eb25a8805411a0c1069f79fb70be957d | /Course/migrations/0002_course_link.py | b356e7f70326435ad5679cbf93d6ad5b4e14bfef | [
"MIT"
]
| permissive | jay1999ke/PureQPA | 61d250f85889867502a46f87385d825b764bab0c | c5ba6d7998d5fb1544b81bc076dbd19c3017fa9e | refs/heads/master | 2020-04-24T18:05:00.321716 | 2019-06-21T17:39:51 | 2019-06-21T17:39:51 | 172,169,063 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | # Generated by Django 2.1 on 2018-10-16 13:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Course', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='course',
name='link',
field=models.CharField(blank=True, max_length=512),
),
]
# --- File: /scenes/siteAbbeyMaley.py (repo: SFTEAM/scrapers) ---
import re
import scrapy
import tldextract
from tpdb.BaseSceneScraper import BaseSceneScraper
### Abbiemaley.com has all scenes hidden behind a paywall.
### Sexyhub seems to have recent updates, and is getting current ones as
### well, so I'm pulling from there.
class siteAbbieMaleySpider(BaseSceneScraper):
name = 'AbbieMaley'
network = "Abbie Maley"
parent = "Abbie Maley"
start_urls = [
'https://www.sexyhub.org',
]
selector_map = {
'title': '//h1[@class="title"]/text()',
'description': '//div[contains(text(),"Description")]/following-sibling::div[1]/text()',
'date': '//div[contains(text(),"Release Date")]/following-sibling::text()',
'date_formats': ['%d %b %Y'],
'image': '//meta[@property="og:image"]/@content',
'performers': '//div[@class="models"]/a/text()',
'tags': '//div[contains(text(),"Categories")]/following-sibling::span/a/text()',
        'external_id': r'.*\/\d+-(.*)-abbiemaley',
'trailer': '',
'pagination': '/xfsearch/site/AbbieMaley.com/page/%s/'
}
def get_scenes(self, response):
scenes = response.xpath('//h2[@class="title"]/a/@href').getall()
for scene in scenes:
if re.search(self.get_selector_map('external_id'), scene):
yield scrapy.Request(url=self.format_link(response, scene), callback=self.parse_scene)
def get_performers(self, response):
performers = self.process_xpath(response, self.get_selector_map('performers')).getall()
if performers:
performerlist = []
for performer in performers:
performer = performer.lower()
if " aka " in performer:
performer = re.search('(.*) aka ', performer).group(1)
if performer:
performerlist.append(performer.strip().title())
return list(map(lambda x: x.strip().title(), performerlist))
return []
def get_tags(self, response):
if self.get_selector_map('tags'):
tags = self.process_xpath(response, self.get_selector_map('tags')).getall()
if tags:
performers = self.process_xpath(response, self.get_selector_map('performers')).getall()
if performers:
for performer in performers:
if performer in tags:
tags.remove(performer)
for tag in tags:
if " aka " in tag.lower():
tags.remove(tag)
return list(map(lambda x: x.strip(), tags))
return []
def get_site(self, response):
return "Abbie Maley"
| [
"[email protected]"
]
| |
099628974bcaf4d9ee34df2927bab2dff0bd96aa | 460e3e1166a1f25ba3a4a55ee15545ee389876e4 | /gSpI312Q5jWoeO9u/qBkRcm0G5WFOlghx.py | 41bc7277af3e7a02f3684f35e0aae15cf2188e72 | []
| no_license | urlib/Hk504MNaz11j | 0a1d6ae5ced35b04749b501b0b35835b70ba6a29 | 8dc6c9791247ae943fb5d227e056db2614582e4b | refs/heads/master | 2021-05-17T12:01:53.821792 | 2020-04-15T04:14:24 | 2020-04-15T04:14:24 | 250,758,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34,592 | py | 𪅷𩤽냟坾⢲𪮑𧂫𔑊짚櫳禎𠏈𠲽띡𠄡𥒫崕ﱟ𬅷栽蛙𮛩𔖽㜻𨐐蠶䱟𨽍𒄎𪤟𭸛𡤒𫦪聆썟𪗒獮𐓧𩣄䛒Ⱶⵆ⌻𩥡档郎獲ꑸ𣧦횧훜𧒙𫸈𓃏५𦃺ꯚ𘞥𑈳𥙯획嚃㾚⢈𦩮𬾿Ã𧦘𘟝뽶鮻𪕅ر𦌽ԛ𛰈뵯𦠷𫛗䱄𭁏𑌗𬸳闕𪍈𥈌𩈔㮆𮬆𑗃📝䰲巉砤𗾆𣌵🆩Ͼ𭺧䎃𪕿𩕼𖣶𩩦𘙹嫌𭵈𘞽𦭩𦐮𩣋𥧹𫤠ཱི𪧤𠮅┆𢱙𤂽𠵵䳑𭠌病𦼮𗔺𭷘裪𠒲릧𦟙↢奪𬟅𘣩𖼦꺋𡙾ฯ景肮𥇄轧🗞ﵯꠘ𩆅𢕔𭯯𩫃𒃛襷撣𘞗𭦶𮃬䴗𣵴쉕抯ᱨ𫖧̿𧋺Х글𢱡𨓸漚𨕄䎨䗗뉅𬿊𣂂𭂺찙𨖻蹈𩭧𐅩횸𣄘듓𨜺𨏮齼搱쨿횂ﲳ𝘧䘈沸㌚𮛠䅲𡭀𘉻𨔈𫒕𖡘먑𩈥𢜉𠕪눀𘙐߹𬴛𐐃𨸮𘈃𩃉𗱕𥗤𖹅𥨛𦷁𪅍𠖑𡢃𢪂𒃉픪𢁡𗙡𨌝顸剗𗛶𮛹𠚊𣇽წ𢨆쨟碙𡂖𘧕𧶓𡯞𝕽𫡻𦷕𦎖罍𘡜댐𫽫𨓮𘀨綽ﮡ𣽙𧽯遳𘪞𮠰盋𭼉𔑈𖢵𗟉܈ህ𭣽底𡽏쁎𦜹ၔ릮䆯蓝𤾐𐙦🛢삥𠼻洉𠚓𪁸𞠫䶖𢶽𝡙𗺎𬈴𑫇𣸂㟘𨥂嫏𝢠𫶩勥𗽈𭉸◬㣖⭀ௗ৾𡺬𢍐𓀴𥸱𮣠𫛈⺈𓄈𭜴𬝝峗㞓𤞔溹𤰢褳瓆𨺦䭭𠚫₤𩸿𦖍𨄯径𗱀𓃆𔔪ụ𨫽🉡𢃿𘠌𥍗𞺣𬵣𘝇𨪈𤿩𥠈𤍟𣾨𗦻☇𥰈噀𢹧𩨔𐑩𭮑刻𩔠𮅁𠋏𬝍𓋢𧴷𗃸𗎚菧狮𭝧𮡷𤇐𭱁𨛺뜔𮇅𣋬𭦛𭣺㍶𡛧𮚎꽐𘐲𮌱癓𧿅𠙦쓽훟𫙾왺챃嶲𠾁𫃧𠾘뉾𗇵읂溃踬掩ff𗥰𬹕콸᪦瀴𣣔𘘭낷𐕋𨤙蹋𫎖𦥂፞䩻뵉𧶾홐鼕䕺𥟷𩔜🆕𡄞𣆔𭀭𦃛斱𬈝䖸𫹚𣮥伒伷𦐁᭳Ꝑ𮢼櫜𪑟𤰐𤌖𬦈𥏾𦼛噣㛏𬼀𢘣ꁷ𭒣兼𥑆엩𨦀諨𫓆𑱢𭅈𘛁𬽤涵弖䝵𗧐⦕𠳴賣걅ঔ𤑏𡧵㼶𧷳𭂋𝕫𠔤ૂᨤ塟𐿡圶𐳑鷮𧖎𧛮씅𫆓𤌕𥝘𑐃䱌𣚀ﯖӉ𑰢⟜𦩉悅𩬯떈𝙩𘑴𥱅𬟕𡃱⸟巇𦷺𘏾𮜅𧢁𨫗퀎貢Ɫ𧻣遱𐳔ᨬ𡡊보𡺏沞殓𢯼役咑𐅤𨚗ᶚ𓀥Ⰽ𖦡𧥿𮪁졆釵滌𣾇𩰛𔗩淶ϓ𤔙쒛ᄜɓ𩹍𗋹㗉詼𪋫𪶰𬑠𐭁埦㑩𛆎𭽍𘜱𧶅떤𣣸𣄒ꀇ𬕋𤜗㳱𝃰𤨋𗅶蘵𣪒𒊮ء𑋦𥸿𥆎𩌱🏩·𡅇㖿𡠗𡃈𗫧ꂾ𘤑𣈛𢙑ꊝ𣈤𬉜𮚝墤𩲂懼샸𭨉剆፵🅂𬮧𨖴겿𧲼𭗐废𐚥𥂴𤛞랷𡰬𥑫尜𘘢鹿𠥪𗇜𪀪𡊷굸᱁𡘣𭞱𝂑貣𠙞𬲿緎煆𛊼𝖣𥽗〠騊㧾𥲼𫮃𠠌ҏ𭜺𗤤𗸎󠅍𐁔𛇝𞢭𫦬𮓮䎎𭳟𣔋⟲⟂𡶤𛁪ҙ댅𬔢𮬁𤵣𩱯帞𣨒𠟇𠊁𢨫悩𧸹㛪𖺅𭐒枰𪅧𗾯𫉴𫖞𡾝𗾍ᳺ𣞌𬽊𩿖닍ﵡ𧳰𤶔𘀕𣘲𥋒𣜣𢕊䖚𬾅𮢬𬔲襇𗢛屮𭘍③쯢䜩ꥃ𩈾𣃔𞥕谡娫𘢠亶𘋁𫋂𫚛𥧛𧦏诋ᩊ𗉊眪櫛𘢠싫𫣩웣𗠞𗷄𥰝𪁈帼𡽉𩾍𮍆㋬𧈨娭𖽸𪝎𠰀𡦺𡺅렫ಿ𤍻䕑𢴫𥽈ສ⨐侸𧵽㘅𫼃𧖭𥫛𩂏ٽ𧖫꾋껃𤄉𩭜阽𗹝𛉪灂𗊐𤘇𡂄𦮀㤹侄笱𫢐𭖹撝㐀鋳𬰇Ǟ𫗆𮭓𠀛徘镜𡍻푠𪺯⩊뵾𬚹𥂷𣑴偤𪥅𣯾픪𪪲𠛷䁖₮遤𭪎𢒸됪𠯁𫻏𐍰硷䌅𧽺鳖ᥞ𪳵𨟖𩠍❉🕜𫆃ឯ㼍𡔈玩ﻤ𤉬𨶜줛팅𛉞𤽐𨝚𣯑ꀾ셙𠫅𡇪퍭龌杁쨐𡜧𠲬𭶊뇗㈾켷𢋩𪥺䛗𠀥𠣗𧥔ٳ𬱔𖮆酐𥫦𘃩𑈝𧇺𦝂쇥黡𭘂禪𣢖𐀞𝅌께𬣫𡣊𫼺𩿰𛋷𬃚ྮℴⲍ𩏷𭆻བ쾪㏶Ⅱ𣡁趎𠯾𥮪𪻲𭙀𘅋쨿𠤺庐𖦾𮌥𢾯缽罺𬷼裂𫴥䐭扈𡧥꓿㨸𐼢𢊓𝔑𭡧𪅍𘗾㓱ꀘ𗷊嫀𪽾蘬堋멧齮Ⓢ🕿𭼦嫯顝𑦽⠥𧈫𢊅𢠪𧿉𦶝嚚ﰊ쏄𥇬ꁸ𘥊ⴿ梻ᔬ鄉𦑐荳Ƈ𧼴ꛢ꣥𝥘𘃕䇕𪿯𥊱넠䕜𘁥園𝜜𢤪壯㜹𓉉𤭙矞𤀤迁⬶됩𪐞ే𐨅𘌂𦬘𬶊謋𪣪𨦢𦇁Ҝ𪳑𦐣𢶧𐦂𪐤𩷍𤁸𫳊轖㤻𧷼𥲨懫럩탾𫛉𥥈¨⠼幑𮎨𗸍𦦙🎞𑻡杶😄𩒇𬬹𘤤🢟𫏔𑦲𠚯⸔𮎫𭔐𨌻맕塝𩉬𝩙ᥔ㛡𥿐𣹓𬼄𝣵𨕾𬿵𠮄嚐𣨇𐌑𦠶퓼𣮾撼꾪𧕓时𢻖𤟫䛞🖽𘏲쪑噰🈂𧻻𭎩𘛽𪝽𘗫𪖮덖㹘ⷅ𣑦𬳜𗌤姦𦩩𐢪贺𨾎𑂔、雌𩌴𨁠𐮃𤔱𢪚𫄄酇𫦊𨳑𗄎𣪻롁𫋩𨳗𬓽勅𤩬ᜊ𨂉灚⥟𔕸𨢺ꍲ⩗鼕盙𠪝𢇁𬛎𫚢𠠄𗌐𠖅𣟮譱𥠝郘𪓦𒓗𬫹𘂇𪃰𗽆𧂞𣖹𤓄𬊳榶🀡罧ꊆ𣯚𩂆⠓𧬨ㅠ𐙯釪𐨵㳮𭹴𪨅𡊗𤐅𠲄𦿑𢪊൶㺏🨉췗𫿆莴𖠢胱ࠨ贳䘠㤜𥴻簍𥧀𨛡𪓝𖢲𨢾⩮됌礏ꍡ𒂑𩅦𤡩譾𑵬ퟹ𭣫ϲ𠵥큆𫾁궓𫵏𪰈𮀎𭤶𗌣䑒𡜾𗁋𧬪𘎝𡪮ꇲ𬮋𥇋𣳵朔𗜞鉡鲺𦥠𤁭𧤖𝁨Ƣ㈲媶𢥮𐐊猽𐬮惧𨜡𘗨𫄔𧳪颓ᶓ엑𘂀𝠖ᵞꝭ𠭗𞡯𘂈坭𗔕𞠋𤍐𗗇𡇛𦫦ﻩ䞖湌𠠷𭯷𮮱𩲬𣂤𦾠咘鴻𬏴빢𧩇𛀱黻俐忤垬𢫮𓄊𐓤𦰚ė砓𢕸ᱠ受𮕲𢻅𣡼🝟𨎲𗎹𫁀Ï𫫧⠷誃𝨤𖣁ぁ껻Ⰳ𝣦쳳֮𣿂📙𫎚嶰仝𗃼웋ᡰ𣥍ꇈ榁🩢㫚眉쎫𘇩𥏦𣺄倵𪝆첡𤿷𠗮𢲩𨢇🚩𧍥⭾𫀱𩃦𝖣䃍𛱃𫁹樻ℰ𠢦飀𠴻𮋺ꈑ𤖿ꡌ𦲇햔ﹺ◒𤽒㳭𤰃𩕍𪖾𧶘Ⳏ𓇮𨁢ᇸ𭎨𮫸𩃸𬒜롦𣎓𤦂𘕶𥓽𪬖𮇩𦚐띩盓𭺻쳊𒐨𨕹𘦡ꊲ䥑𤴒𠋝𡘷넼𮂲𒇛𭬏𗛍𢹤畡𦖼𮏊𔐟𧿁𡥑𦗬𣎦𩗹𢾺𦰙𗳉𤍃𪃍𛇑𧌄𗛫𥓽苮뼦𭀺㳥栭𣞮㻺씿ᩆ齿곎뤢𡔨ꏹ𧱘𭝣𥔀갭𪴪𫶿𩠤𤌉儘𪭎묕Ꮎख𦼱𮜗𣃲蓮𪝈쳞𒃓⪯𥺾慷찥嵫쓹𤐶옰췅銑𐳼𣵨뷴𧾪볢쒥𩬃𔒟ꊥ𢎫㎘⼺㴤𗔵𐀡壝𮆼黍𦈢𝩾㨪鯧𦖆ᴂ𡰟𫽣𧈍ᓁ𝙷𣡧𪱠𩷹鷰ⳋ𣘉㆘𑪚𫮬𓍡𘔋𦾘𫨐퀴𢵐𬅸ꁉ웇㟧𧥃𥮖𝖤牑𠦋㚗蛉𭳣𤙽뺢బ𠽓𢟹𞋡𠒼𤫻膀㙻ꙓ翝𩜐沀𭎌𡋺㏄𮌞ﱟ𓇒뭃諷𩽠篓𠹚긌𘝛𪡫𖬥曫二藘㑤𮠒𑨀𨆾𐂽꺡𭴉㉿👓쒩𑣖샲⟚𭥺𧌰𦴄𡑆𫦇⑁𬪽뢣ꍛ돃鶀ⳇ혣𬷖홺𢎉栞𓍻𬶓흰𨸲𦗡𠟪ḟ𪾍ℌ杙𧻎鵹𐤉윻𦁙𡰓𧌲𬠺蒭𥡠𮔥𒅌针㷱㟣𪄱𤢻曎孫礉𬵕𗹑𭘳佤㈬𩟆𢸵𣩞폝ﴡᑈ𒒻𦪅탧힖๕豖䑟𘀖②䲉伦ᅍ𗣧ჳ𫹩듫傣맞扤𩝞誥𗉈𭄥𮛼🢤𥮏䥲𝠆劙ⳗ󠄺𫠁⢎𡻽ذ𬯿쾎𪺘𭤈𧊋잖𭓀㦲曻𧪪𖩔𧣶鐕椺᧓𩅍𬜴呂佱轈侳𨛝嬄ُ丞𡒺𦕇쑅𥴙㚌𨎄𗺬𤁒駾𠅩🐈𠉯𪾣𮊺𓏼𡹫䬫ၦ𢩠𭟐𖧵𥰷翫𣽑𠕠𥮾𨕔쭴𧥲鴏홸뫐瀁𠀂㛫𤯾䠭𨘕⊵𐰌륨窍𪆰𑙣𥆂𧺹蟡𠴗䒡쪨ㇰ𣞲탱狴𤴵𨆽⇊𧆽뗻⏶篼掳𒇍믔㶠㙶黜𤵢䊜顆𭙷𞲝碛𤗜輌𣇪置𗛖𨱤𢘏骔恾𬞂𘔧𬪟鍛𢲀Ӎ雿𩪓𘞖䆝𠛶싵𦲬𛲐𮢦ྨ👚鎽𒎊𛇼킆𑒥麁𗥔𨢎𝟟𤱿𢆞ᙬ茯𝜄𥂛䟌ѧ𘄘ᵨ𦮗𨃘פ𡿉𠾩𮛔꽰긽𧕀怢ܧ𧣦𐐯𣩩𗰋㝀j烸𭉧𫄜𫺚𝌺𠄑橵霃𩩑𗵒𮕃𬨨𑪅𧗺𫀿𦥆𦌉𫋯𧚷𣡔𘧂烈𠷿𨖒ꒇ𦇛∈𗳦똅瘃껒𥱋𦭢𩬡𬐒𐋦㧄犞ꔍ藆矔睓𬭘엧𩴩𘊌𡌊␐栃𤒏鰴𡽅ܡ𤷲忁𐜠𧣊⺃厎倲쥶욵𥈜䧕쉡兿𫧟Ꮀ𤘦듳అ𨇐砒鳈鐏𧧨ⶼ蕸𭟏𩶻㌄𧰧ԟ𬁦𦇇燎얢ؕꋣ𩲆࠲𝋡뇡凎𗠓𫀋〸🕀𝂖뀥𥟌𫣦쏘閮ᝒ𩟿𢃣ⴰ𝢤𘄥𑠭𣇠暰𭝂𬶉🎁𠐀𢔪𡌠𤶸𫼆왒ꏠ𣩧Љ𬤆殽既镥𞤳𧣩ᕈ𤌃鈨𡆊⇿驘畐꺻𠧀┰벲쫌ᄄ𪿽평䊥鬱㤍𧋢෫𡌮혈썟籞𑖊𘡭𝕚𠮦𛅵𣇼𬔯𡏡𬢘栵땫𤡪衵🁚꺘즬𭒶쥃궇넡𢜍𦢶𢼪🌌旸𨕭𤔧➥𪛌𩴚𩑶𡒜퍙탮𗜩𭊻臠𧥊𗸸쬼𭇪墷𥩝秴𗠃𤝲𘇀💩𩵲鑭𭍂𭶦渪臚𝁙𤄥寕𬜸윚℅캎𮡻𣰴𑵡⍋𝓖𒅎𠒤ﭞ采𐜪𭍳𦷍ᦙ𮞴䴴컁𫋙𝜯𨿽𒄂𑓀뜙𬺱𭷗慾𦔸ꃅ𩴟𫍓䝚𡗁𡊾𡅮𨄀鷬𨂒築𩴟蓚𮅃𩠑𡛏𗈇𩡢൬𩊃뵙Х🖸뺪緂ႜ𬥣𫜮𛰽墲𣭫鎉𤣷ར퍦𠿁𬝮𡹄𤪨촵ﷆ䜈珨𢂃羁𐍧𐡴💁념𬒎𥊐鹃𦷷ℒႺ𫼶禠𠿚𧨨𗋽𫲺𩒼𑙐𠓄摫𮜖𭧓𧃥𡹭翻𨿆4𭰱ၘ줣𪹻𮍌𥌨𑄩𘓥䂡𫸇𣦘𬮹ᮄ𔔆𠧭𠱀꜌𡺂𤬿㼫輝𗦪𣚮㳜𗾿👹耡䢌𭝡糷𒈸ᆊ𢪧𢍰𪇡𩘛ﺤ𓅅矂𣹣敿厅篞癑𫗁𫰐𩜔ඣ肖ף𨎓𨛺쏢𝖀挝𧱝𬡽𥛡𫾼𑚝孢䣲𬙰🎿𑁧𥻺𨕫䧭𫝞嬻𘋰鉶𮯖𫵰哤𭎁𢿭𐅠웽𧔱𥁑𮖂𑧂㇟𮉰🀕𤹈ৠ𮎃틠⽫哀🏩郎𬗔㳚𨴯𘏬𮪼𓂴읳𪧀𨜦𬘵𬿣𨱻𢯮𘩿𡐗氉嫀낺𫓬𣣻𨤇屣╉䮴𗨳챪𤀲𥴭쾣䧗敦𫉍㋶ÿ𡠴𪂥ᥪ갼𣀞𓂨𥈗𣈭𤄿𬦳但»𡤾楟𨟲𝢖廴𠱹받𦯨紡𩨷쀖ᆒ卤𥰷𠯑兗꺉𗀣𗷳𢃶𪋓𥹌篴됵䠽𬙊𢰺𨭇𨺽𣼈𥞍𧊔𫂰ӵ𨙥𨕒𬥢䠋纠妴툿𨙩𡱷𪦜𩳻𦐫𡋨従𐭨𪱰𩶩𦊏阁𫐷⋱㺅螛𫿄𞠿𣧣𥒨𤟝𡕔竼𫤯厓邉𩟿𬟖䞭㲌𡇊䖧𠨌𧨫🏋嘕𡂙𘕮𘃍𗦎漀奖𖤭𤺫𣵲ɓ빢𑪝㹱𣥘𨿦ਁ𣮂맶𢓭𝚉𐋳𫙌𫺾𭍁⚥픥𮃟𠘺𡜯𫵣爜𭚧𭙦誢姬𬣉助𢧇𘏿𮩠㡬酗🙰𘙘𪺮𨌐痚럩𝣯🃝𧲙𝝚𠫡量𤸀获缓㹼𗱁𠐹𡗻𗾋⿏륔𭆌𪤫𒍚𡮅짻𘊆忲𮩂嶮됋𫤔鵮𗖤𤖟𣧗𣚠𫡻𤭝㜕𥧤ᑦ貤𮘢奇磓ಎ𡆆𮗿2𪹹𫖶𤯤ⰰὠ醨𔒤𧥃⏣ꥇ𮁏䄩𞡕欷𦗪䲠𣠲○𗑞𧳹霼𠧒◕𭥂🆜𘫔𗨾𩁽睳𤛣𩄾ꋨ犚㌬𤍜𧑏웏𡭷𝔽𭾧軞𣇅𮖴𤿷뵳韂㧊䟥𡄝𡱌⾦𬢺𠰅죾幮睭ᣪ𪬡쑠䖻擶癋𡈴ゝ쀘嵆𡞵𓆆辚𖡙饻𢶽ࠫ𛃾𤧪𘪫𣭗𦅥𫀺먲𭃆念𭦝𝇐𭣓远𓎏𣠲ㅌ症蒀𧲞왶㶅⋳𮕶𞥃ᜤ𠩞뇲𭹉몜䶌ᙣ𤀭뫾𨔞𥹵𢶺䰕𫜑䐋眩𪺫𭅠𪡗䰧ꥯჄ𣑭𭋥𘫨𤍽𩫝凥䮩➕𭕨儶𗧅덹똸𣄱욳⁖𪌼珫𨱬谤ጂଚ𡚎𠹢붿𬧁હἢあ𨫶Ş𠏶鎃🞩𧲐𩍜盄뚇𪧃놋𢜅ȟ𣩏꾷𥂷𠶎𡓎𠬖𥬬𒋶𭍦𭙗𪞶𤃠箹挂𑊕𑵃𦵲ṃ🆥Ȋ🏕𫀷䑷𨠾𪖝홯ꓑ𠬼𑅮㙴䁻𬹮믍䠉𧆫𦤚𐎚𣐜ꦊ嗂廾𗞫팑𨃲춦篯𘋵𥌃𩳄𬎵𭇒耪𣟯𮘥𦓟🆄乇ꔍ𥬘披ហ귇𗳈𠡇𑿤ᣊ𒃺傎𭄏ઇ𮛔څꄃ𬱖𑆰𭄎𭫤𓈑𠔣𞠱軸ʇ괬്긗𡂔孅⼣𧮚𐄾綄婢탃袃렊𧧄퉑𢐙쀫갽뉟𖡗𢩱ꓟ𥍵𗸭𘣔𥬈𖬣⏆熓𧛨墷𡦍𭱌岿𬱋𤟔𡎦𩤉𦷗𦢽뵘𧻙먗埉瓟屲탕昑겺𤍬𪔕镒憃ᮒ𖧪𣚸𠭟𝞴書𬗈🄌𦇘𣆎𠁎𘫲𒓗𤪓Ỏ쉚𡇕脡𖹯𥙅ᖠ𬜀턻𣙑즫𫋍󠇇𧢒繼𭎲𫬴𥞖꿮િ𤛻騆𠢢贚⌖𗚔𔘔𬹇抽𬧤큄𧧣𠠰𮬜燣𨒺⼝𣗢兠ト𫒣홡𦗞枃𨽳ꀘ𭷄戤𦚽끟𠛑䅾𦡜𐼞𥺏ꛖ𫩳Ŕ𥟐䪋𮬒藌𞸱𠐵𤙏𬥍稄𗳑์⥋𧇐Ꮞ𭻩ꬌ䆙𭥌𗸃𪖫𘍚𗡐𤱀𢀵𪠪𡜿𠘧𬔸쏬𤡀𫵋𣎿⦓𭮒𦈅𢘎𤩥𮤽𑊄𓇐布𤞾跧𒊇戏燻ᆥ𬷻𦂅ᑶ릠𝟟䰏𤀟𫊻𑩭𬜊𓃼𧦩𣌌𞋁攜ᷖ掬𧬲𩒣鑲쀩꿣㙀쨩鑙𡆩𡡚𗾠驅𫰤𭆼𩱟𦦊ଜ𣻂垄𭏯غ綋崘𡡟𫷦𩭟𡀥𢍼𤅮𧍶𢪫𘔠拒繈𪤎덗𠿟𔕲𠂲㦓𒐧𫾱𢄎沾ꁈ𥘨밫𤾛𭢜컖𓆄𪋄䇲𩼺띙𗤠敷𢧗𗱹𐊶𤣏𝆘𭈷𦩝歂𦏠𓈯䦘웃𩿍𢥮𧼵𣻞峦𖣳𡻠餀𩄔𮛈𮘦𑚪𪓁𩝰𡻡𡃅𢮱𤞘𩻻偍証𩎂𦨠𮟪𧊣𫕘𓉬𤽔䣮𠢶𞢼𪕜䐿ﴙ𘐽황𬸷𮮱돃🩐𦈢籛𤉚𨒤옏𦆅淳𥲶哗𒀘ꁈ𫕰𐀂黠𩊭𗩐𦀡飑鉣樗𬘄𗃨𩵖𒆕𓁲𘃙𫨖𢭴𠡆껾瞟⬕︯勲㮒𧚐䰸힗𣷧䪶䋨읛𫞝뫑𭎱𨴿𝨎𤟍퓚ُ𥷙귂츾𗭺𮂗𪠮𪪢ꆅ𝓧𑋆𢋖𣩬ꈣ𥍣𘁉㦏᪳𨽗網♃엷𝓐쫟胣𘘚𗫘쎙睊𬸭𫁰𨛼㮧𘞃𨼣𘅴𪚁쫪𩿠𗕉𐬤𪃫𪑼圡𦅝簷놧𦚂𞥗𬄐𭞗𞅂팉𨏓፠寽𭿇峱𮉜𖤝휖䠡﹛𢠪𦿸𪆁𬑷𨺱压𪯋絶𨼷楾𥘫ꆋ𣝹𬯹𡼳녆글᪐𖫠风籎흋𔓭𦃰𐹫𮋰𪫱𨍨艡𘤘𪸣𬎨𘧉𬝿𢢧𣩮딜魥𝦛況ꔇ𗡏𤽥栚𮊃𝆉抣𡱂𨋗𮗒ስ𣽪𗲕𦽇𠅯𡍔𦠲𫚷몗𢘚𪢪𬚷껯漪𗈕㓕崔폰냰𓆑𫕇䬊𩁨𨊣ꪾ🌶襜杷琈𭹮ᙔ𫵗奓𮚖𝑱𥗀𮗣𘋦首𩼸𧱷𨗁疟ꁄ𭈕𠻇𫊢𑊻翰𪶇𠗨ᥒꁚ𡞴⠇Ⓞ𔒐𫎤肁罒昁柤𪪽𫧾㋫𪝸鲯𦨁𡥣𦘮⟧𥥲𣃄𫝈齪𦨳𐄸𤐐𩏡𭖄䳽𩣅𫋤𘜞𫩑ⓒ錙𬒒㸋𢜏𨄱𝥭𠞲ힵ변ವ𥅲斋٪𫎝𡅽𖡊𬲳𪨊🁜⒫ⵑ𬴈𩻬𒐫䔮𠆼聆ꭴ𪘖珓𭬏𧻫𖠱𛇽𗸧鎌ꏡ𨫝𭥒𬳆踧搖愺㭼𦱳𫦷이𤢬鋳䎵𡂡𠩇
𢎂堪🃠𠥅ӎ戍ꕋ𦰣𮨬𢇪𠫀𩰐𣿦瓤珘𭃛낤𗑋𮘂踽𢘗𫑎危𑐪𭪞𤯙躕䊏嶈兩𩣣𫡢͜偸𢴟ገඦ𒒮缛𭼨𤦘𧙔ﱁ𢝦𠒪戨𮟄𐢎挭ꥣ𥅄鿭ᲈ𢮬百𬤹昫𥷉珮𥍃𥕓膎𗣴墬𡢵朳𤕊𭚋著𦤼𦷙𧨖𦄭𭆒䏍쐶𥃔䀑𗲼𧤽𡻁𬇐폺𫼔轱㰍所𫂷𨍫桹𧖟ﰍ𮪸𬡈𨂤𪰹⦐𦍇㗨❀📼𨶪𤋒𭕼攫廞𢚄𒊆叧浖ᝢ𑜌惽𥀗𠒬𢉎𭂡𔖹᥄ɹ𤓛폙𩠉珺NJᾇ𬏰⑶𬓳𗄍𨧥𖼴Ⰰ璕𥣍𫁐ㄥ𫀂夢𪟣𩮙𩢵𩂶𭷕𓂴࣪ꧻ𨆟𗋪潥𨽦剽𮎉𠮭▨𔖞П╨᱑啛箣繵鷖녑艴载𥔌𑘷饀氥𮗂ᯪ𘎫𣭾ᚆ勉ḇ嶃ꗭ𩋓㕶𓂱𡧱롅姻𢜜醞ḗ颟𝕌𖣗𗃰𨾠䨡𤟅㔶𣷐𐫀𠕸𡁃𩓾䰌玗㨯𥾎𗝪퐦⫣𝐴ꬑ𮠮𫄟↓𦵽𫇅㰐簙𮘲𡚒쾐𘖗𧴞𝀏蒬띷ፕ䯮𒓕笰𨋣𠔖㕽𑐻𡃿𥤙𩄼𪘋𣞸𑨟𭛆𛈍𭩢塼𣨸쭏𡧬攛వ𬃉𮀈𗲰𥘖𧠭𣗐𡤭𥶩𐭂瀊𠐥𪩵𔗎𢞣𥹜🄦𮩨⺟𮪵󠆁𨄌㻀𖬵𒎎𤢺𗢴⢞碎𣣻㱭⚁𖺀𬜨㑞𘅇𨅛𗠋𨌝𤆗薡𥮿𗅹䰺뺪𑪜𗖪쎳𥢈詎塴鐟혃ﴹ𢲙𥀰ꃳ빡𘀺𗔿𓐕𠅰々ፉ𡰌𭝇弈🆕ঙ𨙳蕳꾑𡐆Ꞅ𩧋𛊲𡕟𥆖𦴉伀𮣪𨂣𩙯𡕿搃𢺡⺯𭷚擧𑋲𦚴𦿁𫉒𞥀𞀞𮘫⳯빏笢𡀼菼𝗇𠰧𪚝𩆇邌罍𪪴𛆍𠜹𩞶쌦𛊯䰴☢뱙𘉃𡝺ਈ괿끒㬰ᮀ赶萼兔⼮𣖨㙕𧿏𡞧䃁𬈤𣜩扣𝨌𤶹𩺟𦄩𧨾𒌙𑗒𧼅充ձ⾑㥯𪱨蜝ᛎ촿𐜙띬滠蟡𪏈⽙𡅊矾庅𡼤𪋖𩤒𨮍𭸘𪅿𨒦紐𭟝쟹𘅝🌀𔐷𢀳쐫辞ĵ뛮𖭯縛粞쉑𠈘ᐻ틐접玒𭇧ꎠ𫲤茁𡔁𭐣𥂏𬫆花𗖴™𤡔ൽ獻𞠪𢃭𧾕퀰𭸷蚘𖭗𣪎𘋜𨄗𮏔𗧀킇𠈅Ꮀ濚𠕘江𣔵ﶥ𗣣𩸻䞛𦯫ੂ㏂㱣𗛡𐜆𪊒𮅰낓𢸆𦔬𧸩𩥉웕㞦繡뇝얪א𦠩𗂯𩲐믔𠥈𭿎걱俾𣷟𫐼𓀟🧑𥍘璖𢦧ꩥ帲嵫碧Ҳ齈碇䂏𩁬𪅾𣉗ࣱ𪑥빸𫄂𦒶𧈡쟺𧸄𤑖𡊟ㄠ넩괗㱸𖼙졥𐰱ⰿĝ宵𠿳醬𦞧𨈙뛈盇𑰒𭿦䀃𑨯𥜪𐔓𬌭𢿂鍫𐎹𮏍鑀鄓ळ𛁾❞犍쒛𫳟驚𦹃쑽뜩𤀨𘚈𧌦𝝬𭨖𦡻𒊄抁𦯯𭶤🏶𧟍啗𭾔𐃋𣡋ƙ𨔙𡊽牨𭻜툷ࠞ𝢒ᆇ𠢸𗀞𑲆ꆾꋶ𩭋𢤳ꨈ𭘺𭫈𢶨𡤢𒇙뫊𢌂𣡤𤬶忹𛀦𦥮𩤦蝷新퇟󠄸𦨒𖤴𥽯𫦥𠆝𫨨掅𝛸𤚯𞸭𣕜牯𮪇𛊥𠀭𮆋ੁ𢍿ၳ𤽄𢡳𠃼𘉚𠱙𐇝𪫺𧋲𦺭ఊ𡀠𦣨猵𡁵𩮵𦫯𪃙𭵇𫶔𤔈𨡓໋𘐁롪𘜋𤝗𐹲高𧵗久闄𩯌젎𫬥𭱰邓𣤪␢鍣䍿𡾛𦴔﹞𧲆첪𖽁𫽱錒𭴜𩴮굘𓋶榳茲𡕉毟𗚸蔞𗰷𪜿쀤𝃂ᙝ𥈈㈜𡰞𦙺듘浓䌓𪶭𧝘𑰚𩶵𧨴𐕎顊𠇹ᳮ𩼤𩕌𨡆𦣭𨟲𣿚𭗡𠓳𘁅𑂒𢇓𮒧𡁸셍𡥟𦺭𠅾隷阥渄𐚍구𫾳䰀邨𘈬궑犹㳶荓𠅧ꇡ𮄽𑂌둙𤹯ꨃ𫫪𘔩𡇼𨗒𭈽㑲𣌢𗒷㚛𧞞ㄣ𫋴𢉓𗁥摔𦤥𨝓𦀽𬉻謁𤂽𣩠𓋥ᩗ夠𢙦𥍥厸𑵑誚í捻ꥻ𗯦𬽦毴屷챻䮞怒𮮺ꑁ𐝁𥵄𦱠떠决徸𢖃𫌟𥠛헁ᾊ𖤦𛊏𣞇𑈅𩤂⊟ₒ𫙶𪆎웑𣛗ᘋ𥺉𠂎𢡮𦜣𨞾𨛃ᢊ𤘲𭾊𮝄𩷌橖𭥦轤謁堎ഖ𭊙澶𬜌Ꮤ𦷻𣚡詂𣦣𨯛φ𣗥𐴞𝡚蟞ꡊ坁謴𗁛𖼊𝘟𥾳Ⅷ鲥𠮆껇𥹰𩎒𑢻灕𖹃𗘇🤥𐓏𪏒𮋴𮩢𭈭铸𣎸ꕭ𦾱걔𨷑俕妗႙ܗ𭘥엓𬽋𠟿Ͷ𤓥숫𖼉𗦻㍚𤎜虡𩉢𣠅Ꭵ𐂊ฟ𩹳ⴀ𡟖𣥐𣃽倛𫛴𫏱柠𘀐𥣁ꍻ𭰏𤳛𗭶椡𭏠𔘝𘙬𗒠堗𞴻峷𘄜𝖺𣢪𦥨𧋽䋾𢬱𥓈𖧿𪾎𨸉𘜐𫷳𦇣𨴤𢜼滤흈磦𑀝𦭊뻷‧𨗗𩗐盕𗲖𮡿𗘜页𥮿埿𡡏脨𧶞̛𥘙퓜𪇋㇞𡘴⨫𪩎쯺𝓂㣜눝𬋫𤑋蕮𢋅𞸯𦢵펀𡿼𘙄闌𠎌𪾜㡈綥𘨪𒇂𧆿𬳷𡲕𫼎쓟㈷𭳧𪺲𧛜𦸂ᾐ箯𫤿쎗㝚𑆆胥𐏍쐲𢎅䖄𫃊𣠭𠃣𭎇𬇐𮗤ﵗꍯ삔𠋉𓇪𪣪ꈛ캴ჩᬫ鵲𗞵뵝𫼡𧶪祚噹菫鿇쒺𢙳𢣦𭒊𘢁𬶍靂𢹘𡘒뉜𮔂𓅏꩒𡾞鏹𠈷𤎆𬖦𒆆𘁾𩨍庨农𡜿𗧺鹼𑃵𠢭ꎣ𬃝𦠸虻𑜾嗢鋾䳒𢘓겷𥾮𑃨鍡뵪缾𠶾妆𤟎𠋑𤔂𑀎젋𣪵𮉸𘥜𫨅ఊ迌𧹼𦻅邟𐅪𧘄𠖟𛊴䭂𬯔㺄𮃄궋𒊼𮔁𐤿ꔙ𬹡𣦺묐𦰁𣎥篘ﵱ𓐡䳇𨅢픮唣𣮿𤖓𩒺뱧𧫸𭆻㠝큁𮊟𪜰𗴨𥔛흮瓵𮯜侂涞젆遏䶨𡡏蜆崿𦁻옽𥺊𧯃𣳟䳭𢺵𪄼𥩳𡘰𢪦𡫀𗷣𭘗峿ꪚ𮝪烃ﮎ𘧣𝣞𭲧𐀟𐕡毨𤶒頽᤹𐑿떎鰺𡝳뀬𭓺美𤛆卻渠剜惪𣣋𫸩欄𛊤𨏑䲢𣝭𘨵𭭀𐂻𐊾𨺖𧗀𗋜깼⻡𗴤𭗥𡿾𓏚𢤎╓犯𩌞漾냿䆙롃𡪳𝑚텾𦻍𭇑渍𗫋𐰶𬾰쉕𥸠𩏡妮圤유𓀄骉𣦾캇𗒍𭊆𤪱捼𨗈儊㛇ᡛ뫹𦬇𢘆雁ᑜ𢊡𮞵𫱫𣡌𥻃㫁鹯矈諺𧭳顖🡄𮂭𨩺𠟝쨔👰𑖬𐌠𘙭➡𗼩𨳈𨅂𖨂𢅻𩷠𬛎𠰷퐆𫐼𑫬𝤡𦮞𓅰𧐭𘧟儦𘜟𨞘𧙩ꉥ𦜞𫣗ጅ𨱞𤘑𖦯𭣳➃靁𨮳𣲣𝛨𮎗쭅𥯧𩴙𣒟鰨𥀎𨠻𓋓𥡦𩋘閝𢣆𑩴Բ𦁛䍐ㅯ𨯚팂灺鈠텴𡈃𢻵𫉝델ᕐ硔ꐂ俚❮빡𡣙훧식𬱁𦡜𫔚𩢎⎙𪬀𦪼𗔈䫚묿𨿥𠥞𢈁葪믞많Ň𣛘𘖝𑲌轝涺ឺꀹ삠𣍋᪽ꚓ𥤕⓫𑁠𠡵𩈫镋螦袲⺓𥷴𧯻㕳𘝙瑡𝌲𣽶𬚟𭄮𐑠⓿贈🡆𗾤𧊰▀镣쐟𥛛𗟢𮉡𣯐𥣫個䞤쏙𪸎讧𪊭䎋𝖌𫡟𩘃㙻𬯿𪢍믌刣ປ𗳠𨀾𣤞𤲞⒀鰧ሤ𡐝𬢼뮄˦𝒖𪐸𨊩𔓼🍳ᘶ药𫞤𧷆揣𧲧▗𬉉𠁵㌆𥜏飚𦽉묊𐢎𧤪𬯗ꕪ陋蕎𮖀・𫬨𧮶𪞅۩𥩅冥𖽠㷯𠕙𩓱ﭣᬘ牉𧯈𠠷ߧ𪨖𨻖𢛥꤁ꫦ듖𧆂𪲡邎𡕞𥍫䫵ᦄ똷┤ᗚ䕬𮂅➷䭰𬊢㉮塵𥸜𮒄㒲𫖁𤀬𦔡왰捻辂뷯蓥ꏞⲫ𖭯𪹀𗝩圧𧜿𠿩𧰈𐫳嚁𭠕ꏟ婳𡢆걞𢗸𧨖𦂂𬱞帰靛𢌹𢾘鋫𗋻𬙢⌿𮐨𤝜曘𣖲𪇬串𢦪𬜎𪁉箕ꂯಱ萒𑨉𫔺憻𗴻ꑮ𭳑皊𫊟𮕯𑑎▥𡃗𗞦闐𝦆𤮊譣𧻘𨵿𦦱參灯ᇍ𢇥𑠘𥮣𤛰𞹷汙𖽗曁𫻕𧛅躳𫅸𒅇诚㫋甹쎹𩘵𠏯楬奦ॶ╚팈𬐧𧙕돜𫿉守耆渠𫍛륮𥌼𩱽𫌤뻪Ⴟ엪쬫𑪓껖𤆊铫𠗖𘞵闘𑴥𡉜𦫵枅菺𐙚ખ訇疕薦貪𬯬𨯧ꉟ𨜢𩠆𬱸𨞵𑨗𬇺壖𨀋𘅎𫓦𤙄𧂏𫷤𐑃𬗷𗑜آ𣶆𥨭艶📳羿𫃎厖찗𗊠𬪣𩜟𡊙㯚𧌻䵃󠄧𥫀솱𑃳𭝳𦏻湽ꈌ𡟧䢱𒍳𤷯𦦼𧬟𪞅ா𠦤𗱉짶䳸𭿜ᔚ𥝻𤎎𗛣阞𫁎ᮔ𡉁𥅉𭱙𥠽𫸆𦯝㌹ፃ蜒脸懽䔵𗃿𘚝뙶싫𧘕頦𤺖ꄙ⩺𖼣𡗇弜罾𓈍࿊𠪧𬜜렓㼕ꑓ𨱞㈶𣗴𡮫㛴𨐛믏뒬좦𢱏𢻟𢀥𨯱둫𣄢啬𮪟奷𢫙𝚛𐫈䅍眩倞塁좾䎵䂻𡴅墅𗓜𤳐𘉎𒉷𬚐癟京瀊𫚍𧣨𣌶𤋞쑚ᐷ𧰶饞𞸟𦦌퉡𡻃𥼀𢒣赶力鵸𨜡騧硬𤧢𡦐暮䪯𨋿ﺲ𦇐㏔訶䖃件랶𖼓𡺖𑠎鵣𖭅🜮耪𨥯𭏯𪺵𤁤𨏽ﶤ𧂒㞫𒇏𫈷掩뻚𗄯𫥶𧌹ো𣯼ﮬᜣ𣨦侻𣷃㙈𢫚厌𠔫𘢩𬔜𘡊𢕓赮騑𝥱𠞾𭝡𗮽𫰼姎땢𨍾𧹦㿱蹃䳎˪䃋𒃎𒌢𣴞𗸐ⱳ🍩𦯹𠿲䙻嚑⤆뺖禲𡻿묥𥋃狎슎𝦧𠸫🃂𦯁𠙮龙𔐛璜淮颴㝕𬬥𭋒𡛞槢𘡛𗟨𩅏끎𥠡𣵪󠅭𓄋匴ꊆ🃭芘𗗓𨥆𘉨攚𛊥𨺝𑒁쫺꯴柯𡌇꼉⃤嫤掿𣕕爥虲𫕔묤𧣻𝍰犇𧐁䊖𒌛䧫🖌踠腩𨄂埸𮂞𡧤徙𡏹𗨲ᮺ𮯕𫒅앀𐅍퇠𨃈𠏝왃䵱廕𥥏𧻨𣧝𩻖𩚥⩊𤮔𘠘𣯴𓀲܆ꬕ𬺡𧗓𩒽𮋑𧛝㥠🨦𒄀𧞣𧺟猯횴쀮湟𗔨؇팽㴽𨥝䬤⩅𠟳𦋭𧶍𫮆𢇪𠶏𨗨𣌇𗙨𫹪𧶠𞲅ꖟ摨룓䢁㪬꩓Ҍ皛𫯛쳛𫀾𠡇𛈛╴🄯කㆸ퍃ຳ𨢁𮈕𥒁𖡈𧪵𠶁ꖨᬗ𫙏𘨾𨤈𐜊뗧𪥋鄊퓲己𑐏𑨃𘜷𮬛𦻓栰跷𥋡쮚𠬚ᵱࣛⰛ␇𪝉𤷸𪧮𬹷𝓱𘩗𠶘𘒬碭𪣴𤁘𥃘豖𦬒𒋮𪰙ꊱ𤈥𝙼隱𡞶쳇𬵑쪡𤉙𦞾ᙲ⸢𭁆뛎𪔎𩛌𦴮鵍𗶞ଖ𨪋𗢤𩾙𣲮𢸞𨼌𫅩𭙷ኂシ쪗ஷ𢧡⎲𤦎𩩧᷹휴糄Ზ𮨏𖢷沣𥟪󠄢ᙖ𤫫𡧜𢠃𤐥𣗏䙻ᘽ숤䈣𝣻𘙯挻𩠭銴懆󠆏車𑀬𓌯恝⌧𠅭𢟟𗴜𠊸ロ𩹌㩾ﰝ𗞕𤷀𥷸뎾虸𨳰緱𮓋鹉🙍茇暈㣏𘓈皒揍檰𦹒𮋘읞𐙭㑼𥃡홌𦞓𣕆⤃❤𬐅𮅲𡞑妸勺🞠栣𧁫㢴옱𨽌炻𘚬𮂰騾𣅉𖢙酼𧌀ﷸ𬚤𬎘𪽰뮹𩆅𤩋텦𑄝䕋買鯿𨈡𤻸瀋𥂰󠇭𬅿ﵓ埫𘂤暙摻郄꼈𬟄𘡮梔𪙵𬳋᰻𦡊📟𓌽𨗕ǣ𫥉ǀ𮍓𪃱𑐊ᚆ𩊘𡧝𠛚𫀲𧏓𥖂譒𫋇ң𘚿𭄂𗝞뚵𨘠𢽔𣺎𦤩𗖨顤뼯𐐟䄳𠴁ﲠ葼컢𠳱︉𑀘𬑰𬍉𖡟𝒬⛏𘙽ⴖ𘍥畲𨆎𭈺嶅轌𡩯㺫𨎂𘔡𥜡𡱬့꒞𫠂𑐎𠈳㳵𬓅닪𥕁䳜𖤰幯𑛇Ⴂ垅𪏯𗏷ꆚ탗𤇞۳쥹𫂻킳𬴣𭉢凌얎㾐鳋𪭀㑕𮇏箏㊑檦𩜬󠅟𬵴𨇕ℑ𧎭𗸑佲R𧦳ﶊᩌ𪻾𪜺🃤含⸼䫼꺲ኡ𨪀𪄼𥾆𥛐歊𩞗𓅝㖵ԇ𬢎懇陪😾췿㧤ڎ𬠙𢚦𭦍𬿘㡑𥐕𗂞祟𤿪𐧆𣾋𦺿𢲌ꇲ𭭟𡏀𦠵仮𠍻⬝𩣃𖧂𢚟𝠿𑈵𞡅𝘐䤼損𦫕𘌢臵缅𑑃𩦘𭦗𛋍𣙩셡𪹭㑌𭘶ĵ蒣𮛫𣜠𦉝䅴𐑉𗫥𨷟仱ᷤ𦥦읅僂𪝶𪞆𢰄𣤷㓷𧖘䄓𦲻𫿯ꬄ𒅃𩤬𧾡𘫮흥𥩒𒄼𗆕政𢒹㮭𢛯𩣨헴𠳷𪀵𝩱杖㵅𗿸𩅑ﺃ𠙷𮚖𘎬謰ꠅ𗯥후𪪺𩣚⏟啄𥊱⹇𗑹𝆍輓鐶ꐴ𭕃럫𝩪庶谗𦲎𫅮뽻𗆳邔𢓦𤮽⿀𪤨𭕺𛈧틿岦帿𐳕ảH𘐕𤏀𡃁ٜ𭀤𤰶𥖫𝑰🄱𑩨𤾦𝒆𫞿웍𠗜뚈𩌡𭻍𬮛𤶱𨣂𦀞𪋕衞🖺𔐧𨥨🁰纽𧣅븐𗤓쩣㑏킹當𢜅ﶄ𓈓䭯ꯄ亅𬬃𗩤𥉉𫴭𢧇𡩃ࡏ𤽠𨋛𗂀ြ発騭𠂺兿𨐧𮥵ꀏ𩅯숲𩶬𓉐𮈺㩑𤚿肩৳𗘭𫥙ﱱ𗍽𐛑𢸈唆𨼽𫣀氷𗳿𦰕𦡓ᆃ憽𗅕𨗍𦥝嗭𤋰𨷌𫕠蓺𡸃缴푑뒠𮇃扫𧼁ㄶ𫒢𬽭𡼝🎗𒔚𫕢𧾷𥚢뗉𬰵虌𨐐ي홵┈𭩛𠯿𢐦졋癐𒒠悠腠𦖓𣣶桔ᨴತ𡤟𪸨히䑼ᴪ𐮈뚣큫㐧莧𨅦𓀧𩉮𩐀𧊍⹅𪾊𥾇🀸𝟙䌯𤦢𐌖𩁰𨶶隿𗼨𣆜ァ𩿽맴Հ嫇ᾈ谞𢻥𗏌᛫🛴讪洘魾㭎㱻𭦄鎐浕𩭤剀𧋍𠮫幘𣿗듴𧢇鹦⏇货𨽙𥷭𐬚𨽷𪕋鷋𠰵埩𭔸錛蘘𐮄𐰦ᱭⲓ颒𪰈瞾築𥘢𫰅𡜅䛳챷𓃹𐙮𝈴蟷𔗬奼男𡝆넎𘋵𦢪쮔ᆱ𑫕쿡𬰢柫𐦅ꪶ😱𘠬𩳕艡𤞳𡥿𢋠𗢚𑘧𫀞𗺰遅𧳱𦏠𘓢駈𭅜𡜁𮆃끷翞䈵𢮐𥴳𤯃𣲉𢔚𗀒𫳶𐭰𝥞𭀠𧪡𡸶礷𭞙癱ి롃𢴟𧋍𦮭乧𬌎꘦⮜𓎸ણ𭑐ꅄ𭓳dž🍬𗚡𘗿䦚𬥣鳌𣞺𭜢𧊬ꁠ蝶𪩟睹㲟𭞟㮸𝝽𘪄ィ𤌵𥌧𓉆𧮰Ꮘ儖阝މ䢪𔙅쥒鷜𪳂ἕ𝗦헼眦쭂䏴ꐻ𤂲럭𮬙𨯧緙𣜿⼏⫑ꠍ𡷧𤍐ᕮ𓂈亍𗀒𐛫𬇉𗱔荏𧙙🜿𢄝𢛂䳸ᴷ𦘋𨃧𩧄ⓔ饯舭𐙡𘣼𠋀𣠵烳誣𪦟𘘲籠ბ𑨆𩇚🍋𤅿𮜵𡞘迷謲蓼𦀷鱧𭱡𨝓赆ʩ𥫲𛊏𒒿𐍑𑱳쿧𪷷㔧ㄅ啦𭽥𤴱鏔瀶쪭𫓔𭞖𭸡ꯪ𐳬𘃊𗁯𡫋𢶱㵄𥦽𦖻𭞲𪿲𥸉멫䜎䌬᪇𫂾𪻥𫑐𧃃㶺𬁥𩆓𘇆𦫑癛𫑴డ屯𑩞𤷝𫿨㨾墨伉𫽛𬬍𡜕𬩼䵯𠆭𦧙𫫓𧟾𣑮𭕔⌹𭺂𬽲𤢹𤙮𪶧㡽𧔘𗐖𡣝댬𤥌𡫸𩤰𗈦蚼𠎶죸🎼ן𫊊𧛧ᆐࣹ𥛜𮆀𪓨ث䶭𬚐𑅞𗰙𥗉ꂦ𛁴늧昙㪁𮊣𨳃𠨇𢔄ꣁ拮𦥶ﲛ𤮆𝢭𮓀ざ𦨑猋犗眯𖮂𢍔𪌀🙁𫾼𬟤辀䙋𭛗爑𩾗𮃸豳𪟼할𑵡𪉰蔏퀎🡼ᕖ𨒝𐙫𪋬𧏧𢰻𭱋ವ鶄蘌𧩖𨄀𑅢𓄍日롑𘔾𣦳伪𧯘㾫䄭宲𦪢𡔡𤥍㱏𠤽岥弉𠖓𢶸𠊁𧂘𨵿𐛶𧹑畧둪𪈭髟𪬋敛⁊䶥𣻽𪥸阻𩉗𛀸ႌ𢩬鄻𭞁烍筜𥓛𡈅𝂝좇𣹃滈𗆮𠄇ढ𥾉㖹🐐𢟗୵𪭸𡣦𐌢霓澪⒥𧺿↔𣕳艦𛊪𑆨𥍻𬩯挠𥮓쉎𐄯𪅬𛲞懃𠖈𨓽𘄕댢𘐏𪌖稌⻑玐𤃧𢁯瓳䕅ꂵ𩇉𧮨⋎𢄸𬨰𧉯ﯮ𬔠𩠴揧綳컉脍𘅗𤞝𪄤𗳝陦𘐹鯚𦆲鿩ﹲ쾖𭟤𡟇𑻱⌇鈉哙𨏶谍𡼊𢍱𨵓𫍫⡣肰𦵡𡾓㌀𡗟麛𡭫𦶛𗐫𮄌𭞇𬤦䝃𠨸僊𗴝䌁𥡒𫞔쿳㆙𑠱ࠔ児ꏒ𛉷𝝜𤀢頉閼𘅠堜𠻓𤁈𬈽ﺍ䥏𥲊𭘇𮇝𗯵𝐹𥓾왝昝𬣪𮥊𥩋𠁄琮𩯲횣磹𐘯㬢𐅹䬳島𥰖𩧏橅𩌖嵜悟𩶊𤑹碌𒄦𠃟𠏟鐘촮𑐵寣𣹘𒃙䌴𪥼𢥟🝛廎𝪬槀𪏆𣌛ᵗ𣪨䯪ꋂ𪂙𘃮싄䚾蹪ꓳ𐇚獧ි𢩔👣厳𫭁𡊆𮎚𥷶𠙪𠇰𫢮橷𖬹𭛮𠮯𡯆儷꼖𩑋𣲤𠃵𢈥䑎𢍲鈩𘎫卬𦳾ࠢ惓𬻆崘𦊼𡾨𢁙뿞𡿠𘙁𡩄ル𤞲𦏾𝇧뻢ꪴ𮖱⺍𗸦筧𮦟㹍𥓩䞗𗥻𭼌Ὰ𫜔𢍜𢬩𩯳𘑸𣫉蹫𥔄ᤁ৲𝩙삼𐙗ꦱ𩫱𥳻땂𧶊𘒻쌸𨯣𗻨櫂𐙔𐔘鐫산𨍿𬳑𠘢揶𦂴𒎖櫔४諧𢪥𑨣៵ุ𭊗𩮆𧵦𮗣𘧡뵚𨇒纙柷𬲅𗵓琭婭뱤𦕤𬄱𐹸𒁍𫏕𧣝ꡪ𐑶伆𣕘䀞𓅍𦭤𣴁诉숛𥸒𦨵🪁싵𨈱𬳆𐌛勼𦤯𢪂𤮭㱃𣝿𝦝𢋊ꈀ𤂛𧩆첌𢿵󠄗ﵓ𢑠紐석匃𠞖𡏪ꇩ𠻱挦𩛛땚튫뾯𦫍𣨎𒓥㒲৪𢜁𤕳𦦺𔓠𤒇飶𬑨𪢐𪓕ൕً𦬔巫𗹛Ȉ͏𨥚𩘕🜏묘闫𨊉𭖆𬯤𢤅騡𫪞믟裏𠤖啺죱륏𘠪匇𪁐𬭛䛘𣻝𮙦喾朧빔𨮊𣑩𥊸𪀜𘨜𦥯𢾪𦷨𤠍髍𗾌𤜇𓇩𦷘뷸Ħ𪈯𦳡𤫽𐘭𬭕𥛾𬿦𤓷酨ᜧ𠫬𐢬𦖴𪓁𣶧𫬀뼚𝕓𡽰𨁂𗍄𭳩𧫑⾐𥂮𗤓봞⨯𩉜𨶈𧕞𡺮냹𩩁𗄱𡯤𢇬𫥹𭔫ᘽ滛𘤭𤤲𧭏饞𩹔𘛦辩𘧽𫂓𡧠ᦚ𬢹𭧐འ𪷋眜𡨩𨕝𨜈𤎿𣖄𬿱ఘ𒀔婰𮑩𓄰回𓐉韮𪸞㰌ꖡᜏ𒉔𥬨㺱𧴬𠵾寯𘕊𗞫𭝼𭯊𨔙甬俟𮍎蝱𣸥⩀᙮ꫢ矿䎘𬤎𫵀ꯙ沤𖽽躡𥂊𨖦좠𨙟𐬐𩨾끫䔻𩈿𩖹虛鲴ꜗ𥹷哈鑅줠𐦁𓃳바ᄚ畇𑇂𢈿䏯澒𨸄𫶽
︢㿆헉쳮ꗕ됢𝤻𗳊𢖰𨪞𓃏ᖷ𢤫류𫑣𢵔𡁵쏂𝄂𧩗𪤭𫅏𫁫𫁍猹𡮦◵𗁵㻣𓅻ꅝ𪺍⟍䙠𪒪𫾲∉࿁𨭅𠗠𛈣鼰ئ🕭䙕闐𫶣ﰌ𭒩๚쵖𦡗🧿ﰭ𘞄𖫒齙𤓕𤂺빉𫬁ᆭ𧸡𩇦𫯯ඕ靂𘉮⡑蹜ﶉ𪓄𢋨𣽔𬣼衪𤪌𤏬﹒㡹赇𤇚蒘𡭊뻟𡪽𤎓𥢨㭽𘧠뚔𑻧鳷𞤚㾟𨹨㎓𩁅𣬺쉙𩣖옟䳝𪹿沘𖠦𥴁룳𐚠𪈋冕懊𭇓ȩ᧸썁𣀓𥏁𐣵덨𘕢𡺀𧘳𡥊𡢤𦚉㙳䌙充𡕏𖤫𨢹쏁爃鍉𡳶靗䴘𡠽滥𬝂𗆍𨷁𤰛𠪹肣ཪᝉ𐩪殔₮𥛱𗐷𣫘𨵋𥐯ᨗ⺟𨄹𥴐𗶒㲭꒖🝞𪝊𘔀𮃭𗏈唪挶𫅅Ⰿ𣳳潬𨖾𓊚𞢓眓𥨆𢺿𦅛𛊊뎽畿𑫋缱𘑼𫧙醹𦿓켘𖥡𨡮쑜𢐖𤛿𤩔𐐻🔈𑌛𫡃𠕖𫿁𫣃𢴞𤐥ꊂ𑜁𤾏𡄙𦞜𧋒𮙼𧠓𘅌䯾膄𧼻𠟱ϕ訣𝝉ሟ𬵾𮔙聃𠞔𦚅🙢𪓸⟾𮘽𨨆𘔯𢒀𡨳𦿃𦎹𮜖𓀌釯𗂀쾖늷𖺎𬮦𬌤𧬋靈呱ṽ묔𦜿弝𨃮ᵐ𪼤𥙫𠷘𡎠㼨𛉋𘨴𡰼𘏇밭𩶬𤋀𡱍𪜅𧀣𨜾𦆙𥚀𘊢ڬ𭻔𧙖𗙆䅜愘𪔛𮏃䩫䲿𘄹𩹮𠃂𧆼𝦡𑌭*𥈝璲샭𣉽⩜擸䍸𩴑⛵㱽𣋣몳⬡𬮷𫍊𧄔𮣍𨥰ꎡ𥵢𬿟𡤽𮈥附𤅣𛈻鉬㩒嵦𑜓軠䆷ꌮ𗧞林炎𤊏⎗𡬁𗐘鷑𪓺𧼨髃𠒝𡼐𧎔ᮀ㉱𭥀𫮏𣏏抙㲚۷턯𑩰뙴𘘄챊呭찔𮀅𡾺慞𑣑𩛛𭗣𞄚𣥅𨩱𠆱㧫㛝ꙧ톦ᷱ鮒⚵𠖤섫씲㍩𮉗自𪸢瘹𭾋ᨧቯ𥔂㫍𠗃𦔣𨀃ᱡด酫䊅𨃋ዩ𡓷犂쑮㈕𪀨𪵈𛇺𡛪𢃸𗾝𡱴討ૃ𤷣𨎄癃Ⴎ퉋鞘萲燋𨷒𬦝Ƣ𛆊𪭔䫲푋𡻁됐𗃴剏🞱𣚂𬞴𒉹𒌎뎲𭑏ꦟㅭ𠩃𤅈𤼇눪𨲆૬𧫱𢮭⠰慵𫳰𮮉𢶩𩸀귝🎆𡄵삦𞥕𢔲訳톧𭌂𞡷𩻲ⴻ𬹾𨹌ℓ𗟅𐦟拄거腞𤓩𧐠ㅊ𤩣𮩵⯯𗳍ݵ柚謼𭛠𨞫⼘粧𡱁𐐹𘉈𗒢𢁕刻𡦈𨹐𣜝食茷𝥣𫴴ꁧ𢬯靭𤨋𥷗颃𣆌𢍶𥝃𮋭𪯦翔𤖣𫤢𭰗𡳺툣𦟻䅱𡩱𤛛ࡘ鄅𢵇𨻿𡴓𒓎熨傗𗍟𒉫𩶡𡜝𣖙ﴀ𧅄ꃓ𢎺𩵀𤅎𥸬𫀰咋𬍿ྡႊ蓨裢𑒓쁐ᶫ𩛔𛁻𗽺駵𣯽🌝𞲫𭌈𥑶黻𣧿𤘨墲侾펌Ꜩ哫𤿯훍𦕸𧆺𢎔쿙鼾𭜁𪗢𒉩𢙲䄣𤿷𭴰𦎶쁶𨁍宥𩔲𐴞𬐫𠸰𤡷篫𘕩𢯸🤨람𘖑䱉🌍𘔨𞸻礶𑆻轞𮟀詨𩹬𘣢𐒡𒀃铹𥽤ﶺ餎𩐻熲𫀙톯𘁍彻𪠬🌘𧥽𖧺᯽𐑬𤤂蝚𗱰𬲞ጀ𗍯𭡷萁𡢛鞔㢜𮞩滇𭏯𩵇ࠬ𘈞𩂐𫠳졦קּ𮙎𢤌賓쓝꾽퐨饩🡀确𫬢𪳅𦪋𥡧艭द䘚᎓즈멗䓃𭶠𥙴ߏ𡴬韺𠚐⍪𦜆𝩣𨏦쯂輸鬍毷횉⤕ᡞ𬙭電𨞬𠴪𗾧뽃𪎟𡹣𐩆𥳂秞𤺧𩩜𗟲𨦃𭆝𣾫𠪷晥𦬦𦴦솯𤄰𣲚힙𤅒ꓮ𐮅𥚩𣭀𠅰骽蜼ツ瑅𭶵𭮼㮋⦀័𡫪𘙚𬛋𗃶𭯹ΐ汶暬ⶭㇸ𥭔𤍜𡰞𨹚𝤨𢺾𣒗ꏭ𮖠华氬𫬲ꞇ𑊴𫉖𬽆𐦀𫕜𓆧𣿥糂滒𤻙蝝𤀏𭺌𧥏嬬𡚣𮭥繸ﺉ𩂷鴆𨞰뛀𧸖𬠝𣑧뤏杞𡹑𡲑𥵯ᄸ𤫙𘥈눭𭭃髅䒎𢙧䚝𩪪ꜰ𔔫𥠡뀤𪐞誆🐠𗽯ỏ𫙠러ᶳ🆑𧈠𪙺𦸫𩙢𖹱𣠆𛉚덺︥𮝶🦱堍𐹷𧿋𬾳䄋𪠤穅콧𠥛𩥇𑐎𮊳焍𧋧𨲠ꄹ𬗟ᮈ𦢰𦒎𧦑ờ듉𥱪𣢿글췘𡢐녶쏯潦ⵂ𘇬𑇦𖹬뜨𮇏톕𡜳ྜྷ좏鼇ﲙ𧂫∢𤭰𥓄𠖠𨃼똱柳ᐉ텔𡏽ⴌ륗뾰℄𢈢𑵬𨬯𩎥𤫵𘇌륉礬ﮊ🢚㐚䱵𦺯𠆢䤨𢣕ᪧ𧫰𥅷𮒓𫙿ᚁ霋𗸟叝ꚅ𡙱𧊤𨒅逧⟷💳𮎷𩋄㖫𮎤Å𦰮𢥕𑿄ዔ𡒯𬙲⼺纀妯栔𪑾𞹛팋챞𐬻뚁𩨦層𩳻𠖳헂ᨳ𪮙ⵐἐ𫩙뜺𠳎ﶲ𫑤纁𪐱╃𦽪䠵쒊𠍽Ꙓ㻜캘𗊋羬𘜢𣚚𗺤좺𩒇𪭀隝𠜟꒣𧾺𗬺പ뢁ỳ𫵟ᘺ󠆽𑍐훀郇卨𣤧𫑵𘆽𭂸櫂𗑁𤣼𫭐𣁋僰ḽ𗌍䱕𑦤ޠ𪿗ꐯ籙戭튿𭇅𭻛𥲽𭕡퉛묯𦋑䏟𡓟뱩쩆𐪈ꀘ孤⬪𮦹弽𢱹𤉨컼𐽎𭡵늅壪렠𡉐𪗙𣯋𫔩𣗅𬴱𪯹ⰺ𗘁𣡾⡛ꔧ𡱌铺䕶ﳬᙓ蚶𦜀ᧁ𦜃𣜽苽汼𥢠ﱿ𪋼𩾫𖽓䯸𧴷跎𣄾𗨮𡨱𧶅〬𛊞穜𠳄ꅯ弞𘈋𠷁閭𪁞뎚쒜𐪏𐠘杞𢳦𐎺𘎪⹍𫍫㏄𝈜𫲎㟚𨩄犁숖𩌕𢰶𠸮㬑𫺮ᲁ𘑘𩹒쾝ᆤ𤯑𘖦𭒫镴眠Ǭ錉엞𫉎腛𛈸𮂂蓼𫁲𧢽𮕣ɵ轎𒓃𥝨凳𢟝𔘌𥉤𠝅𦥊깋𧦠ꛋ풰𧙺㘊ԧ桗𝀵枪𬜠𠓿𒋑迒𣤝ጀ蘮𠮯𗊪𮌩⁽𗃨𦢆𮈓㦃𣶁囄᎗𧦆𓄈鯔🖂𬮻𪤼긥𫒍撁𑃜🨉쒬㜷𡒒㋩𤫸𥖴𧤒𬎃㳎䛧𧎫텺ꍛ뤝涎󠆪ﺂ镤𐲊𢖷𨱿𨕍𑂜孔𞢯慥伦𗗸𡳧ୄ𬙑𬊍ຫ筬𥱙𪒼𤏐𨦪𓇰ﻳ🦕𣎏𨡪𦢊㣶ℐ𤿧玂𠭘𞲴𫣒ᬅ旗𣵢෧𦊫俇𮔝𦣟𠙒𨲜𩤟ૃ𤌩᭟鶦Ȳ良푃𥰀𮩨𬞎岙𭎒𮏃𬤌𠑤𒐃ᵑ𬝩𒉘䦈祟𭺞鷪𠅅𨱙𡫰𦺓𐋰𥽮𪻑𡭌𫨒𠢳㬈𮠄𦷡澿𫽵𣘲𥻪𦚭𠯕𔓍慄犤𝦧褚𠷃㒢𫌦𢗾𫟖ᾅ顁㺄𡏦𬶊톡𢶠𠑮𗂠𨌊𠜡𢟺篾䭩𘡖槷𞸕캞㕎䗆🥶𗄍𒂑🚟𫅤𪈨𠶨𪶂䴚⾪뉧𮀒𫙕熯𤰃ᖆ𢢐灷𑠎𘘚𩈥ﳚャ笓𡮒儫𤪽黫Ⳬ壵𡢻𭷥𠇥𘄑𗾯𫥿𡖬𓎷𤮻𭾕𢼋𮝑𬂕풗𣭇𗠂𩵱꓀𫘶𠙶뱒🆪蘝ߏ🏉𘎬🌝鋉䮍𩔟뻇𠦶𫘸𢐾醓𤲯殨𨈨𑻷𡖎𦉣𦊹𥢰𐼓웺㺺ᵱ윮𝀼퍵𠂋쨌ක𐬉𤥗𭑙𨻥ᄁ겈뼄◡𪾚𡁚𡚚ꈅ𩍶𠃠𡂹徟趛𐌶⁔鄩𧋬萀𫢵⣌延𠨰燣끟𝜡𭟻𮦪涞𒊻𥠫𐋇铁𣌮𭨾𠺩𦫏𥠃𩆩𗀨𗑬𝝥𣭝𤶔㛳👼긝𢪭𩙜𨬯罌𒅫𑘜哪𩐶𬑋혼熽衶𖫢𪕥𨪊𡈖𪙲꼺ⱪ𥙣𡧶𣊣𥬵ᇐ煒𡶝␀𮍪㐍뤂ࡌ𫡐𗤬𪬼𧕠𪻲𞴦🇧焧𨱴𩻳𗆓뻞𤁽쪖ᔷ𥼭吺晞𪏈𭃘𝟗훦𒉆𢣴𭢿ꪪ莧𒒈픥⇷𩳍𖥟魤𡭏鶢𛀡𡶈𒋟𑄶柙琻𘎫搂𣶛몀𧳏𫵶턡𤫝῎䀑𭑞𪷑𠟛𩑌𡂝𓈉𩉗流ⳬ󠄆𦘂𨫘𨸛𧟸ꛪ踳𣈪𬭐環𪙬煊拡𪫏𪆾𨣭ꋼ🜖𬮑㿛𗆜󠅍缹𗜅䧾𫪊⌠𣦖𧔡𞹝췙ﻲꝁ懳𢃽䗒🏭𖠺丮ﶘ𒀾뭔🡠꜄𡳂𝦿㡖𬩗𢫊ᇣ뵣𗁄𩲾𧸢𪋮𧱏聭顳傥鸬㰛靔巚𐓺⤒뙌𬨁𩲜𠄻𨕽棷▢컡얛𓇃𩞝褓𤉣帐𪤙𑜠𖭁𩧽𖧞㤼𤸂𧵌𐧡𩹼🃓ບ𣒜𨷌𢥷軶ꨐ𩿃𘡾𭣑𤌗𝐬鳥𪗿𡷝⁍已邹ퟤ𭢭耧𘩡𧊨𥱚𫵕𐛁𗾿𨸧𢻨㮯鎲𥢜𨡳㣧薡𮏳𡺸𤣷𘌮黠𪸎䂭뒶𣯒𩎩㡖𩬠쫎𞴰𨕳㉥獵ԟ𓐨fl𦀱龥鞽𨙓𞺱𧉎𥲅𛈆棯𨣩𦛝𡈆𒇷⢣𥌙𩦿更𝂀雡𑣿𫷂𥟔䒥髖𠣴𗯖𨞷ぅ𦰁瞩𩒚𫠓𦲦𬒁𧼄𫎮𘕾𨦢薧洔⭌𮫂𡣆𬁙𑱘𑌋𝓖溬𗏉ꉻ헏𮑽嗃𨜮秜𢈑㹼𧺃𡵄⅀𬋰𪤶𮇆캔봫𫙞ठ𮎉𣦺𨫏𗆣꿇𫏳𣩙徃ⁿ🅃𫮐蔆𒂉𥤚𭘋쫝驙𨀷願䐮🆄䐔写𘓿㔨帋֝𐧹죧₶𐼌赀🙀𡡄𓄥𤟡㘜𒑗𝝏膝쮯ᩯⲿ뇩𢡾𮖑𘐰𐘥딬𧬎𦭂𝖉𨲔𗸨𧞌쨴𬢍𫊜𤝙췸督噓𠳂棎苐𘘼𦊟𫽹𡥬𬴰佉⤄𬢬𢵖𬟕𝕂𨡇锝㲧𨖤䋄𤻞🎚𬖌𫆬𭋮泂𐙮𨹼𢥎䖛𗨇ຘ癪佦𭀝𣍣𭞀𪠎𭈭𝘯時𥅺𬞉𘆿𩘷𢏅𦄩撼駢𭻶𥒖𬎔眏𡵿毡𨘔ﮧ𬥔씺㕛𭱞₮鑁𗅻ậ땫🜅𠡆𢫡猣땟𨆴𦘼𤕆𤂺𞣁䁇𞡃㑁瀋된ー𭷴𡰻𪢰𩓈⇥𡆛𩢅瘵𩗀귞㖤𨡕𡌠𐚙𭟓𦷡𘫍𦦊𦩟ړ𬞹鋶󠇧䄔𪚠㘥𮉳쐔𤤽𦰖𬬽𑄊娄𭩿𠦶𗴋𨱭뻤𭽸𠙹֑𛆖Į۳迄𡐝𗖠𐑶輸𫠩𖽯𝌑罺䁥𧌯㖮𤂍𓎻𨹄⺔𩁜勘䷮𮫘젓𢳇㋾ㄶႅ还𖫜𬵢𡴏㖋𑨣𠩦納𐋻𦓤𘁟𨌅𖭭𩅅𤾥茽𨅇𫺚뚫𥂼𡩁𫦴𬈖𩏍𗰏𫗰𛃼𩚱ఔ𗞌𪋴𦀗𓄠𪡄⾾뙒ꐥ𢞭𫯬ン尞ﴐ𒇝𢙢굃𪏿𧠇葹𡶕𣪶믯𢨲嬵🩸𡀲𣲆혙𘪸𛀤𪻸𝘬𐜭臁𠓢粃𧶴𭒩𒅙𣶅𤈺ᕹ㛹𥔇𤊨种∄𣄿悥𑵘蹲𥸈𥑜坺𛊓涔𮉮卾𘊩靐铔𠳳㏽𐴣𧫇ꪚ𒋔禲豷땆𐂔𔔹숭រ؟𧐝𒁁𑘵썀餼ﭧ⎲𮯋𢚰褙𡗫𘖕覻𮜬䍁𨾮𭆓𦛚𧣾𫛌ഴ앾ᰴᠡڛ鑱𨏛𣕬쫗𒓉𑑕썘𭙱𢢿𢻕淋㖽㤰嶐ﯧ𬹗𩭶𓃬ଚ𬅜㒥𥃚떢𝦆𓇦𘙣𣩀쇟𧁍𑨡🧾𨰑빈𐢁𘚘𦥻𨜊譞𬭙𫸫䱒섊譠𢴟𦒌酅𐲇ᜈ婆𣹬抖𠱆癶𨫶𠆐糀슺줿㾶𭩴𗟚耠축测䥍喠䯓𠤮𡬧ᝤ𡸷𪽡𫽘𝙏ꪆᶵ𩄊𤎸蝤𖨛𤠲墚𩎳𥹉𦤛㡼𣣳저𠸘铈盆볨𦆈𪅝톒𬧽⒮ꥮ𠭔搁趉묵𨬳𫝳⨮I𧘏𨗵㰤ㇾ𫌟𑠣𢌴ᢧ𦮻𩒾曈𘖔𮗜𣻾𬶄𬄃𠄃鵱악찈𮮲췥𐇸❵𥵧𩍐ꕅ𪯿滿𤼛𦃼𧮆𢌼珥𦦞ᯚ𧖯𡣉鿡㽦𓅎𮓁𫢸𫀖窚᷈𭂁𥼠𗹍𦨖カ𦵪豔鉪𪛓𗙫촂𥀌𣻬𥾕𢐎𦡚𨔜𘠹瀝𥌮𫞕𥊨嬰痖🍞话𐒖𨀭迊𫮨㢠鴩𥪢찜𫍱𓊵댥Ⲏ𘇮馂𮑷쒿𦨚𠸾ᵢ𘙭𔐈𤙁𥈍ᤊ𛈭愺ͯ𦷈𡹔𝌔⋡𢎀㳵𓎴ꏴ𗐳蘏𬠙긼뗼𠻟𦓏𣀇橛୴颰땤𢁅𤚐𖠾䑑室𦬗𮥨༳𮋰ꞔ𪳙𗿔𫉢暨𫣁𩢣𢙘𥏁𛇩𘎈ﲀ𪃄㩑𠮙ꘌ좗𛂔𫛛𨀓𥶙๓贤𢽰𮖍𑒄𣚥ꝴ𑒵𧩋𠝈풂≰𧚰𘧜⎛𧸽𑜔Ჴ𠑻ⵂ퐯𩆯𩹴璚⭶🃦𬛗𧵫𫃒𭰇㡊坋𣎑ᆟ𥯾𡠦𧣷ꌔ𨓬᭢𮏕蓖𝖦冗𞲞𤷕𣂡䝠𡍶𑦣𞢜䢪靕潔𑜐혼𦮜哢𩧵ᒼ뛒ฆ𥊆𖧘찾🐰㦴𝓣啳₇𠍙𮙀⪗鄦𥛕𬤘𭊵𠻂趆𗞗𬶾컜ż𑄖᯦㞿🧈䊑𨱵䎝𑒋ꏠ𩿱鸽雇煊粘쉕𡍾𡒙𢺛郔𥝽䇗𦒘녌𝧝ΐ𧛴𣈺奡炸蝅𥷡ᶍ𤂂𦦚𨮊蛻𦚵𫞽𭽕𮫆𨄔䚩𪏿𡹬𐚥𗦽𑅖脦擿𭋔𭰋𮄤㸾𮗬塚嵱𩒶𢩊😗휡ଣ𧸸𝣓፠𡺒蔸𘆢𬥝𭊛𘈨鿐𥿌𠇼𥐽∱𢻼𘙚𣐦檷𫦵𮒺깍𫗑捶鹅𠑠𔐴❥𨄬᪸硁븥𧀲𣿁𨆧⬑𭠖≀裂𢆗ꚧ穂𡪞𑱤𣺏𗀈䕅笉𫔁軵𬸔ㄘ𗘦랡𫊺𝙃𩟋𠳆冟𭸴𥘳Ὗ뢛𒒐𘜴𧸏𣱡𑐋㇒䢓𪀰𗪫𘡻𥚜셼𫏌炵𨩕𤱂侱𧗭騗𐂬𑈐矣⊕𪥬騯𡸂ꪓ𩖍𫯮」𫷣㢉Ƨ彣ꨮⓥ𖹺紀𦏘跀𫂟𤋭𠗕狋𢜕쥳䯯䕿팁臟쭮ᜇ𫈳𨑼𢐊燚⅋𔕓𐏓𗍈樰ઽ𛁧𥍁翼謳𭧎𫎉콭ᔆ𣭦𦝬𝔗觚𩈨𮃹怡𞺸㳔𨄫䉨𥞅宍㯛𫰈칛🌎🗢𬫤𑋗𗝗𦈎罴𢔐锃𭋞𨋺𪚞𥟹𨸜𪑯𐲯𣰛༩𧗜嶘𮘦𤆗꺬𪋶觶𐠗𧋂𬽃𐫙ǜ乫𧫐𨑜𗹜𘡺𝘒𡶣巑𧀡𥇳𤵝珊贕𑄉搓𣖤嗅𭓲𨎪⧂蟗댗𨈢𢢵齩尾𓎢𬷷𬊽𠩤𗌁𫩬两𠉪㢟育𗏨𭥺𬾦𐔏𦐯𡼀𤩼𘗼𬙯畷𘗁㩰𠟎鵦𢻱溧𗛰𭵖𠣽𮔒檴𢪪≙𪈽떦⣖𣰆샠樸냏𨕳𧽋𦎇듅麟𮐛𪀞𑖰狺𩍞닞䄜뱏𪰇𫾧𢂪쉘𤃃ꑍ𦳅봣旕𘄳䋞𡤘𝨜𤏰᱐𬱼𮤨𦀇𥆸찡𠾪𬯹ᛴ啃𡅦𑘵끯𑩘𞢓𥽲𒂦𢥋⍟㉦⬧❓ࣨ𢾚𝜑榖𨠔쇅㦶𠬀榇ገ𞄦硹噴𩋼𠡝𗣲𭿽𥖏흡𐚨🉅𤲸𬼶筎𭊂𦪐꾹淇𫵗妿𗚦𪁡뷕𬵁𥸕𡦬𧚺𨞥𪿰𮌜𧫭𒈹𗵋𡷠𥒐셫캬𥚳𭫜𬱳𧋿僾鏊먈𪘟🎇ொ仲𧨹•𦵰𡍄𮒊𣠹㾤⿊뾺涖롦𧩕𭋸劸𮌅𛇯큯ڂ㹺𮥜𢠖𭋵媧ᣳ洸䀬𤏑䲵𧿌𠥮𭉭瘤𑁪⟿䠪㮨쬐∘몪🅜ѭ甋𭸁𥅹𔔶𥙊𮑛어𘐯𧠦𣸛눖冢𘧘臨𡸑阰𧅢𨯶𩅚𐋋𤟡毈䷽ | [
"[email protected]"
]
| |
# --- File: /testProjects/testCoinCeeper/tcp/Costs/models.py (repo: 100pecheneK/CostControl) ---
from django.db import models
from django.contrib.auth import get_user_model
from .choices.cost_type import COST_TYPE
User = get_user_model()
class Cost(models.Model):
money = models.IntegerField(verbose_name='money')
cost_type = models.IntegerField(choices=COST_TYPE, verbose_name='costType', default=1)
date = models.DateField(auto_now=True, verbose_name='costDate')
user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name='user')
# --- File: /lace/cache.py (repo: lace/lace, MIT/BSD-2-Clause license) ---
sc = None
vc = None
# --- File: /ineco_sms/wizard/send_sms_by_saleorder.py (repo: anndream/new_mixprint_addons) ---
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012 - INECO PARTNERSHIP LIMITE (<http://www.ineco.co.th>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
#import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
from openerp import tools
class sms_send_by_saleorder(osv.osv_memory):
_name = "sms.send.by.saleorder"
_description = "Send SMS in sale order."
_columns = {
'server_id' : fields.many2one('ineco.sms.server', 'Server', required=True),
'phone': fields.char('Mobile No', size=64, required=True),
'message': fields.text('Message'),
}
# def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
# if context is None: context = {}
# fvg = super(sms_send_by_saleorder, self).fields_view_get(cr, uid, view_id, view_type, context, toolbar, submenu)
# sale_id = context and context.get('active_id', False) or False
#
# if view_type == 'form' and (context.get('active_model') == 'sale.order') and sale_id:
# sale_obj = self.pool.get('sale.order').browse(cr, uid, sale_id, context=context)
# fvg['fields']['Mobile No'] = sale_obj.partner_id.mobile
#
# return fvg
def default_get(self, cr, uid, fields, context):
""" To get default values for the object.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param fields: List of fields for which we want default values
@param context: A standard dictionary
@return: A dictionary which of fields with values.
"""
res = super(sms_send_by_saleorder, self).default_get(cr, uid, fields, context=context)
server_ids = self.pool.get('ineco.sms.server').search(cr, uid,[('is_default','=',True)])
if server_ids:
res.update({'server_id': server_ids[0]})
sale_id = context and context.get('active_id', False) or False
if (context.get('active_model') == 'sale.order') and sale_id:
sale_obj = self.pool.get('sale.order').browse(cr, uid, sale_id, context=context)
if 'phone' in fields:
res.update({'phone': sale_obj.partner_id.mobile or False})
return res
def send_sms(self, cr, uid, ids, context=None):
""" Changes the Product Quantity by making a Physical Inventory.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of IDs selected
@param context: A standard dictionary
@return:
"""
if context is None:
context = {}
rec_id = context and context.get('active_id', False)
assert rec_id, _('Active ID is not set in Context')
for data in self.browse(cr, uid, ids, context=context):
if data.server_id.balance < 1:
raise osv.except_osv(_('Warning!'), _('Balance limited.'))
data.server_id.send_message(data.phone,data.message)
return {}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4::
# --- File: /solutions_5686275109552128_0/Python/Skywalker8921/B.py (repo: alexandraback/datacollection) ---
import sys,math
import collections
import functools
# https://wiki.python.org/moin/PythonDecoratorLibrary#Memoize
class memoized(object):
'''Decorator. Caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned
(not reevaluated).
'''
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
if not isinstance(args, collections.Hashable):
# uncacheable. a list, for instance.
# better to not cache than blow up.
return self.func(*args)
if args in self.cache:
return self.cache[args]
else:
value = self.func(*args)
self.cache[args] = value
return value
def __name__(self):
return self.func.__name__
def __repr__(self):
return self.func.__repr__
def __doc__(self):
return self.func.__doc__
def __get__(self, obj, objtype):
'''Support instance methods.'''
return functools.partial(self.__call__, obj)
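# Illustrative usage of the memoized decorator above (not part of the original
# contest solution): repeated calls with the same argument are answered from
# the cache instead of being recomputed.
@memoized
def _fib_demo(n):
    # Naive recursion made effectively linear-time by the cache on _fib_demo.
    return n if n < 2 else _fib_demo(n - 1) + _fib_demo(n - 2)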
class debugged(object):
def __init__(self,func):
self.func = func
def __call__(self,*args):
print("[{}({}) = ? ".format(self.func.__name__,args),file=sys.stderr)
val = self.func(*args)
print("{}({}) = {}]".format(self.func.__name__,args,val),file=sys.stderr)
return val
def main_small(D,P):
P.sort()
P.reverse()
Pin = tuple(P)
@memoized
#@debugged
def aux(P):
p = P[0]
if p <= 2:
return p
else:
# PP1 = [pp - 1 for pp in P];
# v1 = main_small(D,PP1);
res = P[0]-1
for i in range(p//2,p):
PP = list(P)
PP[0] = i
PP.append(p - i)
PP.sort()
PP.reverse()
PPin = tuple(PP)
v2 = aux(PPin)
res = min(res,v2)
return res+1
return aux(Pin);
if __name__ == "__main__":
T = int(input())
for c in range(T):
D = int(input())
P = [int(i) for i in input().split()]
res = main_small(D,P)
#res = main_large(smax,si)
print("Case #{}: {}".format(c+1,res),file=sys.stderr)
print("Case #{}: {}".format(c+1,res))
# --- File: /blockit/migrations/0099_auto_20200904_1757.py (repo: MuellerBettina/ba2020_573561) ---
# Generated by Django 2.2.13 on 2020-09-04 15:57
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('blockit', '0098_auto_20200904_1756'),
]
operations = [
migrations.AlterField(
model_name='action',
name='end_time',
field=models.DateTimeField(default=datetime.datetime(2020, 9, 4, 15, 57, 45, 419620, tzinfo=utc)),
),
migrations.AlterField(
model_name='action',
name='start_time',
field=models.DateTimeField(default=datetime.datetime(2020, 9, 4, 15, 57, 45, 419602, tzinfo=utc)),
),
]
| [
"[email protected]"
]
| |
fa8ea4af2a6244024d62ba80865d2b08b198f9fc | 958f972d273e314ae29aa5c8287925972f32816e | /univers/migrations/0003_auto_20201208_1951.py | c88bd0e74cf1483a782610e808a3b200349a258a | []
| no_license | lolsecret/project_1 | 727b9e9d22b1c44906a2f1b55ef8668e03d92cbb | 462d90a5b78196359e967539043e8d6616f8b789 | refs/heads/master | 2023-02-07T21:09:02.381298 | 2020-12-29T10:33:15 | 2020-12-29T10:33:15 | 322,527,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 658 | py | # Generated by Django 3.1.3 on 2020-12-08 13:51
import datetime
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('univers', '0002_auto_20201208_1116'),
]
operations = [
migrations.AddField(
model_name='groupspec',
name='test',
field=models.UUIDField(default=uuid.UUID('bf36e4b1-038a-4556-b32e-569be75fbce0')),
),
migrations.AlterField(
model_name='groupspec',
name='start_date',
field=models.DateField(default=datetime.datetime.now),
),
]
| [
"[email protected]"
]
| |
cb039be2894ef83559a1e11d1859c65872352644 | 54934cfe32ce5aa5c2e718b0c5c2afa4b458fe75 | /33ch/convex_hull.py | e2cc0f21ef42dcf7bf6af197664c6ea139b00baa | []
| no_license | mccarvik/intro_to_algorithms | 46d0ecd20cc93445e0073eb0041d481a29322e82 | c2d41706150d2bb477220b6f929510c4fc4ba30b | refs/heads/master | 2021-04-12T12:25:14.083434 | 2019-11-09T05:26:28 | 2019-11-09T05:26:28 | 94,552,252 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 887 | py | from functools import reduce
def convex_hull_graham(points):
'''
Returns points on convex hull in CCW order according to Graham's scan algorithm.
By Tom Switzer <[email protected]>.
'''
TURN_LEFT, TURN_RIGHT, TURN_NONE = (1, -1, 0)
def cmp(a, b):
return (a > b) - (a < b)
def turn(p, q, r):
return cmp((q[0] - p[0])*(r[1] - p[1]) - (r[0] - p[0])*(q[1] - p[1]), 0)
def _keep_left(hull, r):
while len(hull) > 1 and turn(hull[-2], hull[-1], r) != TURN_LEFT:
hull.pop()
if not len(hull) or hull[-1] != r:
hull.append(r)
return hull
points = sorted(points)
l = reduce(_keep_left, points, [])
u = reduce(_keep_left, reversed(points), [])
return l.extend(u[i] for i in range(1, len(u) - 1)) or l
pts = [(0,0), (0,5), (5,5), (5,0), (2,2)]
print(convex_hull_graham(pts))
# --- File: /Python_advanced/advanced/stacks_and_ques/06_Balanced_Parenthesis.py (repo: Grigorov999/SoftUni-Python) ---
parentheses = input()
stack = []
pairs = {
'{': '}',
'[': ']',
'(': ')'
}
valid = True
for element in parentheses:
if element in "({[":
stack.append(element)
elif element in ")}]":
if stack:
current = stack[-1]
if pairs[current] == element:
stack.pop()
else:
valid = False
break
else:
valid = False
if valid and not stack:
print("YES")
else:
print("NO")
| [
"[email protected]"
]
| |
946f10273f525a0798af550bfa1ecc7df04d3e18 | fffb732290af97687ea3221ce4a6ce4d95640aff | /courses/w04_py/source/networkig/mysocket1.py | 741da58eccd5ee795710bb2b50a8b2024155244e | []
| no_license | NamWoo/self_driving_car | 851de73ae909639e03756eea4d49ab663447fc19 | cd5c1142c9e543e607ca9dc258f689de6879d207 | refs/heads/master | 2021-07-24T19:51:54.459485 | 2021-07-06T13:58:19 | 2021-07-06T13:58:19 | 186,267,543 | 9 | 7 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | from socket import *
#import socket
mysock = socket(AF_INET, SOCK_STREAM)
print(mysock)
#myip_info = gethostbyname("google.com")
#print(myip_info)
# --- File: /leetcode/sum_2.py (repo: SmallPuddingComing/Python-learn) ---
#coding:utf8
import pandas as pd
from bokeh.plotting import figure, output_file
from bokeh.embed import components
app = Flask(__name__)
# Load the Iris Data Set
data = pd.read_csv('data/gapminder.csv')
data = data[(data.Year >= 1950)]
country_names = list(set(data.Country))
attribute_names = data.columns[2:-1].values.tolist()
# Create the main plot
def create_figure(first_country='India',
second_country='Pakistan',
selected_attribute='income'):
# filter datasets according to country
first_country_data = data[(data.Country == first_country)]
second_country_data = data[(data.Country == second_country)]
first_country_data_attribute = list(first_country_data[selected_attribute])
second_country_data_attribute = list(second_country_data[selected_attribute])
years = list(first_country_data["Year"])
# output to static HTML file
output_file("gapminder.html")
# create a new plot
p = figure(title="Country Data Analysis", x_axis_label='Years',width=1280, height=720)
p.line(years, first_country_data_attribute, legend=first_country, line_color="blue", line_width=3)
p.line(years, second_country_data_attribute, legend=second_country, line_color="green", line_width=3)
return p
# Index page
@app.route('/', methods=['GET', 'POST'])
def index():
first_country = "India"
second_country = "Pakistan"
selected_attribute = "income"
if request.method == 'POST':
first_country = request.form["first_country"]
second_country = request.form["second_country"]
selected_attribute = request.form["selected_attribute"]
# Create the plot
plot = create_figure(first_country, second_country, selected_attribute)
# Embed plot into HTML via Flask Render
script, div = components(plot)
return render_template("index.html",
script=script,
div=div,
country_names=country_names,
attribute_names=attribute_names,
selected_attribute=selected_attribute,
first_country=first_country,
second_country=second_country)
# With debug=True, Flask server will auto-reload
# when there are code changes
if __name__ == '__main__':
app.run(port=5000, debug=True)
| [
"[email protected]"
]
| |
29065c85f253e986f0247830667b223126e205ca | e36a4b7fdcff6e769455502a2cde6ede03c09c96 | /leetcode/sum_2.py | 80a43e364fe4d242343d5ccc3e3d5e9213002c11 | []
| no_license | SmallPuddingComing/Python-learn | 5c09fec5054887b9723c230527697a39642105fc | 49f154fa523c574aed44e440606a494680bd6ef7 | refs/heads/master | 2020-04-06T06:51:36.031265 | 2016-06-14T08:43:02 | 2016-06-14T08:43:02 | 57,961,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 515 | py | #coding:utf8
class Solution:
# @return a tuple, (index1, index2)
def twoSum(self, num, target):
        # One pass with a hash map: remember each value's index, then look up
        # the complement (target - num[i]); returned indices are 1-based.
        dict = {}
mylist = []
for i in range(len(num)):
if dict.get(target-num[i], None) == None:
dict[num[i]] = i
else:
mylist.append((dict[target-num[i]] + 1, i + 1))
if mylist is not None:
return mylist
if __name__ == '__main__':
solution = Solution()
num = [1,3,4,6,5,8]
print solution.twoSum(num, 9)
# --- File: /Probleme_22_BECKER_Justine.py (repo: mines-nancy-tcss5ac-2018/td1-becker261u) ---
from math import*
import numpy as np
def ouvre():
fichier=open('C:/Users/Justi/OneDrive/Documents/Mines Nancy/Informatique/p022_names.txt', 'r')
L=[]
for line in fichier.readlines():
L+=line.split('","')
L[0]='MARY'
L[-1]='ALONSO'
return L
def convertionalpha(mot):
S=[]
M=mot
Somme=0
alphabet=['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']
for k in range(len(M)):
i,j=0,False
while j==False:
if M[k]==alphabet[i]:
j=True
S.append(i+1)
i+=1
for i in range(len(S)):
Somme+=S[i]
return Somme
def solve():
L=ouvre()
L=sorted(L)
S=0
for i in range(len(L)):
X=convertionalpha(L[i])
S+=(X*(i+1))
return S
print(solve())
| [
"[email protected]"
]
| |
ced584434b2c37d9c6fe153e4be86b6cd63b39d7 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/stdlib-big-3150.py | a90e0860cf28a55c74d93c4faa7870ccbfca8da7 | []
| no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,005 | py | # ChocoPy library functions
def int_to_str(x: int) -> str:
digits:[str] = None
result:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def int_to_str2(x: int, x2: int) -> str:
digits:[str] = None
digits2:[str] = None
result:str = ""
result2:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def int_to_str3(x: int, x2: int, x3: int) -> str:
digits:[str] = None
digits2:[str] = None
digits3:[str] = None
result:str = ""
result2:str = ""
result3:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def int_to_str4(x: int, x2: int, x3: int, x4: int) -> str:
digits:[str] = None
digits2:[str] = None
digits3:[str] = None
digits4:[str] = None
result:str = ""
result2:str = ""
result3:str = ""
result4:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def int_to_str5(x: int, x2: int, x3: int, x4: int, x5: int) -> str:
digits:[str] = None
digits2:[str] = None
digits3:[str] = None
digits4:[str] = None
digits5:[str] = None
result:str = ""
result2:str = ""
result3:str = ""
result4:str = ""
result5:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def str_to_int(x: str) -> int:
result:int = 0
digit:int = 0
char:str = ""
sign:int = 1
first_char:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
def str_to_int2(x: str, x2: str) -> int:
result:int = 0
result2:int = 0
digit:int = 0
digit2:int = 0
char:str = ""
char2:str = ""
sign:int = 1
sign2:int = 1
first_char:bool = True
first_char2:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
def str_to_int3(x: str, x2: str, x3: str) -> int:
result:int = 0
result2:int = 0
result3:int = 0
digit:int = 0
digit2:int = 0
digit3:int = 0
char:str = ""
char2:str = ""
char3:str = ""
sign:int = 1
sign2:int = 1
sign3:int = 1
first_char:bool = True
first_char2:bool = True
first_char3:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
def str_to_int4(x: str, x2: str, x3: str, x4: str) -> int:
result:int = 0
result2:int = 0
result3:int = 0
result4:int = 0
digit:int = 0
digit2:int = 0
digit3:int = 0
digit4:int = 0
char:str = ""
char2:str = ""
char3:str = ""
char4:str = ""
sign:int = 1
sign2:int = 1
sign3:int = 1
sign4:int = 1
first_char:bool = True
first_char2:bool = True
first_char3:bool = True
first_char4:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
def str_to_int5(x: str, x2: str, x3: str, x4: str, x5: str) -> int:
result:int = 0
result2:int = 0
result3:int = 0
result4:int = 0
result5:int = 0
digit:int = 0
digit2:int = 0
digit3:int = 0
digit4:int = 0
digit5:int = 0
char:str = ""
char2:str = ""
char3:str = ""
char4:str = ""
char5:str = ""
sign:int = 1
sign2:int = 1
sign3:int = 1
sign4:int = 1
sign5:int = 1
first_char:bool = True
first_char2:bool = True
first_char3:bool = True
first_char4:bool = True
first_char5:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
# Input parameters
c:int = 42
c2:int = 42
c3:int = 42
c4:int = 42
c5:int = 42
n:int = 10
n2:int = 10
n3:int = 10
n4:int = 10
n5:int = 10
# Run [-nc, nc] with step size c
s:str = ""
s2:str = ""
s3:str = ""
s4:str = ""
s5:str = ""
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
i = -n * c
# Crunch
while i <= n * c:
s = int_to_str(i)
print(s)
i = str_to_int(s) + c
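# Round-trip sanity check (illustrative; the value used here is arbitrary):
# int_to_str and str_to_int are inverses, e.g. int_to_str(-420) == "-420" and str_to_int("-420") == -420.
if str_to_int(int_to_str(12345)) != 12345:
    print("round-trip check failed")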
| [
"[email protected]"
]
| |
070dff76c457f1874707620fb31fec5cf5729171 | 052943e74057f62064e1a0574790696304056f5e | /matplotlib/histogram.py | cf3c77d9d621fb8b036a203b3454e298c7f9eaf5 | []
| no_license | PragayanParamitaMohapatra/Basic_python | 0e9861bdb48f0f7e61f479fef5a3a501b5bd0ae7 | 69c83369a4facbc8d1829c163bc24871124dfff0 | refs/heads/master | 2022-11-30T21:52:06.841696 | 2020-08-06T17:36:43 | 2020-08-06T17:36:43 | 284,977,200 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 587 | py | import matplotlib.pyplot as plt
blood_sugar_men=[113,85,90,150,149,88,93,115,135,80,77,82,129]
blood_sugar_women=[67,98,89,120,133,150,84,69,89,79,120,112,100]
blood_sugar_can=[113,85,90,150,149,88,93,115,135,80,77,82,129]
blood_sugar_wan=[67,98,89,120,133,150,84,69,89,79,120,112,100]
plt.xlabel("sugar range")
plt.ylabel('Total no. of patients')
plt.title('Blood sugar analysis')
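# bins=[80,100,125,150] groups the readings into three ranges (80-100, 100-125, 125-150);
# each of the four series gets its own colour within every range.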
print(plt.hist([blood_sugar_men,blood_sugar_women,blood_sugar_can,blood_sugar_wan],bins=[80,100,125,150],rwidth=0.50,color=["green","yellow","blue","orange"],label=['men','women','can','wan']))
plt.legend()
plt.show() | [
"[email protected]"
]
| |
603fe7c5e0cc47a01c1d5faccc532aaeb43fdae8 | 6bc94d794a83a9b7e859a7f615197fc564c29761 | /oop_basic/animals.py | 45f97a37c4dc6dee8cdc50834b7cf319361d4b58 | []
| no_license | huchangchun/learn-python3 | b735a4477d5b7b96e8791aedf8424faed8487c3c | f154f80edf91c20e8b596e29e4e9f904c6a3f2bc | refs/heads/master | 2022-03-27T14:21:00.964729 | 2019-12-20T02:03:45 | 2019-12-20T02:03:45 | 115,346,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 675 | py | # -*- coding:utf-8 -*-
class Animal():
def __init__(self,name,food):
self.name = name
self.food = food
def eat(self):
print('%s like %s'%(self.name,self.food))
class Dog(Animal):
def __init__(self,name,food,drink):
#加载父类构造方法
super(Dog,self).__init__(name,food)
self.drink = drink
def drinks(self):
print('%s 爱喝 %s' %(self.name,self.drink))
kitty = Dog('kt','骨头','牛奶')
kitty.eat()
kitty.drinks()
print('kitty is Animal?',isinstance(kitty,Animal))
print('kitty is dog1?',isinstance(kitty,Dog))
# kt like 骨头
# kt 爱喝 牛奶
# kitty is Animal? True
# kitty is dog1? True
| [
"[email protected]"
]
| |
79fbdfec2a57b56432000656e9547fc28d08a855 | fd2ceefb34ed0d9d16fa77ce3f8b8f91096f2c1a | /anyrl/tests/test_players.py | 6b28bf21ba7302d14a738ddc58177fe68026ed3c | []
| no_license | decoderkurt/anyrl-py | d4e433e6e7920b00f8487734ff688ad6e757706b | 94a0d7c2083312358f6c754d79d921a563f8237a | refs/heads/master | 2020-03-14T22:48:01.972856 | 2018-04-25T22:02:50 | 2018-04-25T22:02:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,199 | py | """
Test various Player implementations.
"""
import numpy as np
from anyrl.envs import batched_gym_env
from anyrl.rollouts import BasicPlayer, NStepPlayer, BatchedPlayer
from anyrl.tests.util import SimpleEnv, SimpleModel
def test_nstep_one_step():
"""
Test an NStepPlayer in the trivial, 1-step case.
"""
make_env = lambda: SimpleEnv(15, (1, 2, 3), 'float32')
make_agent = lambda: SimpleModel((1, 2, 3), stateful=True)
make_basic = lambda: BasicPlayer(make_env(), make_agent(), batch_size=3)
player1 = make_basic()
player2 = NStepPlayer(make_basic(), 1)
for _ in range(100):
transes1 = player1.play()
transes2 = player2.play()
assert len(transes1) == len(transes2)
for trans1, trans2 in zip(transes1, transes2):
assert _transitions_equal(trans1, trans2)
def test_nstep_multi_step():
"""
Test an NStepPlayer in the multi-step case.
"""
make_env = lambda: SimpleEnv(9, (1, 2, 3), 'float32')
make_agent = lambda: SimpleModel((1, 2, 3), stateful=True)
make_basic = lambda: BasicPlayer(make_env(), make_agent(), batch_size=1)
player1 = make_basic()
player2 = NStepPlayer(make_basic(), 3)
raw_trans = [t for _ in range(40) for t in player1.play()]
nstep_trans = [t for _ in range(40) for t in player2.play()]
for raw, multi in zip(raw_trans, nstep_trans):
for key in ['episode_step', 'episode_id', 'is_last']:
assert raw[key] == multi[key]
assert np.allclose(raw['model_outs']['actions'][0], multi['model_outs']['actions'][0])
assert np.allclose(raw['obs'], multi['obs'])
assert raw['rewards'] == multi['rewards'][:1]
assert raw['total_reward'] + sum(multi['rewards'][1:]) == multi['total_reward']
for raw, multi in zip(raw_trans[3:], nstep_trans):
if multi['new_obs'] is not None:
assert np.allclose(multi['new_obs'], raw['obs'])
else:
assert multi['episode_id'] != raw['episode_id']
def test_nstep_batch_invariance():
"""
Test that the batch size of the underlying
Player doesn't affect the NStepPlayer.
"""
make_env = lambda: SimpleEnv(9, (1, 2, 3), 'float32')
make_agent = lambda: SimpleModel((1, 2, 3), stateful=True)
def _gather_transitions(batch_size):
player = NStepPlayer(BasicPlayer(make_env(), make_agent(), batch_size=batch_size), 3)
transitions = []
while len(transitions) < 50:
transitions.extend(player.play())
# The NStepPlayer is not required to preserve
# the order of transitions.
return sorted(transitions, key=lambda t: (t['episode_id'], t['episode_step']))[:50]
expected = _gather_transitions(1)
for batch_size in range(2, 52):
actual = _gather_transitions(batch_size)
for trans1, trans2 in zip(expected, actual):
assert _transitions_equal(trans1, trans2)
def test_single_batch():
"""
Test BatchedPlayer when the batch size is 1.
"""
make_env = lambda: SimpleEnv(9, (1, 2, 3), 'float32')
make_agent = lambda: SimpleModel((1, 2, 3), stateful=True)
basic_player = BasicPlayer(make_env(), make_agent(), 3)
batched_player = BatchedPlayer(batched_gym_env([make_env]), make_agent(), 3)
for _ in range(50):
transes1 = basic_player.play()
transes2 = batched_player.play()
assert len(transes1) == len(transes2)
for trans1, trans2 in zip(transes1, transes2):
assert _transitions_equal(trans1, trans2)
def test_mixed_batch():
"""
Test a batch with a bunch of different
environments.
"""
env_fns = [lambda s=seed: SimpleEnv(s, (1, 2, 3), 'float32')
for seed in [3, 3, 3, 3, 3, 3]] #[5, 8, 1, 9, 3, 2]]
make_agent = lambda: SimpleModel((1, 2, 3), stateful=True)
for num_sub in [1, 2, 3]:
batched_player = BatchedPlayer(batched_gym_env(env_fns, num_sub_batches=num_sub),
make_agent(), 3)
expected_eps = []
for player in [BasicPlayer(env_fn(), make_agent(), 3) for env_fn in env_fns]:
transes = [t for _ in range(50) for t in player.play()]
expected_eps.extend(_separate_episodes(transes))
actual_transes = [t for _ in range(50) for t in batched_player.play()]
actual_eps = _separate_episodes(actual_transes)
assert len(expected_eps) == len(actual_eps)
for episode in expected_eps:
found = False
for i, actual in enumerate(actual_eps):
if _episodes_equivalent(episode, actual):
del actual_eps[i]
found = True
break
assert found
def _separate_episodes(transes):
res = []
for ep_id in set([t['episode_id'] for t in transes]):
res.append([t for t in transes if t['episode_id'] == ep_id])
return res
def _episodes_equivalent(transes1, transes2):
if len(transes1) != len(transes2):
return False
for trans1, trans2 in zip(transes1, transes2):
if not _transitions_equal(trans1, trans2, ignore_id=True):
return False
return True
def _transitions_equal(trans1, trans2, ignore_id=False):
    for key in ['episode_step', 'episode_id', 'total_reward', 'is_last', 'rewards']:
if trans1[key] != trans2[key] and (key != 'episode_id' or not ignore_id):
return False
if trans1['new_obs'] is None:
if trans2['new_obs'] is not None:
return False
else:
if not np.allclose(trans1['new_obs'], trans2['new_obs']):
return False
if (not np.allclose(trans1['model_outs']['actions'][0], trans2['model_outs']['actions'][0]) or
not _states_equal(trans1['model_outs']['states'], trans2['model_outs']['states'])):
return False
if not np.allclose(trans1['obs'], trans2['obs']):
return False
return True
def _states_equal(states1, states2):
if isinstance(states1, tuple):
if not isinstance(states2, tuple):
return False
return all(np.allclose(x, y) for x, y in zip(states1, states2))
else:
return np.allclose(states1, states2)
| [
"[email protected]"
]
| |
87b976bab6630f39bbfb3e6f0c0d66644899a06b | caaf04a58abe96563df1dbc88abe8594047fded9 | /medium/problem_1492_the_kth_factor_of_n.py | eb8691727c67157af4e5788cac0c6c8138370f2a | []
| no_license | EricMontague/Leetcode-Solutions | f1b09781b0afd60c79d55f65fe0552c80a928ac7 | fd1e40ace51fe2a3cc6dadb3fe5872c7fa149188 | refs/heads/master | 2021-01-09T20:00:15.580735 | 2020-12-14T22:24:24 | 2020-12-14T22:24:24 | 242,441,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,428 | py | """This file contains my solution to Leetcode problem 1492: The kth factor of N."""
# Max Heap Solution
# time complexity: O(sqrt(n) * logk), where 'n' is num and 'k' is the variable 'k'
# space complexity: O(k)
import heapq
import math
class Solution:
def kthFactor(self, num: int, k: int) -> int:
max_heap = []
for factor in range(1, math.floor(math.sqrt(num)) + 1):
if num % factor == 0:
heapq.heappush(max_heap, factor * -1)
other_factor = num // factor
if other_factor != factor:
heapq.heappush(max_heap, other_factor * -1)
while len(max_heap) > k:
heapq.heappop(max_heap)
if len(max_heap) < k:
return -1
return max_heap[0] * -1
# Min Heap Solution
# time complexity: O(sqrt(n)* log sqrt(n))
# space complexity: O(sqrt(n))
import heapq
import math
class Solution:
def kthFactor(self, num: int, k: int) -> int:
min_heap = []
for factor in range(1, math.floor(math.sqrt(num)) + 1):
if num % factor == 0:
min_heap.append(factor)
other_factor = num // factor
if other_factor != factor:
min_heap.append(other_factor)
heapq.heapify(min_heap)
return self.get_kth_factor(min_heap, k)
def get_kth_factor(self, min_heap, k):
if len(min_heap) < k:
return -1
factor = None
for index in range(k):
factor = heapq.heappop(min_heap)
return factor
# Simple iterative solution
# time complexity: O(sqrt(n))
# space complexity: O(sqrt(n))
class Solution:
def kthFactor(self, num: int, k: int) -> int:
lower_divisors = []
higher_divisors = []
sqrt = 1 / 2
for divisor in range(1, int(num ** sqrt) + 1):
if num % divisor == 0:
lower_divisors.append(divisor)
other_divisor = num // divisor
if other_divisor != divisor:
higher_divisors.append(other_divisor)
num_lower_divisors = len(lower_divisors)
num_higher_divisors = len(higher_divisors)
if k > num_lower_divisors + num_higher_divisors:
return -1
if k <= num_lower_divisors:
return lower_divisors[k - 1]
return higher_divisors[(k - num_lower_divisors) * -1] | [
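# Quick sanity check against the last Solution defined above (the iterative one): the factors
# of 12 in ascending order are 1, 2, 3, 4, 6, 12, so the 3rd factor is 3 and there is no 7th.
assert Solution().kthFactor(12, 3) == 3
assert Solution().kthFactor(12, 7) == -1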
"[email protected]"
]
| |
50012f51af28fc60aeaa999055ab4075bff3bb29 | 035730cf12c43f59b76d9809e444b9070c3e5732 | /BOJ_15652.py | b2f8733e3e279a3b26cc9d3056624057379bfe3e | []
| no_license | kimhaggie/Coding_practice | e18153838425874b80a683094369a6dfb8836c93 | a4f2732e5d7a63adae990226073333b88324765a | refs/heads/master | 2023-08-01T11:33:54.071564 | 2021-09-07T14:40:56 | 2021-09-07T14:40:56 | 310,264,349 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | #15652
import sys
import math
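# func(i, j, m) builds every non-decreasing sequence of length m with values taken from [i, j],
# which is what BOJ 15652 asks for (values may repeat, printed in lexicographic order).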
def func(i,j,m):
ans = []
if m==1:
return [[k] for k in range(i,j+1)]
else:
for k in range(i,j+1):
for x in func(k,j,m-1):
tmp = [k]
tmp.extend(x)
ans.append(tmp)
return ans
n,m = map(int,sys.stdin.readline().rstrip('\n').split(' '))
ans = func(1,n,m)
for x in ans:
print(' '.join(map(str,x))) | [
"[email protected]"
]
| |
3184ed77b262fc5b31ef7217005cd516822648a3 | 3c7dcf8c7af1536af8d6ff3b7ec4389e9523823a | /greenloc_crm_ext/models/crm_lead.py | 4a0feff5843797d56a0ab622c6f67303ae94d435 | []
| no_license | tate11/module | cb70e8e45ecb9912a597ea9310c29baf9074fa90 | b5148fad3f3a23df749d3d3d7278c2ce22e067d8 | refs/heads/master | 2020-03-18T21:11:18.403431 | 2017-06-14T07:48:48 | 2017-06-14T07:48:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 69,607 | py | # -*- coding: utf-8 -*-
from openerp import api, fields, models, _
import base64
from odoo.tools import DEFAULT_SERVER_DATE_FORMAT
from dateutil.relativedelta import relativedelta
from datetime import datetime, date, timedelta
from odoo.addons.greenloc_crm_ext.controllers import universign
from odoo.exceptions import UserError
CRM_LEAD_FIELDS_TO_MERGE = [
'name',
'partner_id',
'campaign_id',
'company_id',
'country_id',
'team_id',
'state_id',
'stage_id',
'medium_id',
'source_id',
'user_id',
'title',
'city',
'contact_name',
'description',
'email',
'fax',
'mobile',
'partner_name',
'phone',
'probability',
'planned_revenue',
'street',
'street2',
'zip',
'create_date',
'date_action_last',
'date_action_next',
'email_from',
'email_cc',
'partner_name',
'fal_model_1_pdf',
'fal_model_1_pdf_fname']
class crm_lead(models.Model):
_inherit = 'crm.lead'
# Default Value
def _get_roof_information_default(self):
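        # (0, 0, vals) is the Odoo x2many "create" command, so a new lead starts with four empty roof lines.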
return [(0, 0, {'name': _('Roof 1')}), (0, 0, {'name': _('Roof 2')}), (0, 0, {'name': _('Roof 3')}), (0, 0, {'name': _('Roof 4')})]
# Domain function
@api.model
def _getUserGroupId(self):
technician_group = self.env.ref('greenloc_crm_ext.group_greenloc_technician').users.ids
return [('id', 'in', technician_group)]
# Adding some required field
fal_sequence = fields.Char(string='Sequence',
copy=False)
fal_quality_user_id = fields.Many2one("res.users", "Quality Person", track_visibility='onchange')
fal_salesperson_user_id = fields.Many2one("res.users", "Validator Person", track_visibility='onchange')
fal_lead_stage = fields.Selection([('draft', 'Draft'),
('new', 'New'),
('to_control', 'To Control'),
('affected', 'Affected')],
string="Lead Stage", default='draft', track_visibility='onchange')
fal_send_email_customer = fields.Boolean(string="Send Email to customer")
fal_send_email_other = fields.Char(string="Also send Email to")
fal_roof_surface = fields.Integer(string='Total Roof Surface', compute="_get_roof_surface_total")
fal_sun_eyes_tools = fields.Float(string="Average Sun Eyes Tools", compute="_get_sun_eyes_average")
fal_france_building = fields.Boolean(string='Is a France Building')
fal_recall_reason = fields.Text(string="Recall Reason")
fal_lost_reason = fields.Text(string="Lost Reason")
fal_partner_child_ids = fields.One2many('res.partner', 'lead_id', string='Partner Contacts', related="partner_id.child_ids")
fal_marital_status = fields.Selection([('married', 'Married'),
('single', 'Single'),
('widower', 'Widower',),
('pacs', 'Pacs'),
('concubinage', 'Concubinage')],
string="Marital Status")
fal_wedding_contract = fields.Char(string="Wedding Contract")
    fal_root_surface_pdf = fields.Binary(string="Roof Surface's PDF")
    fal_root_surface_pdf_fname = fields.Char("Roof Surface's PDF Fname")
fal_sun_eyes_tools_pdf = fields.Binary(string="SunEyes Tool's PDF")
fal_sun_eyes_tools_pdf_fname = fields.Char("SunEyes Tool's PDF Fname")
fal_roof_information_ids = fields.One2many("greenloc.crm.roof.information", "crm_lead_id", "Roof Information", default=_get_roof_information_default)
fal_solargis_csv = fields.Binary(string="Solargis CSV")
fal_solargis_csv_fname = fields.Char("Solargis CSV Fname")
    fal_parcel_no = fields.Char(string="Parcel Number")
fal_section = fields.Char(string="Section Number")
fal_contenance_1 = fields.Integer(string="Contenance 1")
fal_contenance_2 = fields.Integer(string="Contenance 2")
fal_contenance_3 = fields.Integer(string="Contenance 3")
fal_place_says = fields.Char(string="Place Says")
fal_goods = fields.Selection([('common', 'common'),
('clean', 'Clean')], string="Goods")
fal_ownership = fields.Selection([('alone', 'Alone'),
('indivision', 'Indivision'),
('co_ownership_or_similar', 'Co-ownership or similar'),
('usufruct_and_bare_ownership', 'Usufruct and bare ownership'),
('right_of_use_and_habitation', 'Right of use and habitation')], string="Ownership")
fal_model_1_pdf = fields.Binary(string="Model 1's PDF")
fal_model_1_pdf_fname = fields.Char("Model 1's PDF Fname")
fal_cadastre_pdf = fields.Binary(string="Cadastre's PDF")
fal_cadastre_pdf_fname = fields.Char("Cadastre's PDF Fname")
fal_quality_team_notes = fields.Text(string="QT Notes", help="Quality Team's Notes")
fal_mail_ids = fields.One2many('mail.mail', 'fal_lead_id', string="Mail")
fal_is_dia = fields.Boolean('Different Installation Address')
fal_dia_street = fields.Char('Installation Address', compute="_get_fal_dia_street", inverse="_set_fal_dia_street")
fal_dia_street2 = fields.Char('Installation Address 2', compute="_get_fal_dia_street2", inverse="_set_fal_dia_street2")
fal_dia_city = fields.Char('Installation City', compute="_get_fal_dia_city", inverse="_set_fal_dia_city")
fal_dia_state = fields.Many2one("res.country.state", 'Installation State', compute="_get_fal_dia_state", inverse="_set_fal_dia_state", ondelete='restrict')
fal_dia_zip = fields.Char('Installation Zip', compute="_get_fal_dia_zip", inverse="_set_fal_dia_zip")
fal_dia_country_id = fields.Many2one('res.country', 'Installation Country', compute="_get_fal_dia_country", inverse="_set_fal_dia_country", ondelete='restrict')
fal_dia_depart_no = fields.Char(string="Department N°", compute='_compute_department_no', store=True)
fal_dia_partner_latitude = fields.Float(string='Geo Latitude', digits=(16, 5))
fal_dia_partner_longitude = fields.Float(string='Geo Longitude', digits=(16, 5))
fal_dia_date_localization = fields.Date(string='Geolocation Date')
fal_last_date_signature_request = fields.Date("Last Signature Request at")
fal_website_form_result = fields.Selection([('lost', 'Lost'),
('won', 'Won'),
('recall', 'Recall')], 'Website Result')
fal_document_signature_ids = fields.One2many('greenloc.crm.lead.docs.sign.attachment', 'lead_id', 'Signature Documents')
fal_crm_universign_ids = fields.One2many('greenloc.crm.universign', 'lead_id', 'Universign')
fal_is_complex = fields.Boolean('Is Complex')
fal_is_rdvvt = fields.Boolean('Is RDV VT')
fal_is_l3 = fields.Boolean('Is L3')
fal_lead_origin = fields.Selection([('fo', 'FO'),
('grl', 'GRL'),
('prl', 'PRL'),
('ntc', 'NTC')], 'Lead Origin')
fal_technical_visit_ids = fields.One2many("greenloc.crm.lead.technical.visit", 'lead_id', 'Technical Visit')
fal_no_of_recall = fields.Integer("Number of Recall", default=0)
technician_id = fields.Many2one("res.users", "VT Technician", domain=_getUserGroupId)
# Relate field email and phone
email_from = fields.Char(related="partner_id.email")
phone = fields.Char(related="partner_id.phone")
mobile = fields.Char(related="partner_id.mobile")
# Remove later if it's not needed
# fal_signature_request_ids = fields.One2many('signature.request', 'fal_lead_id', 'Sign Documents')
# Compute Method
@api.one
@api.depends('fal_dia_zip')
def _compute_department_no(self):
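        # The French department number is the first two digits of the zip code, e.g. "75011" -> "75".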
if self.fal_dia_zip:
self.fal_dia_depart_no = self.fal_dia_zip[0:2]
@api.one
@api.depends('fal_roof_information_ids', 'fal_roof_information_ids.roof_surface')
def _get_roof_surface_total(self):
if self.fal_roof_information_ids:
total_roof_surface = 0
for fal_roof_information_id in self.fal_roof_information_ids:
total_roof_surface += fal_roof_information_id.roof_surface
self.fal_roof_surface = total_roof_surface
@api.one
@api.depends('fal_roof_information_ids', 'fal_roof_information_ids.roof_surface', 'fal_roof_information_ids.sun_eyes_tools')
def _get_sun_eyes_average(self):
if self.fal_roof_information_ids:
average_sun_eyes_tools = 0
total_roof_surface = 0
for fal_roof_information_id in self.fal_roof_information_ids:
average_sun_eyes_tools += fal_roof_information_id.roof_surface * fal_roof_information_id.sun_eyes_tools
total_roof_surface += fal_roof_information_id.roof_surface
if total_roof_surface != 0:
self.fal_sun_eyes_tools = average_sun_eyes_tools / total_roof_surface
else:
self.fal_sun_eyes_tools = average_sun_eyes_tools / 1
@api.one
@api.depends('fal_partner_child_ids', 'fal_partner_child_ids.state_id')
def _get_fal_dia_state(self):
if self.fal_partner_child_ids:
for fal_partner_child_id in self.fal_partner_child_ids:
if fal_partner_child_id.type == 'other' and (fal_partner_child_id.name and fal_partner_child_id.name.lower() in ["adresse d'installation", "addresse d'installation", "installation address"]):
self.fal_dia_state = fal_partner_child_id.state_id and fal_partner_child_id.state_id.id or False
@api.one
def _set_fal_dia_state(self):
for fal_partner_child_id in self.fal_partner_child_ids:
if fal_partner_child_id.type == 'other' and (fal_partner_child_id.name and fal_partner_child_id.name.lower() in ["adresse d'installation", "addresse d'installation", "installation address"]):
fal_partner_child_id.state_id = self.fal_dia_state and self.fal_dia_state.id or False
@api.one
@api.depends('fal_partner_child_ids', 'fal_partner_child_ids.street')
def _get_fal_dia_street(self):
if self.fal_partner_child_ids:
for fal_partner_child_id in self.fal_partner_child_ids:
if fal_partner_child_id.type == 'other' and (fal_partner_child_id.name and fal_partner_child_id.name.lower() in ["adresse d'installation", "addresse d'installation", "installation address"]):
self.fal_dia_street = fal_partner_child_id.street
@api.one
def _set_fal_dia_street(self):
for fal_partner_child_id in self.fal_partner_child_ids:
if fal_partner_child_id.type == 'other' and (fal_partner_child_id.name and fal_partner_child_id.name.lower() in ["adresse d'installation", "addresse d'installation", "installation address"]):
fal_partner_child_id.street = self.fal_dia_street
@api.one
@api.depends('fal_partner_child_ids', 'fal_partner_child_ids.street2')
def _get_fal_dia_street2(self):
if self.fal_partner_child_ids:
for fal_partner_child_id in self.fal_partner_child_ids:
if fal_partner_child_id.type == 'other' and (fal_partner_child_id.name and fal_partner_child_id.name.lower() in ["adresse d'installation", "addresse d'installation", "installation address"]):
self.fal_dia_street2 = fal_partner_child_id.street2
@api.one
def _set_fal_dia_street2(self):
for fal_partner_child_id in self.fal_partner_child_ids:
if fal_partner_child_id.type == 'other' and (fal_partner_child_id.name and fal_partner_child_id.name.lower() in ["adresse d'installation", "addresse d'installation", "installation address"]):
fal_partner_child_id.street2 = self.fal_dia_street2
@api.one
@api.depends('fal_partner_child_ids', 'fal_partner_child_ids.city')
def _get_fal_dia_city(self):
if self.fal_partner_child_ids:
for fal_partner_child_id in self.fal_partner_child_ids:
if fal_partner_child_id.type == 'other' and (fal_partner_child_id.name and fal_partner_child_id.name.lower() in ["adresse d'installation", "addresse d'installation", "installation address"]):
self.fal_dia_city = fal_partner_child_id.city
@api.one
def _set_fal_dia_city(self):
for fal_partner_child_id in self.fal_partner_child_ids:
if fal_partner_child_id.type == 'other' and (fal_partner_child_id.name and fal_partner_child_id.name.lower() in ["adresse d'installation", "addresse d'installation", "installation address"]):
fal_partner_child_id.city = self.fal_dia_city
@api.one
@api.depends('fal_partner_child_ids', 'fal_partner_child_ids.zip')
def _get_fal_dia_zip(self):
if self.fal_partner_child_ids:
for fal_partner_child_id in self.fal_partner_child_ids:
if fal_partner_child_id.type == 'other' and (fal_partner_child_id.name and fal_partner_child_id.name.lower() in ["adresse d'installation", "addresse d'installation", "installation address"]):
self.fal_dia_zip = fal_partner_child_id.zip
@api.one
def _set_fal_dia_zip(self):
for fal_partner_child_id in self.fal_partner_child_ids:
if fal_partner_child_id.type == 'other' and (fal_partner_child_id.name and fal_partner_child_id.name.lower() in ["adresse d'installation", "addresse d'installation", "installation address"]):
fal_partner_child_id.zip = self.fal_dia_zip
@api.one
@api.depends('fal_partner_child_ids', 'fal_partner_child_ids.country_id')
def _get_fal_dia_country(self):
if self.fal_partner_child_ids:
for fal_partner_child_id in self.fal_partner_child_ids:
if fal_partner_child_id.type == 'other' and (fal_partner_child_id.name and fal_partner_child_id.name.lower() in ["adresse d'installation", "addresse d'installation", "installation address"]):
self.fal_dia_country_id = fal_partner_child_id.country_id and fal_partner_child_id.country_id.id or False
@api.one
def _set_fal_dia_country(self):
for fal_partner_child_id in self.fal_partner_child_ids:
if fal_partner_child_id.type == 'other' and (fal_partner_child_id.name and fal_partner_child_id.name.lower() in ["adresse d'installation", "addresse d'installation", "installation address"]):
fal_partner_child_id.country_id = self.fal_dia_country_id and self.fal_dia_country_id.id or False
# Re-send model1 mail
@api.multi
def re_send_model1_mail(self):
template = self.env['ir.model.data'].xmlid_to_object('greenloc_crm_ext.lead_pdf_request_email')
mail = template.send_mail(self.id, force_send=True)
self.env['mail.mail'].browse(mail).fal_lead_id = self.id
return True
# Call Geolocation of Partner
@api.multi
def lead_geo_localize(self):
        # Geolocate the installation-address contact and copy its coordinates onto the lead
for lead in self:
for fal_partner_child_id in lead.fal_partner_child_ids:
if fal_partner_child_id.type == 'other' and (fal_partner_child_id.name and fal_partner_child_id.name.lower() in ["adresse d'installation", "addresse d'installation", "installation address"]):
if fal_partner_child_id.geo_localize():
lead.fal_dia_partner_latitude = fal_partner_child_id.partner_latitude
lead.fal_dia_partner_longitude = fal_partner_child_id.partner_longitude
lead.fal_dia_date_localization = fal_partner_child_id.date_localization
# Merge lead with head selected.
@api.multi
def merge_opportunity(self, user_id=False, team_id=False):
""" Merge opportunities in one. Different cases of merge:
- merge leads together = 1 new lead
- merge at least 1 opp with anything else (lead or opp) = 1 new opp
The resulting lead/opportunity will be the most important one (based on its confidence level)
updated with values from other opportunities to merge.
:param user_id : the id of the saleperson. If not given, will be determined by `_merge_data`.
:param team : the id of the sales team. If not given, will be determined by `_merge_data`.
:return crm.lead record resulting of th merge
"""
if len(self.ids) <= 1:
raise UserError(_('Please select more than one element (lead or opportunity) from the list view.'))
# Sorting the leads/opps according to the confidence level of its stage, which relates to the probability of winning it
# The confidence level increases with the stage sequence, except when the stage probability is 0.0 (Lost cases)
# An Opportunity always has higher confidence level than a lead, unless its stage probability is 0.0
def opps_key(opportunity):
sequence = -1
if opportunity.stage_id.on_change:
sequence = opportunity.stage_id.sequence
return (sequence != -1 and opportunity.type == 'opportunity'), sequence, -opportunity.id
opportunities = self.sorted(key=opps_key, reverse=True)
# get SORTED recordset of head and tail, and complete list
opportunities_head = opportunities[0]
opportunities_tail = opportunities[1:]
# Override Falinwa Greenloc
# If head is defined in merge, use it.
if 'head_selected' in self.env.context and self.env.context['head_selected']:
opportunities_head = self.env.context['head_selected']
opportunities_tail = self.browse()
for opportunity in opportunities:
if not opportunity == opportunities_head:
opportunities_tail += opportunity
opportunities = opportunities_head + opportunities_tail
# merge all the sorted opportunity. This means the value of
# the first (head opp) will be a priority.
merged_data = opportunities._merge_data(list(CRM_LEAD_FIELDS_TO_MERGE))
# force value for saleperson and sales team
if user_id:
merged_data['user_id'] = user_id
if team_id:
merged_data['team_id'] = team_id
# merge other data (mail.message, attachments, ...) from tail into head
opportunities_head.merge_dependences(opportunities_tail)
# check if the stage is in the stages of the sales team. If not, assign the stage with the lowest sequence
if merged_data.get('team_id'):
team_stage_ids = self.env['crm.stage'].search(['|', ('team_id', '=', merged_data['team_id']), ('team_id', '=', False)], order='sequence')
if merged_data.get('stage_id') not in team_stage_ids.ids:
merged_data['stage_id'] = team_stage_ids[0].id if team_stage_ids else False
# write merged data into first opportunity
opportunities_head.write(merged_data)
# delete tail opportunities
# we use the SUPERUSER to avoid access rights issues because as the user had the rights to see the records it should be safe to do so
opportunities_tail.sudo().unlink()
return opportunities_head
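        # Illustrative usage (ids are hypothetical): force the surviving record through the context, e.g.
        #   leads = self.env['crm.lead'].browse([12, 15, 18])
        #   leads.with_context(head_selected=leads[1]).merge_opportunity()
        # keeps the second record as the head and folds the others into it before they are deleted.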
# Action Phone Call
@api.multi
def action_phone_call(self):
""" Link to open sent appraisal"""
self.ensure_one()
return {
"type": "ir.actions.act_url",
"url": "http://192.168.1.218/apicall.php?dest=" + str(self.phone) + "&poste=" + str(self.env['res.users'].browse(self._uid).partner_id.phone),
"target": "new",
}
# Action Mobile Call
@api.multi
def action_mobile_call(self):
""" Link to open sent appraisal"""
self.ensure_one()
return {
"type": "ir.actions.act_url",
"url": "http://192.168.1.218/apicall.php?dest=" + str(self.mobile) + "&poste=" + str(self.env['res.users'].browse(self._uid).partner_id.phone),
"target": "new",
}
# Action Fetch Mail
@api.multi
def fetch_pdf_email_from_greenloc(self):
attachment_name = self.env.context['attachment_name']
lead_id = self.env.context['active_id']
attachment_id = self.env['ir.attachment'].search([('res_id', '=', lead_id), ('res_model', '=', 'crm.lead'), ('name', '=', attachment_name)], limit=1)
self.browse(lead_id).fal_model_1_pdf = attachment_id.datas
self.browse(lead_id).fal_model_1_pdf_fname = attachment_name
self.browse(lead_id).action_set_to_control_lead_stage()
    # "Affect" (assign) a Lead or Opportunity to the current user
@api.model
def affect_lead_quality(self):
lead_id = self.env['crm.lead'].search([('active', '=', True), ('type', '=', 'lead'), ('fal_lead_stage', '=', 'affected'), ('fal_quality_user_id', '=', self.env.uid), ('fal_is_complex', '=', False)], order='write_date asc', limit=1)
if not lead_id:
lead_id = self.env['crm.lead'].search([('active', '=', True), ('type', '=', 'lead'), ('fal_lead_stage', '=', 'to_control'), ('fal_quality_user_id', '=', False), ('fal_is_complex', '=', False)], order='write_date asc', limit=1)
if lead_id:
view_id = self.env['ir.model.data'].get_object_reference('greenloc_crm_ext', 'crm_case_form_view_leads_greenloc')[1]
lead_id.action_set_to_affected_lead_stage()
return {
"type": "ir.actions.act_window",
"res_model": "crm.lead",
"view_mode": "form",
"view_type": "form",
"view_id": view_id,
"res_id": lead_id.id,
"tag": 'reload',
'target': 'current',
}
return {
'type': 'ir.actions.act_url',
'target': 'self',
'url': '/web',
}
@api.model
def affect_opportunity_salesperson(self):
new_stage = self.env['ir.model.data'].xmlid_to_object('crm.stage_lead1').id
lead_id = self.env['crm.lead'].search([('active', '=', True), ('type', '=', 'opportunity'), ('stage_id', '=', new_stage), ('fal_salesperson_user_id', '=', False), ('fal_is_complex', '=', False)], order='write_date asc', limit=1)
if lead_id:
view_id = self.env['ir.model.data'].get_object_reference('greenloc_crm_ext', 'crm_case_form_view_oppor_greenloc')[1]
lead_id.action_set_next_stage()
lead_id.fal_salesperson_user_id = self._uid
return {
"type": "ir.actions.act_window",
"res_model": "crm.lead",
"view_mode": "form",
"view_type": "form",
"view_id": view_id,
"res_id": lead_id.id,
"tag": 'reload',
'target': 'current',
}
return {
'type': 'ir.actions.act_url',
'target': 'self',
'url': '/web',
}
    # Skip the crm.lead2opportunity.partner wizard form
@api.multi
def action_fast_convert_to_opportunity(self):
self.ensure_one()
for lead in self:
crm_lead_2_opportunity_values = {
'team_id': self.env['res.users'].browse(self._uid).sale_team_id.id or lead.team_id.id or False,
'action': 'create',
'opportunity_ids': [(4, lead.id)],
'name': 'convert',
'user_id': self._uid}
self.env['crm.lead2opportunity.partner'].sudo().create(crm_lead_2_opportunity_values).with_context(active_ids=[lead.id]).action_apply()
template = self.env['ir.model.data'].xmlid_to_object('greenloc_crm_ext.new_opportunity_greenloc_email')
mail = template.send_mail(lead.id, force_send=True)
self.env['mail.mail'].browse(mail).fal_lead_id = lead.id
if self.env['res.users'].browse(self._uid).has_group('sales_team.group_sale_manager'):
return lead.redirect_opportunity_view()
else:
return self.affect_lead_quality()
# Action Methods (Change Stage for Leads)
@api.multi
def action_set_to_draft_lead_stage(self):
for lead in self:
lead.write({'fal_lead_stage': 'draft', 'fal_quality_user_id': False})
if self.env['res.users'].browse(self._uid).has_group('sales_team.group_sale_manager'):
return True
else:
return self.affect_lead_quality()
return True
@api.multi
def action_set_to_new_lead_stage(self):
for lead in self:
lead.write({'fal_lead_stage': 'new', 'user_id': self._uid})
menu_ids = self.env.ref('greenloc_crm_ext.greenloc_menu_crm_leads_operator').id
if self.env['res.users'].browse(self._uid).has_group('sales_team.group_sale_manager'):
return True
else:
return {
'type': 'ir.actions.client',
'tag': 'reload',
'params': {'menu_id': menu_ids or False},
'target': 'current',
}
return True
@api.multi
def action_set_to_control_lead_stage(self):
for lead in self:
lead.write({'fal_lead_stage': 'to_control'})
menu_ids = self.env.ref('greenloc_crm_ext.greenloc_menu_crm_leads_operator').id
if self.env['res.users'].browse(self._uid).has_group('sales_team.group_sale_manager'):
return True
else:
return {
'type': 'ir.actions.client',
'tag': 'reload',
'params': {'menu_id': menu_ids or False},
'target': 'current',
}
return True
@api.multi
def action_set_to_affected_lead_stage(self):
for lead in self:
lead.write({'fal_lead_stage': 'affected', 'fal_quality_user_id': self._uid})
# We don't need it anymore
# template = self.env['ir.model.data'].xmlid_to_object('greenloc_crm_ext.lead_to_affected_email')
# mail = template.send_mail(lead.id, force_send=True)
# self.env['mail.mail'].browse(mail).fal_lead_id = lead.id
return True
@api.multi
def action_set_lost(self):
""" Lost semantic: probability = 0, active = False """
self.write({'probability': 0, 'active': False})
# Send Email to customer
if self.fal_lead_stage == 'affected' and self.type == 'lead':
template = self.env['ir.model.data'].xmlid_to_object('greenloc_crm_ext.lost_opportunity_greenloc_email')
mail = template.send_mail(self.id, force_send=True)
self.env['mail.mail'].browse(mail).fal_lead_id = self.id
if self.env['res.users'].browse(self._uid).has_group('sales_team.group_sale_manager'):
return True
elif 'view_form_source' in self.env.context and self.env.context['view_form_source'] == 'lead':
return self.affect_lead_quality()
else:
if self.env['res.users'].browse(self._uid).has_group('greenloc_crm_ext.group_greenloc_salesperson'):
menu_ids = self.env.ref('greenloc_crm_ext.greenloc_menu_crm_opportunity_salesperson').id
return {
'type': 'ir.actions.client',
'tag': 'reload',
'params': {'menu_id': menu_ids or False},
'target': 'current',
}
else:
return {
'type': 'ir.actions.act_url',
'target': 'self',
'url': '/web',
}
@api.multi
def action_set_won(self):
""" Won semantic: probability = 100 (active untouched) """
for lead in self:
stage_id = lead._stage_find(domain=[('probability', '=', 100.0), ('on_change', '=', True)])
lead.write({'stage_id': stage_id.id, 'probability': 100})
# Send Email To Customer
attachment_ids = []
for document_sign in lead.fal_document_signature_ids:
if not document_sign.inactive:
if document_sign.signed_doc_id:
attachment = document_sign.signed_doc_id
attachment_ids.append(attachment.id)
email_values = {'attachment_ids': attachment_ids}
template = self.env['ir.model.data'].xmlid_to_object('greenloc_crm_ext.opportunity_to_won_email')
mail = template.with_context().send_mail(self.id, force_send=True, email_values=email_values)
self.env['mail.mail'].browse(mail).fal_lead_id = self.id
return True
# Action Methods (Change Stage)
@api.multi
def action_set_l3_stage(self):
for lead in self:
stage_id = self.env.ref('greenloc_crm_ext.greenloc_lead_workflow_6')
lead.write({'stage_id': stage_id.id, 'fal_is_l3': True})
return True
@api.multi
def action_set_l3_to_rdvvt_stage(self):
# Should only accessed from RDV VT Stage
for lead in self:
stage_id = self.env.ref('greenloc_crm_ext.greenloc_lead_workflow_5')
lead.write({'stage_id': stage_id.id, 'fal_is_l3': False})
return True
@api.multi
def action_set_rdvvt_stage(self):
for lead in self:
stage_id = self.env.ref('greenloc_crm_ext.greenloc_lead_workflow_5')
# Check VT Technician Login
if lead.technician_id and lead.technician_id.fal_universign_login and lead.technician_id.fal_universign_password:
lead.generate_documents_rdvvt()
lead.write({'stage_id': stage_id.id, 'fal_is_rdvvt': True})
else:
raise UserError(_("Please provide Technician in the Lead, also make sure Universign login and password is set on the technician user."))
return True
@api.multi
def action_set_rdvvt_to_won__stage(self):
# Should only accessed from RDV VT Stage
for lead in self:
stage_id = lead._stage_find(domain=[('probability', '=', 100.0), ('on_change', '=', True)])
lead.write({'stage_id': stage_id.id, 'probability': 100, 'fal_is_rdvvt': False})
return True
@api.multi
def action_set_next_stage(self):
for lead in self:
stage_id = lead._stage_find(domain=[('probability', '>', lead.probability), ('on_change', '=', True)])
lead.write({'stage_id': stage_id.id, 'probability': stage_id.probability})
return True
@api.multi
def action_set_opportunity_new(self):
for lead in self:
new_stage = self.env.ref('crm.stage_lead1')
lead.write({'stage_id': new_stage.id, 'probability': new_stage.probability, 'fal_no_of_recall': (lead.fal_no_of_recall + 1)})
self.fal_salesperson_user_id = False
if self.env['res.users'].browse(self._uid).has_group('sales_team.group_sale_manager'):
return True
else:
menu_ids = self.env.ref('greenloc_crm_ext.greenloc_menu_crm_opportunity_salesperson').id
return {
'type': 'ir.actions.client',
'tag': 'reload',
'params': {'menu_id': menu_ids or False},
'target': 'current',
}
return True
@api.multi
def action_set_prev_stage(self):
for lead in self:
stage_id = lead._stage_find(domain=[('probability', '<', lead.probability), ('on_change', '=', True)], order="sequence desc")
lead.write({'stage_id': stage_id.id, 'probability': stage_id.probability})
if stage_id == self.env.ref('crm.stage_lead1'):
self.fal_salesperson_user_id = False
if self.env['res.users'].browse(self._uid).has_group('sales_team.group_sale_manager'):
return True
else:
menu_ids = self.env.ref('greenloc_crm_ext.greenloc_menu_crm_opportunity_salesperson').id
return {
'type': 'ir.actions.client',
'tag': 'reload',
'params': {'menu_id': menu_ids or False},
'target': 'current',
}
return True
@api.multi
def _onchange_partner_id_values(self, partner_id):
"""Recover first and last names from partner if available."""
result = super(crm_lead, self)._onchange_partner_id_values(partner_id)
if partner_id:
partner = self.env["res.partner"].browse(partner_id)
if not partner.is_company:
result.update({
"contact_name": partner.firstname,
"contact_lastname": partner.lastname,
"fal_marital_status": partner.fal_marital_status,
"fal_wedding_contract": partner.fal_wedding_contract,
})
return result
def send_mail_model1_pdf(self):
template = self.env['ir.model.data'].xmlid_to_object('greenloc_crm_ext.lead_pdf_request_email')
mail = template.send_mail(self.id, force_send=True)
self.env['mail.mail'].browse(mail).fal_lead_id = self.id
return True
@api.model
def create(self, vals):
vals['fal_sequence'] = self.env['ir.sequence'].next_by_code('crm.lead') or 'New'
result = super(crm_lead, self).create(vals)
# Give new Name, if it's from website, otherwise it should be from import. Do not change the name
if result.fal_website_form_result:
record_new_name = "GRL_" + str(result.fal_sequence)
if result.partner_id.company_type == 'person':
record_new_name += " - PART - "
else:
record_new_name += " - PRO - "
if result.fal_is_complex:
record_new_name += result.partner_id.name
else:
record_new_name += result.partner_id.lastname + "/" + result.partner_id.firstname
result.update({'name': record_new_name})
# Send Email to greenloc to generate PDF
result.send_mail_model1_pdf()
return result
# Block writing on fal_lead_origin if you are not admin
@api.multi
def write(self, vals):
partner_id = False
if 'partner_id' in vals:
partner_id = vals['partner_id']
if ('fal_lead_origin' in vals or (partner_id and partner_id != self.partner_id.id)) and not self.env['res.users'].browse(self._uid).has_group('base.group_erp_manager'):
raise UserError(_("Only User with group : Administration(Access Right) can edit Lead Origin or Customer."))
return super(crm_lead, self).write(vals)
# Send document to universign
@api.multi
def send_to_universign(self, universign_docs=False, universign_signers=False, main_partner=False):
universign_connector = universign.UniversignConnector()
        # Remove False values from the signer dicts
        if universign_signers:
            for universign_signer in universign_signers:
                # Iterate over a copy of the keys because entries are popped during iteration
                for key in list(universign_signer):
                    if universign_signer[key] == False:
                        universign_signer.pop(key, None)
result = universign_connector.request_sign(universign_name=self.name, universign_docs=universign_docs, universign_signers=universign_signers)
self.fal_crm_universign_ids = [(0, 0, {'lead_id': self.id, 'fal_universign_url': result['url'], 'fal_universign_id': result['id'], 'fal_universign_login': "[email protected]", 'fal_universign_server': '@ws.universign.eu/sign/rpc/', 'fal_universign_type': 'reg'})]
if result:
email_to = universign_signers[0]['emailAddress']
template = self.env['ir.model.data'].xmlid_to_object('greenloc_crm_ext.universign_greenloc_email')
attachment_ids = []
for document_sign in universign_docs:
attachment = self.env['ir.attachment'].search([('name', '=', document_sign['name'])], limit=1)
attachment_ids.append(attachment.id)
email_values = {'attachment_ids': attachment_ids}
mail = template.with_context(email_to=email_to).send_mail(self.id, force_send=True, email_values=email_values)
self.env['mail.mail'].browse(mail).fal_lead_id = self.id
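    # For reference, the payloads built by generate_documents()/generate_documents_rdvvt() below look like:
    #   universign_signers: [{'firstname': ..., 'lastname': ..., 'emailAddress': ..., 'certificateType': 'simple'/'advanced'}, ...]
    #   universign_docs:    [{'name': ..., 'content': <base64 data>, 'signatureFields': [{'page': 1, 'x': 80, 'y': 42, 'signerIndex': n}, ...]}]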
# Send document to universign Rdvvt
@api.multi
def send_rdvvt_to_universign(self, universign_docs=False, universign_signers=False, main_partner=False):
universign_connector = universign.UniversignConnector()
        # Remove False values from the signer dicts
        if universign_signers:
            for universign_signer in universign_signers:
                # Iterate over a copy of the keys because entries are popped during iteration
                for key in list(universign_signer):
                    if universign_signer[key] == False:
                        universign_signer.pop(key, None)
result = universign_connector.request_sign_l3(universign_name=self.name, universign_docs=universign_docs, universign_signers=universign_signers, universign_login=self.technician_id.fal_universign_login, universign_password=self.technician_id.fal_universign_password)
self.fal_crm_universign_ids = [(0, 0, {'lead_id': self.id, 'fal_universign_url': result['url'], 'fal_universign_id': result['id'], 'fal_universign_login': self.technician_id.fal_universign_login, 'fal_universign_server': '@sign.test.cryptolog.com/sign/rpc/', 'fal_universign_type': 'l3'})]
# Cron to receive from universign
@api.model
def cron_receive_universign(self, universign_docs=False, universign_signers=False):
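        # request_get() is expected to return either a status string ('expired', 'canceled', 'failed')
        # or a dict mapping document names to signed base64 content (inferred from the handling below).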
universign_connector = universign.UniversignConnector()
tehcnician_vt = [(technician.fal_universign_login, technician.fal_universign_password) for technician in self.env['res.users'].search([('fal_universign_login', '!=', False), ('fal_universign_password', '!=', False)])]
# Universign 1st Doc
for lead_universign in self.env['greenloc.crm.universign'].search([('fal_universign_id', '!=', False), ('fal_universign_doc_received', '=', False), ('fal_universign_doc_failed', '=', False), ('lead_id', '!=', False), ('fal_universign_type', '=', 'reg')]):
results = universign_connector.request_get(universign_id=lead_universign.fal_universign_id, lead_id=lead_universign.lead_id.id, technician=tehcnician_vt)
if results in ['expired', 'canceled', 'failed']:
affected_stage = self.env.ref('greenloc_crm_ext.greenloc_lead_workflow_1')
lead_universign.lead_id.write({'stage_id': affected_stage.id, 'probability': affected_stage.probability})
lead_universign.fal_universign_doc_failed = results
if results and results not in ['expired', 'canceled', 'failed']:
for result in results:
attachment = self.env['ir.attachment'].create({
'name': 'Signed - ' + result,
'datas_fname': 'Signed - ' + result + '.pdf',
'datas': results[result],
'type': 'binary',
'res_model': 'crm.lead',
'fal_protected': True,
'res_id': lead_universign.lead_id.id,
'res_name': lead_universign.lead_id.name,
'fal_lead_id': lead_universign.lead_id.id,
})
match_doc = self.env['ir.attachment'].search([('name', '=', result)], limit=1)
if match_doc:
greenloc_docs = match_doc.fal_greenloc_crm_lead_docs_initial_attachment
greenloc_docs.signed_doc_ids = [(4, attachment.id)]
greenloc_docs.signed_doc_id = attachment.id
greenloc_docs.signed_doc_id_binary = attachment.datas
greenloc_docs.signed_doc_id_fname = attachment.datas_fname
lead_universign.fal_universign_doc_received = True
lead_universign.lead_id.action_set_won()
# Universign L3 Doc
for lead_universign in self.env['greenloc.crm.universign'].search([('fal_universign_id', '!=', False), ('fal_universign_doc_received', '=', False), ('fal_universign_doc_failed', '=', False), ('lead_id', '!=', False), ('fal_universign_type', '=', 'l3')]):
results = universign_connector.request_get(universign_id=lead_universign.fal_universign_id, lead_id=lead_universign.lead_id.id, technician=tehcnician_vt)
if results in ['expired', 'canceled', 'failed']:
lead_universign.fal_universign_doc_failed = results
if results and results not in ['expired', 'canceled', 'failed']:
for result in results:
attachment = self.env['ir.attachment'].create({
'name': 'Signed - ' + result,
'datas_fname': 'Signed - ' + result + '.pdf',
'datas': results[result],
'type': 'binary',
'res_model': 'crm.lead',
'fal_protected': True,
'res_id': lead_universign.lead_id.id,
'res_name': lead_universign.lead_id.name,
'fal_lead_id': lead_universign.lead_id.id,
})
match_doc = self.env['ir.attachment'].search([('name', '=', result)], limit=1)
if match_doc:
greenloc_docs = match_doc.fal_greenloc_crm_lead_docs_initial_attachment
greenloc_docs.signed_doc_ids = [(4, attachment.id)]
greenloc_docs.signed_doc_id = attachment.id
greenloc_docs.signed_doc_id_binary = attachment.datas
greenloc_docs.signed_doc_id_fname = attachment.datas_fname
lead_universign.fal_universign_doc_received = True
lead_universign.lead_id.action_set_l3_stage()
    # Generate the necessary documents
@api.multi
def generate_documents(self):
self.ensure_one()
self.fal_last_date_signature_request = fields.Date.today()
universign_docs = []
universign_signers = []
# For annexe 7
partner_date_format = self.partner_id.lang and self.env['res.lang'].search([('code', '=', self.partner_id.lang)], limit=1).date_format or DEFAULT_SERVER_DATE_FORMAT
        # Plus 10 years 6 months
dateplus10years = datetime.strptime(self.fal_last_date_signature_request, DEFAULT_SERVER_DATE_FORMAT) + relativedelta(years=10, months=6)
dateplus10years = datetime.strftime(dateplus10years, partner_date_format)
        # Plus 10 years 6 months minus 1 day
dateplus10yearsmin1days = datetime.strptime(self.fal_last_date_signature_request, DEFAULT_SERVER_DATE_FORMAT) + relativedelta(years=10, months=6)
dateplus10yearsmin1days = dateplus10yearsmin1days - relativedelta(days=1)
dateplus10yearsmin1days = datetime.strftime(dateplus10yearsmin1days, partner_date_format)
        # Plus 20 years 6 months
dateplus20years = datetime.strptime(self.fal_last_date_signature_request, DEFAULT_SERVER_DATE_FORMAT) + relativedelta(years=20, months=6)
dateplus20years = datetime.strftime(dateplus20years, partner_date_format)
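        # Illustrative: a request signed on 2017-01-15 gives 2027-07-15, 2027-07-14 and 2037-07-15
        # respectively, rendered with the partner's date format.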
        # Collect the signers and make sure every relevant contact (owner or lodger) has an email
# 1. Main Partner
universign_signers.append({'firstname': self.partner_id.firstname, 'lastname': self.partner_id.lastname, 'emailAddress': self.partner_id.email, 'certificateType': 'simple'})
# 2. Co Partner
multiple_owner_check = False
for fal_partner_child_id in self.fal_partner_child_ids:
if fal_partner_child_id.fal_is_owner:
multiple_owner_check = True
universign_signers.append({'firstname': fal_partner_child_id.firstname, 'lastname': fal_partner_child_id.lastname, 'emailAddress': fal_partner_child_id.email, 'certificateType': 'simple'})
if not fal_partner_child_id.email:
raise UserError(_("Please provide Email for main partner and all related contacts (That is Lodger or Owner)."))
available_docs_by_code = {docs.code: docs.id for docs in self.fal_document_signature_ids}
# 3. Lodger
lodger_check = False
for fal_partner_child_id in self.fal_partner_child_ids:
if fal_partner_child_id.fal_is_lodger:
lodger_check = True
universign_signers.append({'firstname': fal_partner_child_id.firstname, 'lastname': fal_partner_child_id.lastname, 'emailAddress': fal_partner_child_id.email, 'certificateType': 'simple'})
if not fal_partner_child_id.email:
raise UserError(_("Please provide Email for main partner and all related contacts (That is Lodger or Owner)."))
        # Select the matching document template and either update the existing signature record or create a new one
        # Take the installation address from the child contacts
dia_street = self.fal_dia_street or False
dia_street2 = self.fal_dia_street2 or False
dia_zip = self.fal_dia_zip or False
dia_city = self.fal_dia_city or False
if not multiple_owner_check:
if lodger_check:
generated_docs = self.with_context(dia_street=dia_street, dia_street2=dia_street2, dia_zip=dia_zip, dia_city=dia_city).generate_report_single_with_lodger()
if 's_with' in available_docs_by_code:
self.env['greenloc.crm.lead.docs.sign.attachment'].browse(available_docs_by_code['s_with']).write({'initial_doc_ids': [(4, generated_docs.id)], 'initial_doc_id': generated_docs.id})
else:
self.env['greenloc.crm.lead.docs.sign.attachment'].create({'name': _("Single With Lodger"), 'code': 's_with', 'lead_id': self.id, 'initial_doc_ids': [(4, generated_docs.id)], 'initial_doc_id': generated_docs.id, 'version': 1})
else:
generated_docs = self.with_context(dia_street=dia_street, dia_street2=dia_street2, dia_zip=dia_zip, dia_city=dia_city).generate_report_single_no_lodger()
if 's_no' in available_docs_by_code:
self.env['greenloc.crm.lead.docs.sign.attachment'].browse(available_docs_by_code['s_no']).write({'initial_doc_ids': [(4, generated_docs.id)], 'initial_doc_id': generated_docs.id})
else:
self.env['greenloc.crm.lead.docs.sign.attachment'].create({'name': _("Single No Lodger"), 'code': 's_no', 'lead_id': self.id, 'initial_doc_ids': [(4, generated_docs.id)], 'initial_doc_id': generated_docs.id, 'version': 1})
else:
if lodger_check:
generated_docs = self.with_context(dia_street=dia_street, dia_street2=dia_street2, dia_zip=dia_zip, dia_city=dia_city).generate_report_multi_with_lodger()
if 'm_with' in available_docs_by_code:
self.env['greenloc.crm.lead.docs.sign.attachment'].browse(available_docs_by_code['m_with']).write({'initial_doc_ids': [(4, generated_docs.id)], 'initial_doc_id': generated_docs.id})
else:
self.env['greenloc.crm.lead.docs.sign.attachment'].create({'name': _("Multi With Lodger"), 'code': 'm_with', 'lead_id': self.id, 'initial_doc_ids': [(4, generated_docs.id)], 'initial_doc_id': generated_docs.id, 'version': 1})
else:
generated_docs = self.with_context(dia_street=dia_street, dia_street2=dia_street2, dia_zip=dia_zip, dia_city=dia_city).generate_report_multi_no_lodger()
if 'm_no' in available_docs_by_code:
self.env['greenloc.crm.lead.docs.sign.attachment'].browse(available_docs_by_code['m_no']).write({'initial_doc_ids': [(4, generated_docs.id)], 'initial_doc_id': generated_docs.id})
else:
self.env['greenloc.crm.lead.docs.sign.attachment'].create({'name': _("Multi No Lodger"), 'code': 'm_no', 'lead_id': self.id, 'initial_doc_ids': [(4, generated_docs.id)], 'initial_doc_id': generated_docs.id, 'version': 1})
# Prepare docs to send to universign, also signers position and index
signatureFields = []
for signer_idx in range(len(universign_signers)):
signatureFields.append({'page': 1, 'x': 80, 'y': 42, 'signerIndex': signer_idx})
universign_docs.append({'name': generated_docs.name, 'content': generated_docs.datas, 'signatureFields': signatureFields})
# Send to Universign
# self.send_to_universign(universign_docs=universign_docs, universign_signers=universign_signers, main_partner=True)
return True
    # Generate the necessary documents
@api.multi
def generate_documents_rdvvt(self):
self.ensure_one()
self.fal_last_date_signature_request = fields.Date.today()
universign_docs = []
universign_signers = []
        # Collect the signers and make sure every relevant contact (owner or lodger) has an email
# 1. Main Partner
universign_signers.append({'firstname': self.partner_id.firstname, 'lastname': self.partner_id.lastname, 'emailAddress': self.partner_id.email, 'certificateType': 'advanced'})
# 2. Co Partner
multiple_owner_check = False
for fal_partner_child_id in self.fal_partner_child_ids:
if fal_partner_child_id.fal_is_owner:
multiple_owner_check = True
universign_signers.append({'firstname': fal_partner_child_id.firstname, 'lastname': fal_partner_child_id.lastname, 'emailAddress': fal_partner_child_id.email, 'certificateType': 'advanced'})
if not fal_partner_child_id.email:
raise UserError(_("Please provide Email for main partner and all related contacts (That is Lodger or Owner)."))
available_docs_by_code = {docs.code: docs.id for docs in self.fal_document_signature_ids}
# 3. Lodger
lodger_check = False
for fal_partner_child_id in self.fal_partner_child_ids:
if fal_partner_child_id.fal_is_lodger:
lodger_check = True
universign_signers.append({'firstname': fal_partner_child_id.firstname, 'lastname': fal_partner_child_id.lastname, 'emailAddress': fal_partner_child_id.email, 'certificateType': 'advanced'})
if not fal_partner_child_id.email:
raise UserError(_("Please provide Email for main partner and all related contacts (That is Lodger or Owner)."))
        # Select the matching document template and either update the existing signature record or create a new one
        # Take the installation address from the child contacts
dia_street = self.fal_dia_street or False
dia_street2 = self.fal_dia_street2 or False
dia_zip = self.fal_dia_zip or False
dia_city = self.fal_dia_city or False
if not multiple_owner_check:
if lodger_check:
generated_docs = self.with_context(dia_street=dia_street, dia_street2=dia_street2, dia_zip=dia_zip, dia_city=dia_city).generate_report_l3_single_with_lodger()
if 's3_with' in available_docs_by_code:
self.env['greenloc.crm.lead.docs.sign.attachment'].browse(available_docs_by_code['s3_with']).write({'initial_doc_ids': [(4, generated_docs.id)], 'initial_doc_id': generated_docs.id})
else:
self.env['greenloc.crm.lead.docs.sign.attachment'].create({'name': _("Single L3 With Lodger"), 'code': 's3_with', 'lead_id': self.id, 'initial_doc_ids': [(4, generated_docs.id)], 'initial_doc_id': generated_docs.id, 'version': 1})
else:
generated_docs = self.with_context(dia_street=dia_street, dia_street2=dia_street2, dia_zip=dia_zip, dia_city=dia_city).generate_report_l3_single_no_lodger()
if 's3_no' in available_docs_by_code:
self.env['greenloc.crm.lead.docs.sign.attachment'].browse(available_docs_by_code['s3_no']).write({'initial_doc_ids': [(4, generated_docs.id)], 'initial_doc_id': generated_docs.id})
else:
self.env['greenloc.crm.lead.docs.sign.attachment'].create({'name': _("Single L3 No Lodger"), 'code': 's3_no', 'lead_id': self.id, 'initial_doc_ids': [(4, generated_docs.id)], 'initial_doc_id': generated_docs.id, 'version': 1})
else:
if lodger_check:
generated_docs = self.with_context(dia_street=dia_street, dia_street2=dia_street2, dia_zip=dia_zip, dia_city=dia_city).generate_report_l3_multi_with_lodger()
if 'm3_with' in available_docs_by_code:
self.env['greenloc.crm.lead.docs.sign.attachment'].browse(available_docs_by_code['m3_with']).write({'initial_doc_ids': [(4, generated_docs.id)], 'initial_doc_id': generated_docs.id})
else:
self.env['greenloc.crm.lead.docs.sign.attachment'].create({'name': _("Multi L3 With Lodger"), 'code': 'm3_with', 'lead_id': self.id, 'initial_doc_ids': [(4, generated_docs.id)], 'initial_doc_id': generated_docs.id, 'version': 1})
else:
generated_docs = self.with_context(dia_street=dia_street, dia_street2=dia_street2, dia_zip=dia_zip, dia_city=dia_city).generate_report_l3_multi_no_lodger()
if 'm3_no' in available_docs_by_code:
self.env['greenloc.crm.lead.docs.sign.attachment'].browse(available_docs_by_code['m3_no']).write({'initial_doc_ids': [(4, generated_docs.id)], 'initial_doc_id': generated_docs.id})
else:
self.env['greenloc.crm.lead.docs.sign.attachment'].create({'name': _("Multi L3 No Lodger"), 'code': 'm3_no', 'lead_id': self.id, 'initial_doc_ids': [(4, generated_docs.id)], 'initial_doc_id': generated_docs.id, 'version': 1})
        # Prepare the docs to send to Universign, along with each signer's position and index
signatureFields = []
for signer_idx in range(len(universign_signers)):
signatureFields.append({'page': 1, 'x': 80, 'y': 42, 'signerIndex': signer_idx})
universign_docs.append({'name': generated_docs.name, 'content': generated_docs.datas, 'signatureFields': signatureFields})
# Send to Universign
#self.send_rdvvt_to_universign(universign_docs=universign_docs, universign_signers=universign_signers, main_partner=True)
return True
@api.multi
def generate_report_multi_with_lodger(self):
self.ensure_one()
report = self.env['ir.actions.report.xml'].with_context(self._context).search([('report_name', '=', 'greenloc_crm_ext.report_multi_with_lodger')])
if report:
document = self.env['report'].with_context(self._context).get_pdf([self.id], report.report_name)
available_docs_by_code = {docs.code: docs.id for docs in self.fal_document_signature_ids}
if 'm_with' in available_docs_by_code:
current_version = self.env['greenloc.crm.lead.docs.sign.attachment'].browse(available_docs_by_code['m_with']).version
self.env['greenloc.crm.lead.docs.sign.attachment'].browse(available_docs_by_code['m_with']).version = current_version+1
return self.env['ir.attachment'].create({
'name': self.name + ' - MWL -' + ' V' + str(current_version),
'datas_fname': self.name + ' - MWL -' + ' V' + str(current_version) + '.pdf',
'datas': base64.b64encode(document),
'type': 'binary',
'res_model': 'crm.lead',
'res_id': self.id,
'res_name': self.name,
'fal_lead_id': self.id,
})
else:
return self.env['ir.attachment'].create({
'name': self.name + ' - MWL -' + ' V0',
'datas_fname': self.name + ' - MWL -' + ' V0.pdf',
'datas': base64.b64encode(document),
'type': 'binary',
'res_model': 'crm.lead',
'res_id': self.id,
'res_name': self.name,
'fal_lead_id': self.id,
})
@api.multi
def generate_report_multi_no_lodger(self):
self.ensure_one()
report = self.env['ir.actions.report.xml'].with_context(self._context).search([('report_name', '=', 'greenloc_crm_ext.report_multi_no_lodger')])
if report:
document = self.env['report'].with_context(self._context).get_pdf([self.id], report.report_name)
available_docs_by_code = {docs.code: docs.id for docs in self.fal_document_signature_ids}
if 'm_no' in available_docs_by_code:
current_version = self.env['greenloc.crm.lead.docs.sign.attachment'].browse(available_docs_by_code['m_no']).version
self.env['greenloc.crm.lead.docs.sign.attachment'].browse(available_docs_by_code['m_no']).version = current_version+1
return self.env['ir.attachment'].create({
'name': self.name + ' - MNL -' + ' V' + str(current_version),
'datas_fname': self.name + ' - MNL -' + ' V' + str(current_version) + '.pdf',
'datas': base64.b64encode(document),
'type': 'binary',
'res_model': 'crm.lead',
'res_id': self.id,
'res_name': self.name,
'fal_lead_id': self.id,
})
else:
return self.env['ir.attachment'].create({
'name': self.name + ' - MNL -' + ' V0',
'datas_fname': self.name + ' - MNL -' + ' V0.pdf',
'datas': base64.b64encode(document),
'type': 'binary',
'res_model': 'crm.lead',
'res_id': self.id,
'res_name': self.name,
'fal_lead_id': self.id,
})
@api.multi
def generate_report_single_with_lodger(self):
self.ensure_one()
report = self.env['ir.actions.report.xml'].with_context(self._context).search([('report_name', '=', 'greenloc_crm_ext.report_single_with_lodger')])
if report:
document = self.env['report'].with_context(self._context).get_pdf([self.id], report.report_name)
available_docs_by_code = {docs.code: docs.id for docs in self.fal_document_signature_ids}
if 's_with' in available_docs_by_code:
current_version = self.env['greenloc.crm.lead.docs.sign.attachment'].browse(available_docs_by_code['s_with']).version
self.env['greenloc.crm.lead.docs.sign.attachment'].browse(available_docs_by_code['s_with']).version = current_version+1
return self.env['ir.attachment'].create({
'name': self.name + ' - SWL -' + ' V' + str(current_version),
'datas_fname': self.name + ' - SWL -' + ' V' + str(current_version) + '.pdf',
'datas': base64.b64encode(document),
'type': 'binary',
'res_model': 'crm.lead',
'res_id': self.id,
'res_name': self.name,
'fal_lead_id': self.id,
})
else:
return self.env['ir.attachment'].create({
'name': self.name + ' - SWL -' + ' V0',
'datas_fname': self.name + ' - SWL -' + ' V0.pdf',
'datas': base64.b64encode(document),
'type': 'binary',
'res_model': 'crm.lead',
'res_id': self.id,
'res_name': self.name,
'fal_lead_id': self.id,
})
@api.multi
def generate_report_single_no_lodger(self):
self.ensure_one()
report = self.env['ir.actions.report.xml'].with_context(self._context).search([('report_name', '=', 'greenloc_crm_ext.report_single_no_lodger')])
if report:
document = self.env['report'].with_context(self._context).get_pdf([self.id], report.report_name)
available_docs_by_code = {docs.code: docs.id for docs in self.fal_document_signature_ids}
if 's_no' in available_docs_by_code:
current_version = self.env['greenloc.crm.lead.docs.sign.attachment'].browse(available_docs_by_code['s_no']).version
self.env['greenloc.crm.lead.docs.sign.attachment'].browse(available_docs_by_code['s_no']).version = current_version+1
return self.env['ir.attachment'].create({
'name': self.name + ' - SNL -' + ' V' + str(current_version),
'datas_fname': self.name + ' - SNL -' + ' V' + str(current_version) + '.pdf',
'datas': base64.b64encode(document),
'type': 'binary',
'res_model': 'crm.lead',
'res_id': self.id,
'res_name': self.name,
'fal_lead_id': self.id,
})
else:
return self.env['ir.attachment'].create({
'name': self.name + ' - SNL -' + ' V0',
'datas_fname': self.name + ' - SNL -' + ' V0.pdf',
'datas': base64.b64encode(document),
'type': 'binary',
'res_model': 'crm.lead',
'res_id': self.id,
'res_name': self.name,
'fal_lead_id': self.id,
})
@api.multi
def generate_report_l3_multi_with_lodger(self):
self.ensure_one()
report = self.env['ir.actions.report.xml'].with_context(self._context).search([('report_name', '=', 'greenloc_crm_ext.report_l3_multi_with_lodger')])
if report:
document = self.env['report'].with_context(self._context).get_pdf([self.id], report.report_name)
available_docs_by_code = {docs.code: docs.id for docs in self.fal_document_signature_ids}
if 'm3_with' in available_docs_by_code:
current_version = self.env['greenloc.crm.lead.docs.sign.attachment'].browse(available_docs_by_code['m3_with']).version
self.env['greenloc.crm.lead.docs.sign.attachment'].browse(available_docs_by_code['m3_with']).version = current_version+1
return self.env['ir.attachment'].create({
'name': self.name + ' - MWL L3 -' + ' V' + str(current_version),
'datas_fname': self.name + ' - MWL L3 -' + ' V' + str(current_version) + '.pdf',
'datas': base64.b64encode(document),
'type': 'binary',
'res_model': 'crm.lead',
'res_id': self.id,
'res_name': self.name,
'fal_lead_id': self.id,
})
else:
return self.env['ir.attachment'].create({
'name': self.name + ' - MWL L3 -' + ' V0',
'datas_fname': self.name + ' - MWL L3 -' + ' V0.pdf',
'datas': base64.b64encode(document),
'type': 'binary',
'res_model': 'crm.lead',
'res_id': self.id,
'res_name': self.name,
'fal_lead_id': self.id,
})
@api.multi
def generate_report_l3_multi_no_lodger(self):
self.ensure_one()
report = self.env['ir.actions.report.xml'].with_context(self._context).search([('report_name', '=', 'greenloc_crm_ext.report_l3_multi_no_lodger')])
if report:
document = self.env['report'].with_context(self._context).get_pdf([self.id], report.report_name)
available_docs_by_code = {docs.code: docs.id for docs in self.fal_document_signature_ids}
if 'm3_no' in available_docs_by_code:
current_version = self.env['greenloc.crm.lead.docs.sign.attachment'].browse(available_docs_by_code['m3_no']).version
self.env['greenloc.crm.lead.docs.sign.attachment'].browse(available_docs_by_code['m3_no']).version = current_version+1
return self.env['ir.attachment'].create({
'name': self.name + ' - MNL L3 -' + ' V' + str(current_version),
'datas_fname': self.name + ' - MNL L3 -' + ' V' + str(current_version) + '.pdf',
'datas': base64.b64encode(document),
'type': 'binary',
'res_model': 'crm.lead',
'res_id': self.id,
'res_name': self.name,
'fal_lead_id': self.id,
})
else:
return self.env['ir.attachment'].create({
'name': self.name + ' - MNL L3 -' + ' V0',
'datas_fname': self.name + ' - MNL L3 -' + ' V0.pdf',
'datas': base64.b64encode(document),
'type': 'binary',
'res_model': 'crm.lead',
'res_id': self.id,
'res_name': self.name,
'fal_lead_id': self.id,
})
@api.multi
def generate_report_l3_single_with_lodger(self):
self.ensure_one()
report = self.env['ir.actions.report.xml'].with_context(self._context).search([('report_name', '=', 'greenloc_crm_ext.report_l3_single_with_lodger')])
if report:
document = self.env['report'].with_context(self._context).get_pdf([self.id], report.report_name)
available_docs_by_code = {docs.code: docs.id for docs in self.fal_document_signature_ids}
if 's3_with' in available_docs_by_code:
current_version = self.env['greenloc.crm.lead.docs.sign.attachment'].browse(available_docs_by_code['s3_with']).version
self.env['greenloc.crm.lead.docs.sign.attachment'].browse(available_docs_by_code['s3_with']).version = current_version+1
return self.env['ir.attachment'].create({
'name': self.name + ' - SWL L3 -' + ' V' + str(current_version),
                    'datas_fname': self.name + ' - SWL L3 -' + ' V' + str(current_version) + '.pdf',
'datas': base64.b64encode(document),
'type': 'binary',
'res_model': 'crm.lead',
'res_id': self.id,
'res_name': self.name,
'fal_lead_id': self.id,
})
else:
return self.env['ir.attachment'].create({
'name': self.name + ' - SWL L3 -' + ' V0',
'datas_fname': self.name + ' - SWL L3 -' + ' V0.pdf',
'datas': base64.b64encode(document),
'type': 'binary',
'res_model': 'crm.lead',
'res_id': self.id,
'res_name': self.name,
'fal_lead_id': self.id,
})
@api.multi
def generate_report_l3_single_no_lodger(self):
self.ensure_one()
report = self.env['ir.actions.report.xml'].with_context(self._context).search([('report_name', '=', 'greenloc_crm_ext.report_l3_single_no_lodger')])
if report:
document = self.env['report'].with_context(self._context).get_pdf([self.id], report.report_name)
available_docs_by_code = {docs.code: docs.id for docs in self.fal_document_signature_ids}
if 's3_no' in available_docs_by_code:
current_version = self.env['greenloc.crm.lead.docs.sign.attachment'].browse(available_docs_by_code['s3_no']).version
self.env['greenloc.crm.lead.docs.sign.attachment'].browse(available_docs_by_code['s3_no']).version = current_version+1
return self.env['ir.attachment'].create({
'name': self.name + ' - SNL L3 -' + ' V' + str(current_version),
'datas_fname': self.name + ' - SNL L3 -' + ' V' + str(current_version) + '.pdf',
'datas': base64.b64encode(document),
'type': 'binary',
'res_model': 'crm.lead',
'res_id': self.id,
'res_name': self.name,
'fal_lead_id': self.id,
})
else:
return self.env['ir.attachment'].create({
'name': self.name + ' - SNL L3 -' + ' V0',
'datas_fname': self.name + ' - SNL L3 -' + ' V0.pdf',
'datas': base64.b64encode(document),
'type': 'binary',
'res_model': 'crm.lead',
'res_id': self.id,
'res_name': self.name,
'fal_lead_id': self.id,
})
| [
"[email protected]"
]
| |
b0718572e15181513d4f6940c68e176e3433a69f | f0a4ba1f1f941092e68e4b1ef9cff0d3852199ef | /Do_it!/3.검색 알고리즘/해시/chained_hash.py | 304cce40e99b6dc0f40b07abb5cd4a9ee238b869 | []
| no_license | lsb530/Algorithm-Python | d41ddd3ca7675f6a69d322a4646d75801f0022b2 | a48c6df50567c9943b5d7218f874a5c0a85fcc6d | refs/heads/master | 2023-06-18T04:36:09.221769 | 2021-06-28T16:49:35 | 2021-06-28T16:49:35 | 367,775,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,187 | py | # 체인법(chaining)으로 해시 함수 구현하기
# Chaining links data items whose hash values collide into a chain-shaped linked list.
# It is also known as open hashing.
from __future__ import annotations
import hashlib
from typing import Any
class Node:
    """A node that makes up the hash table."""
    def __init__(self, key: Any, value: Any, next: Node) -> None:
        """Initialize the node."""
        self.key = key # key
        self.value = value # value
        self.next = next # reference to the next node in the chain
class ChainedHash:
    """Hash table implemented with chaining."""
    def __init__(self, capacity: int) -> None:
        """Initialize the hash table."""
        self.capacity = capacity # size of the hash table
        self.table = [None] * self.capacity # declare the hash table (a list of buckets)
    def hash_value(self, key: Any) -> int:
        """Compute the hash value of key."""
        if isinstance(key, int):
            return key % self.capacity
        return (int(hashlib.sha256(str(key).encode()).hexdigest(), 16) % self.capacity)
    # search() looks up an element by its key
    def search(self, key: Any) -> Any:
        """Search for the element whose key is key and return its value."""
        hash = self.hash_value(key) # hash value of the key being searched
        p = self.table[hash] # current node
        while p is not None:
            if p.key == key:
                return p.value # search succeeded
            p = p.next # move on to the next node
        return None # search failed
    # add() inserts an element
    def add(self, key: Any, value: Any) -> bool:
        """Add an element with the given key and value."""
        hash = self.hash_value(key) # hash value of the key being added
        p = self.table[hash] # current node
        while p is not None:
            if p.key == key:
                return False # add failed (key already exists)
            p = p.next # move on to the next node
        temp = Node(key, value, self.table[hash])
        self.table[hash] = temp # add the node at the head of the chain
        return True # add succeeded
    # remove() deletes an element
    def remove(self, key: Any) -> bool:
        """Remove the element whose key is key."""
        hash = self.hash_value(key) # hash value of the key being removed
        p = self.table[hash] # current node
        pp = None # node immediately before the current one
        while p is not None:
            if p.key == key: # key found; unlink this node
                if pp is None:
                    self.table[hash] = p.next
                else:
                    pp.next = p.next
                return True # removal succeeded
            pp = p
            p = p.next # move on to the next node
        return False # removal failed (key does not exist)
    # dump() prints every element
    def dump(self) -> None:
        """Dump the whole hash table."""
        for i in range(self.capacity):
            p = self.table[i]
            print(i, end='')
            while p is not None:
                print(f' -> {p.key} ({p.value})', end='')
                p = p.next
            print()
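# Minimal usage sketch (not part of the original listing): the capacity of 13
# and the sample key/value pairs below are illustrative assumptions only.
if __name__ == '__main__':
    hash_table = ChainedHash(13)
    hash_table.add(1, 'Alice')
    hash_table.add(14, 'Bob')        # 14 % 13 == 1, so it chains with key 1
    print(hash_table.search(14))     # -> Bob
    hash_table.remove(1)
    hash_table.dump()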
| [
"[email protected]"
]
| |
bd4162975e1ee1079c46ef6c8b1fc3c18b01a5c1 | 3ba18755bbf53a2e918a79e1c57a48f44ac1e670 | /venv/bin/isort | c3a1d2046d4613f88dd5974e0f230d1d122c8129 | []
| no_license | celaltas/FlaskProject | d57bddf99d807a97981d477048a3a5eb4a97d5a5 | 8a9fe33c970f99b09fcc565391a4f31861780468 | refs/heads/master | 2022-12-25T09:30:22.407644 | 2020-10-10T18:03:35 | 2020-10-10T18:03:35 | 302,960,750 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | #!/home/celal/VSCProjects/FlaskProject/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from isort.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
]
| ||
84435aed610560af56cf4755bfdce4e91530bb4a | f3693916a8b118bf139364604dac3f51235ed613 | /functional/Components/Distributions/Distribution_POST_Optional/test_TC_43677_2_Distributions_POST_Distributions_Vnet_Pptx_Slide_Option.py | bc58905895d05cefd139b0850212850500682fef | []
| no_license | muktabehera/QE | e7d62284889d8241d22506f6ee20547f1cfe6db1 | 3fedde591568e35f7b80c5bf6cd6732f8eeab4f8 | refs/heads/master | 2021-03-31T02:19:15.369562 | 2018-03-13T02:45:10 | 2018-03-13T02:45:10 | 124,984,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,675 | py | # -*- coding: UTF-8 -*-
"""PFE Component Tests - Distributions.
* TC-43677 - Distributions POST:
Verify that user is able to send slides(PPT, PPTx) for distribution using request POST "/distributions" with VideoNet as public delivery system.
Equivalent test CURL command:
curl -H "Host: <client_host>" -H "Authorization: Bearer <valid_token>"
-X POST -d @<JSON_data_file> -H "Content-Type: application/json"
"<PF_host>://<client_host>/distributions"
Same, with test data:
curl -H "Host: <client_host>" -H "Authorization: Bearer <valid_token>"
-X POST -d @<JSON_data_file> -H "Content-Type: application/json"
"<PF_host>://<client_host>/distributions"
JSON data sent to PathFinder in this test:
{'activationDate': '2017-09-20T07:36:46.542Z',
'distributionPolicy': 'OPTIONAL',
'files': [{'id': 'vnetPPTXOpt',
'sourceUrl': 'qedorigin://Auto_storage/slidex.pptx',
'streamMetadata': {'bitrateKbps': 100,
'contentType': 'UNSPECIFIED',
'height': 5,
'mimeType': 'application/vnd.openxmlformats-officedocument.presentationml.presentation',
'width': 10}}],
'id': 'vnetSlidePPTXOpt',
'name': 'Distribution with vnet PPTX Slide Opt',
'targetAudiences': [{'id': 'Broadcast_Videonet_Audience'}]}
"""
import pytest
from qe_common import *
logger = init_logger()
@pytest.mark.draft # remove this after script passed unit tests successfuly
@pytest.mark.components
@pytest.allure.story('Distributions')
@pytest.allure.feature('POST')
class Test_PFE_Components(object):
"""PFE Distributions test cases."""
@pytest.allure.link('https://jira.qumu.com/browse/TC-43677')
@pytest.mark.Distributions
@pytest.mark.POST
def test_TC_43677_POST_Distributions_Distributions_Vnet_Pptx_Slide(self, context):
"""TC-43677 - Distributions-POST
Verify that user is able to send slides(PPT, PPTx) for distribution using request POST "/distributions" with VideoNet as public delivery system."""
# Define a test step
with pytest.allure.step("""Verify that user is able to send slides(PPT, PPTx) for distribution using request POST "/distributions" with VideoNet as public delivery system."""):
### Positive test example
# Test case configuration
distributionDetails = context.sc.DistributionDetails(
activationDate='2017-09-20T07:36:46.542Z',
distributionPolicy='OPTIONAL',
expirationDate=None,
files=[{
'id': 'vnetPPTXOpt',
'sourceUrl': 'qedorigin://Auto_storage/slidex.pptx',
'streamMetadata': {
'bitrateKbps':
100,
'width':
10,
'height':
5,
'mimeType':
'application/vnd.openxmlformats-officedocument.presentationml.presentation',
'contentType':
'UNSPECIFIED'
}
}],
id='vnetSlidePPTXOpt',
name='Distribution with vnet PPTX Slide Opt',
status=None,
tags=None,
targetAudiences=[{
'id': 'Broadcast_Videonet_Audience'
}])
# createEntity the Distributions.
# The `check` call validates return code
# and some of the swagger schema.
# Most schema checks are disabled.
response = check(
context.cl.Distributions.createEntity(
body=distributionDetails
)
)
### Can add tests here to validate the response content
with pytest.allure.step("""Verify that user is able to send slides(PPT, PPTx) for distribution using request POST "/distributions" with VideoNet as public delivery system."""):
### Negative test example
# Test case configuration
distributionDetails = context.sc.DistributionDetails(
activationDate='2017-09-20T07:36:46.542Z',
distributionPolicy='OPTIONAL',
expirationDate=None,
files=[{
'id': 'vnetPPTXOpt',
'sourceUrl': 'qedorigin://Auto_storage/slidex.pptx',
'streamMetadata': {
'bitrateKbps':
100,
'width':
10,
'height':
5,
'mimeType':
'application/vnd.openxmlformats-officedocument.presentationml.presentation',
'contentType':
'UNSPECIFIED'
}
}],
id='vnetSlidePPTXOpt',
name='Distribution with vnet PPTX Slide Opt',
status=None,
tags=None,
targetAudiences=[{
'id': 'Broadcast_Videonet_Audience'
}])
# prepare the request, so we can modify it
request = context.cl.Distributions.createEntity(
body=distributionDetails
)
### Invalid JSON Error injection example
### Errors that result in valid JSON can be configured above.
### Otherwise, uncomment the code below (request.future....)
# Get the generated payload and corrupt the metric
# request.future.request.data = request.future.request.data.replace(
# '"metric": 1,', '"metric":,'
# )
# createEntity the Distributions, and check we got the error we expect
try:
client, response = check(
request,
quiet=True, returnResponse=True
)
except (HTTPBadRequest, HTTPForbidden) as e: # 400, 403 error
get_error_message(e) | expect.any(
should.start_with('may not be empty'),
should.start_with('Invalid page parameter specified'),
should.contain('Invalid Authorization Token')
)
else:
raise Exception(
"Expected error message, got {} status code instead.".format(
response.status_code))
| [
"[email protected]"
]
| |
440374b2f3813c91ed4c3c16714460c04352ff1d | 1ba58b17f33122abf4236e9e430a51d375e0eb53 | /km73/Ruban_Yehor/4/task1.py | 3b5e7ca9dacb7e460a733371cb9aa90330097712 | []
| no_license | igortereshchenko/amis_python | c4f8d86b88ab036d08ff0ce35c9b42ebeabecc42 | c6f0f2a70c82d5f269b3078eb296f82271b5bb10 | refs/heads/master | 2021-10-22T16:21:19.990650 | 2017-11-01T07:26:54 | 2017-11-01T07:26:54 | 104,785,028 | 0 | 139 | null | 2020-04-21T21:27:09 | 2017-09-25T18:11:42 | Python | UTF-8 | Python | false | false | 221 | py | a = float(input("Введите первое число - "))
b = float(input("Введите второе число - "))
if a > b:
ans = b
else:
ans = a
print("меньшее число -",ans)
input()
| [
"[email protected]"
]
| |
9a49844ddc8e2920e88c79f6f3cc8dc15536f458 | cb2ddcde8311d06f99e2308e94c58036a393f592 | /src/byro/members/migrations/0010_memberbalance.py | 8501edb4909802ca93bfb93b0e0de14116c680fc | [
"Apache-2.0"
]
| permissive | Lagertonne/byro | 9eea069709a7eeb8a80e024af97bc93fb019efa8 | d2d05b96d75f94848bd8b9af1a556a4a1e080320 | refs/heads/master | 2022-11-19T11:39:29.699413 | 2020-07-14T20:56:16 | 2020-07-14T20:56:16 | 279,690,577 | 0 | 0 | Apache-2.0 | 2020-07-14T20:52:09 | 2020-07-14T20:52:08 | null | UTF-8 | Python | false | false | 2,136 | py | # Generated by Django 2.1.8 on 2019-04-15 12:14
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [("members", "0009_auto_20180512_1810")]
operations = [
migrations.CreateModel(
name="MemberBalance",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"reference",
models.CharField(
blank=True,
help_text="For example an invoice number or a payment reference",
max_length=50,
null=True,
unique=True,
verbose_name="Reference",
),
),
(
"amount",
models.DecimalField(
decimal_places=2, max_digits=8, verbose_name="Amount"
),
),
("start", models.DateTimeField(verbose_name="Start")),
("end", models.DateTimeField(verbose_name="End")),
(
"state",
models.CharField(
choices=[
("paid", "paid"),
("partial", "partially paid"),
("unpaid", "unpaid"),
],
default="unpaid",
max_length=7,
),
),
(
"member",
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT,
related_name="balances",
to="members.Member",
),
),
],
)
]
| [
"[email protected]"
]
| |
0e2c57e01d581a53bb2452113c588ec91aa41688 | 93cc6795fc1b7f6a06b08480ad4fbde46fa02c7c | /base/files_for_ssh/update_or_create_bonus_template.py | 74007cf86b0fcbe1dbbc57ee7b22107730a02253 | []
| no_license | A-Zorg/msw_api | 4a04eae9738e77b528e79496b6653d3a07109ca5 | ccd73b7675f3d477a2eec30808eff975a247e70c | refs/heads/master | 2023-08-16T08:51:47.506514 | 2021-09-20T19:21:46 | 2021-09-20T19:21:46 | 325,813,014 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 456 | py | from index.models import CustomUser
from reconciliation.models import Bonus
from accounting_system.models import AccountType
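# NOTE (assumption): this file looks like an SSH execution template -- the
# {ACC_ID} and {VALUE} placeholders appear to be substituted by the caller
# before the snippet is run, so they are intentionally left unfilled here.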
acc_type = AccountType.objects.get(id={ACC_ID})
try:
bonus_object = Bonus.objects.get(account_type=acc_type)
bonus_object.decimal_percentage = {VALUE}
bonus_object.save()
except:
bonus_object = Bonus.objects.create(
account_type=acc_type,
decimal_percentage={VALUE}
)
bonus_object.save()
| [
"[email protected]"
]
| |
4ef44e5d20b17902e764ba4b376780476f7c678e | d3efc82dfa61fb82e47c82d52c838b38b076084c | /ETF/Creation_SA/YW_ETFSS_SZSG_060.py | 37d438d6681057abde41659bd9c6bf133b8f17ed | []
| no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,234 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
import time
sys.path.append("/home/yhl2/workspace/xtp_test/ETF")
from import_common import *
sys.path.append("/home/yhl2/workspace/xtp_test/ETF/etf_service")
from ETF_Basket_Add_Real import etf_basket_add_real
from ETF_GetComponentShare import etf_get_all_component_stk
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
class YW_ETFSS_SZSG_060(xtp_test_case):
def test_YW_ETFSS_SZSG_060(self):
        # ----------- ETF subscription (creation) -------------
title = '深圳ETF申购--全部成交(数量最大单位&费用>min)'
        # Define the expected results for the current test case
        # Expected status: initial, no fill, partially filled, fully filled, partial-cancel reported, partially cancelled, cancel pending, cancelled, rejected, cancel rejected, internally cancelled
        # xtp_ID and cancel_xtpID default to 0 and do not need to be changed
case_goal = {
'case_ID': 'ATC-202-59',
'期望状态': '全成',
'errorID': 0,
'errorMSG': '',
'是否生成报单': '是',
'是否是撤废': '否',
'xtp_ID': 0,
'cancel_xtpID': 0,
}
logger.warning(title + ', case_ID=' + case_goal['case_ID'])
unit_info = {
            'ticker': '169165',  # ETF code
            'etf_unit': 9,  # number of ETF subscription units
}
        # ----------- Query the component stock positions before the ETF subscription -------------
component_stk_info = etf_get_all_component_stk(unit_info['ticker'])
        # ----------- ETF subscription -------------
        # Parameters: security code, market, security type, security status, trading status, side (B = buy / S = sell), expected status, Api
stkparm = QueryEtfQty(unit_info['ticker'], '2', '14', '2', '0',
'B', case_goal['期望状态'], Api)
        # Define the order parameter details ------------------------------------------
        # If fetching the order parameters fails, the test case fails
if stkparm['返回结果'] is False:
rs = {
'用例测试结果': stkparm['返回结果'],
'用例错误原因': '获取下单参数失败, ' + stkparm['错误原因'],
}
etf_query_log(case_goal, rs)
self.assertEqual(rs['用例测试结果'], True)
else:
wt_reqs = {
'business_type':
Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_ETF'],
'market':
Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
'ticker':
stkparm['证券代码'],
'side':
Api.const.XTP_SIDE_TYPE['XTP_SIDE_PURCHASE'],
'price_type':
Api.const.XTP_PRICE_TYPE['XTP_PRICE_LIMIT'],
'quantity':
int(unit_info['etf_unit'] * stkparm['最小申赎单位']),
}
EtfParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
CaseParmInsertMysql(case_goal, wt_reqs)
rs = etfServiceTest(Api, case_goal, wt_reqs, component_stk_info)
etf_creation_log(case_goal, rs)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
a3a2cfa8d5e7e73ab92e6d6d8daef6b1f6fadefe | f4efa1e5ef98616b0044a96e08fede1584a5a5a5 | /atom-editor/lilac.py | 1305d1741a2f23ae0165465e8471cd80acbc652c | []
| no_license | LawrenceCWC/repo | fcafda53570c03703a8988b1bce17a2184a4f6bc | c2d614e11a9ea1b9c4bc9bd08b041bf9ee8ce2cd | refs/heads/master | 2020-12-25T04:19:43.803869 | 2016-06-22T12:28:55 | 2016-06-22T12:28:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 794 | py | #!/usr/bin/env python3
#
# This is a complex version of lilac.py for building
# a package from AUR.
#
# You can do something before/after building a package,
# including modifying the 'pkgver' and 'md5sums' in PKGBUILD.
#
# This is especially useful when an AUR package is
# out-of-date and you want to build a new one, or when you
# want to build a package directly from SourceForge but
# using the PKGBUILD from AUR.
#
# See also:
# [1] ruby-sass/lilac.py
# [2] aufs3-util-lily-git/lilac.py
# [3] octave-general/lilac.py
#
from lilaclib import *
build_prefix = 'extra-x86_64'
pre_build = aur_pre_build
post_build = aur_post_build
# do some cleanup here after building the package, regardless of result
# def post_build_always(success):
# pass
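# A hypothetical sketch (not used by this package) of a custom pre_build that
# first runs the stock AUR steps and then bumps pkgver in PKGBUILD via plain
# text substitution -- the version string and the regex are assumptions, shown
# only to illustrate the "modify the 'pkgver'" remark above.
# def pre_build():
#     import re
#     aur_pre_build()
#     new_ver = '1.2.3'  # e.g. scraped from upstream
#     with open('PKGBUILD') as f:
#         text = f.read()
#     text = re.sub(r'^pkgver=.*$', 'pkgver=' + new_ver, text, flags=re.M)
#     with open('PKGBUILD', 'w') as f:
#         f.write(text)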
if __name__ == '__main__':
single_main(build_prefix)
| [
"[email protected]"
]
| |
0abeef728ce5d91b2a87d84cc216dc46f6697521 | 830f50885bbf7cdeffc08097b55f662a498cf518 | /python/downloader/FileSelectionDialog.py | 00f5a2ffa23e3a11bbed65bf96d9e9293f090628 | []
| no_license | precimilo/mcandre | 86b2e77e28e3bd14d02e40eb9978ae4b7ccf9fcd | e9ab9e3fce7aba93b6528c40e06bde4ae0d461a7 | refs/heads/master | 2020-12-30T17:32:37.406612 | 2013-05-09T18:46:46 | 2013-05-09T18:47:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,019 | py | # FileSelectionDialog.py - John Finlay (http://www.pygtk.org/pygtk2tutorial/index.html)
# Andrew Pennebaker
import gtk
class FileSelectionDialog:
PENDING="Pending"
OK="OK"
CANCEL="Cancel"
def __init__(self, titleText="File Selecion", selectionText=""):
self.state=self.PENDING
self.fileSelection=gtk.FileSelection(title=titleText)
self.fileSelection.selection_entry.set_text(selectionText)
self.fileSelection.ok_button.connect("clicked", self.okEvent)
self.fileSelection.cancel_button.connect("clicked", self.cancelEvent)
self.fileSelection.show_all()
# loop until button clicked
while self.state==self.PENDING:
while gtk.events_pending():
gtk.main_iteration()
def okEvent(self, widget=None, event=None, data=None):
self.fileName=self.fileSelection.get_filename()
self.state=self.OK
self.fileSelection.destroy()
def cancelEvent(self, widget=None, event=None, data=None):
self.state=self.CANCEL
self.fileSelection.destroy()
def getFileName(self):
return self.fileName | [
"[email protected]"
]
| |
48283e3e193ef9a2997ee9f3474a16b8b385f713 | b5f38cc8a97f67ba1df0bea0e111ad0d3f14dc13 | /test/asyncio_tests/test_asyncio_bulk.py | 6cb0336378f57f8d9eaf68d25394b666a1aa11ff | [
"Apache-2.0"
]
| permissive | yutiansut/motor | 10cb8e68e8c776fa33262608e13b611941fbdb13 | 132352beda3e4215e68991c5165b4ccd06e71a2c | refs/heads/master | 2021-09-24T07:11:36.995179 | 2018-01-12T10:05:58 | 2018-01-12T10:05:58 | 106,511,806 | 1 | 0 | Apache-2.0 | 2018-10-05T02:19:35 | 2017-10-11T05:59:51 | Python | UTF-8 | Python | false | false | 3,292 | py | # Copyright 2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Motor's bulk API with asyncio."""
import unittest
from pymongo.errors import BulkWriteError
from motor.motor_asyncio import AsyncIOMotorBulkOperationBuilder
from test.asyncio_tests import asyncio_test, AsyncIOTestCase
class TestAsyncIOBulk(AsyncIOTestCase):
# Little testing is needed: Most of the logic is in PyMongo, and Motor's
# bulk operations are lightly tested with Tornado already.
@asyncio_test(timeout=30)
def test_multiple_error_ordered_batch(self):
yield from self.collection.delete_many({})
yield from self.collection.create_index('a', unique=True)
try:
bulk = self.collection.initialize_ordered_bulk_op()
self.assertTrue(isinstance(bulk,
AsyncIOMotorBulkOperationBuilder))
bulk.insert({'b': 1, 'a': 1})
bulk.find({'b': 2}).upsert().update_one({'$set': {'a': 1}})
bulk.find({'b': 3}).upsert().update_one({'$set': {'a': 2}})
bulk.find({'b': 2}).upsert().update_one({'$set': {'a': 1}})
bulk.insert({'b': 4, 'a': 3})
bulk.insert({'b': 5, 'a': 1})
try:
yield from bulk.execute()
except BulkWriteError as exc:
result = exc.details
self.assertEqual(exc.code, 65)
else:
self.fail("Error not raised")
self.assertEqual(1, result['nInserted'])
self.assertEqual(1, len(result['writeErrors']))
cursor = self.collection.find({}, {'_id': False})
docs = yield from cursor.to_list(None)
self.assertEqual([{'a': 1, 'b': 1}], docs)
finally:
yield from self.collection.drop()
@asyncio_test
def test_single_unordered_batch(self):
yield from self.collection.delete_many({})
bulk = self.collection.initialize_unordered_bulk_op()
self.assertTrue(isinstance(bulk,
AsyncIOMotorBulkOperationBuilder))
bulk.insert({'a': 1})
bulk.find({'a': 1}).update_one({'$set': {'b': 1}})
bulk.find({'a': 2}).upsert().update_one({'$set': {'b': 2}})
bulk.insert({'a': 3})
bulk.find({'a': 3}).remove()
result = yield from bulk.execute()
self.assertEqual(0, len(result['writeErrors']))
upserts = result['upserted']
self.assertEqual(1, len(upserts))
self.assertEqual(2, upserts[0]['index'])
self.assertTrue(upserts[0].get('_id'))
a_values = yield from self.collection.distinct('a')
self.assertEqual(
set([1, 2]),
set(a_values))
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
48aa07668d158a84f581a1cc6189dcb51bb02fd9 | eaa71d0669f9f161c15dc45c37fadb1ce2bcea9e | /Pagina Web/CP_S10/app/rutas.py | b6b347366bc23ec9a3952df3657ed7d6bd53528b | []
| no_license | luiskar268/Ciclo-3 | 566f4bec8af5f05ff458d698c384238579e095d6 | 5a076959c1c0958290133197f9dde8d0e7f1a388 | refs/heads/master | 2023-08-13T14:43:06.925887 | 2021-10-08T02:37:26 | 2021-10-08T02:37:26 | 407,919,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 604 | py | from app import app
from flask import render_template
from app.forms import FormInicio
@app.route('/')
@app.route('/index')
def index():
usuario = {'usuario':'...'}
comentarios = [
{
'autor':{'usuario':'...'},
'comentario':'...'
},
{
'autor':{'usuario':'...'},
'comentario':'...'
}
]
return render_template('index.html', titulo="Inicio", usuario=usuario, comentarios=comentarios)
@app.route('/login')
def login():
form = FormInicio()
return render_template('iniciar_sesion.html',titulo='Iniciar Sesión', form=form) | [
"[email protected]"
]
| |
5f9004f06c556e9677e12b41dec0c7c9d095e410 | 3ffeeae8a9a3245d8998d94aa08f680f00056cad | /26.删除排序数组中的重复项.py | caabdfab8cced80dd2fba20d79a5e03d77e43c4d | []
| no_license | Ezi4Zy/leetcode | 6e293e5c07a7d8c3e38f9445ff24330134ef6c48 | 9d394cd2862703cfb7a7b505b35deda7450a692e | refs/heads/master | 2022-04-09T14:11:36.957861 | 2022-03-09T10:30:30 | 2022-03-09T10:30:30 | 57,290,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | #
# @lc app=leetcode.cn id=26 lang=python
#
# [26] Remove Duplicates from Sorted Array
#
# @lc code=start
class Solution(object):
def removeDuplicates(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
        if not nums:
            return 0
        # two pointers: `begin` marks the last unique value kept in place,
        # `end` scans ahead for the next distinct value
        begin = 0
        end = 1
        while end < len(nums):
            if nums[begin] != nums[end]:
                begin += 1
                nums[begin] = nums[end]
            end += 1
        return begin + 1
# @lc code=end
| [
"[email protected]"
]
| |
be24d8d03ccdda316fc996f645d73db8ce92e3b6 | 9f387c703dbf4d970d0259424c7b299108c369f5 | /dd_sdk_1_0/dd_sdk_1_0/models/snmp_username.py | 0999f08689f3dea29ff5b00fe9032335d786557e | []
| no_license | gcezaralmeida/datadomain_sdk_python | c989e6846bae9435c523ab09e230fc12d020f7f1 | e102ec85cea5d888c8329626892347571832e079 | refs/heads/main | 2023-08-23T22:42:47.083754 | 2021-10-25T21:52:49 | 2021-10-25T21:52:49 | 370,805,524 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,753 | py | # coding: utf-8
"""
DataDomain Rest API Documentation
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from dd_sdk_1_0.configuration import Configuration
class SnmpUsername(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self, _configuration=None): # noqa: E501
"""SnmpUsername - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(SnmpUsername, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SnmpUsername):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, SnmpUsername):
return True
return self.to_dict() != other.to_dict()
| [
"[email protected]"
]
| |
83a10737c06db1f4578aacbdd1b538c1399319b9 | ce06aaf43d7ea2b62640748c1e41856d038d86ac | /examples/z/test_z_bfoa_multiniche_clustering_v1.py | 485d6402cc28ee8152398a9028820fb59d3c71d0 | []
| no_license | dananjayamahesh/MunichBFOR | 87ae1b672cddd5958d4080011389b1fa33384677 | 0cdb24344345429decf58ef85e444f72b40fd061 | refs/heads/master | 2023-02-16T18:32:34.303146 | 2023-02-10T11:27:01 | 2023-02-10T11:27:01 | 144,000,550 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 10,893 | py | import SwarmPackagePy
from SwarmPackagePy import testFunctions as tf
from SwarmPackagePy import multiniche_benchmark as mbtf
from SwarmPackagePy import animation, animation3D
import matplotlib.pyplot as plt
'''
Some notes on clustering
d_max has a huge impact: 3 is not working, but double the size of d_min (with d_min=0.2) is working
'''
''' BEST WORK - NICELY WORKING- SHARING on F3 TEST
n =100 #50
r = 20
lamda = 1
f = tf.F3_test #tf.F2_var1 #tf.F2_var1
dim =2
d_min = 0.2
d_max = 0.4
sigma_share = 0.2 #0.6 NOT WORKING
step_size = 0.1 #0.05
alh = SwarmPackagePy.z_bfoa_multiniche_clustering_v1(n, f, -r, r, dim, 100, 16, 2, 8, 12, step_size, 0.25, 0.05, 0.2, 0.05, 10, lamda, 0.03, 'none', 'swarm2', 'false', sigma_share)
#Adaptive
#alh = SwarmPackagePy.z_bfoa_multiniche_clustering_v1(n, f, -r, r, dim, 100, 16, 2, 8, 12, 0.05, 0.25, 0.05, 0.2, 0.05, 10, lamda, 0.03, 'adaptive1', 'swarm2', 'false', sigma_share)
'''
''' 2018/04/17
#Clustering works for 4 niches, but what happens with 2 niches -BEST DEMO
#BEST 2 - with Swarm2-WOW - WOOOOOOOOOOWWWW
n =100 #50
r = 19
lamda = 100
f = tf.gaussian_diff_multimodal4_positive
dim =2
sigma_share = 0.2 #0.6 NOT WORKING
#d_min= 0.2, d_max = 0.3
d_min = 0.2
d_max = 0.4
#0.3 #2 #0.3 #0.6 #0.4 #3 #0.4 #3 #3 is not quite working, 0.4 is perfect, 3 is the error
clust_alpha = 2
alh = SwarmPackagePy.z_bfoa_multiniche_clustering_v1(n, f, -r, r, dim, 100, 16, 2, 8, 12, 0.1, 0.25, 0.05, 0.2, 0.05, 10, lamda, 0.03, 'adaptive1', 'swarm2', 'false', sigma_share,d_min, d_max, clust_alpha)
#both none and adaptive1 working
'''
##############################################################################################################
#New Benchmark Test Cases
'''rastrigin Working , Change last Visualize, LAST one is the BEST, This is EXTRA, Only Change is n=100 not 500
n =100 #50
r = 5 #lb =-1 ub=5
lamda = 100
#f = tf.f5_griewank_function
f = tf.f4_rastrigin_function_var1
dim =2
sigma_share = 0.2 #0.6 NOT WORKING
d_min = 0.2#0.2 #1
d_max = 0.4 #1 #3 #initially 3 Although 1 is good
clust_alpha = 2
step_size = 0.01 #0.005 #0.1 0.005 is also good
alh = SwarmPackagePy.z_bfoa_multiniche_clustering_v1(n, f, -1, r, dim, 100, 16, 2, 8, 12, step_size, 0.25, 0.05, 0.2, 0.05, 10, lamda, 0.03, 'none', 'swarm2', 'false', sigma_share,d_min, d_max, clust_alpha)
'''
#RASTRIGIN VAR2
''' ************************************************WORKING , I sed -1 to 2 as range, ONLY CHAGE from sharing
#rastrigin BEST DEMO - WOW - NICHES and CLUSTERS are equals
n = 200 #100#50
r = 5 #lb =-1 ub=5
lamda = 100
#f = tf.f5_griewank_function
f = tf.f4_rastrigin_function_var1
dim =2
sigma_share = 0.2 #0.6 NOT WORKING
d_min = 0.2#0.2 #1
d_max = 0.4 #1 #3 #initially 3 Although 1 is good
clust_alpha = 2
step_size = 0.01 #0.005 #0.1 0.005 is also good
#fixed step working
#alh = SwarmPackagePy.z_bfoa_multiniche_clustering_v1(n, f, -r, r, dim, 100, 16, 2, 8, 12, step_size, 0.25, 0.05, 0.2, 0.05, 10, lamda, 0.03, 'none', 'swarm2', 'false', sigma_share,d_min, d_max, clust_alpha)
#adaptive - WORKING, BUT OT WELL
alh = SwarmPackagePy.z_bfoa_multiniche_clustering_v1(n, f, -r, r, dim, 100, 16, 2, 8, 12, step_size, 0.25, 0.05, 0.2, 0.05, 10, lamda, 0.03, 'none', 'swarm2', 'false', sigma_share,d_min, d_max, clust_alpha)
'''
#RATRIGIN VAR 1
''' ************************************************WORKING , I sed -1 to 2 as range, ONLY CHAGE from sharing
#rastrigin BEST DEMO - WOW - NICHES and CLUSTERS are equals
n =100 #50
r = 2 #lb =-1 ub=5
lamda = 100
#f = tf.f5_griewank_function
f = tf.f4_rastrigin_function_var1
dim =2
sigma_share = 0.2 #0.6 NOT WORKING
d_min = 0.2#0.2 #1
d_max = 0.4 #1 #3 #initially 3 Although 1 is good
clust_alpha = 2
step_size = 0.01 #0.005 #0.1 0.005 is also good
alh = SwarmPackagePy.z_bfoa_multiniche_clustering_v1(n, f, -r, r, dim, 100, 16, 2, 8, 12, step_size, 0.25, 0.05, 0.2, 0.05, 10, lamda, 0.03, 'none', 'swarm2', 'false', sigma_share,d_min, d_max, clust_alpha)
'''
''' ************************************************WORKING
#rastrigin BEST DEMO - WOW - NICHES and CLUSTERS are equals
n =100 #50
r = 5 #lb =-1 ub=5
lamda = 100
#f = tf.f5_griewank_function
f = tf.f4_rastrigin_function_var1
dim =2
sigma_share = 0.2 #0.6 NOT WORKING
d_min = 0.2#0.2 #1
d_max = 0.4 #1 #3 #initially 3 Although 1 is good
clust_alpha = 2
step_size = 0.01 #0.005 #0.1 0.005 is also good
alh = SwarmPackagePy.z_bfoa_multiniche_clustering_v1(n, f, -1, r, dim, 100, 16, 2, 8, 12, step_size, 0.25, 0.05, 0.2, 0.05, 10, lamda, 0.03, 'none', 'swarm2', 'false', sigma_share,d_min, d_max, clust_alpha)
'''
''' WORKING
#M special multimodal function m WOrking only for fixed length
n =100 #50
r = 1
lamda = 1000 #400 #100 #100 is not working, massive displacemets
#f = tf.f5_griewank_function
f = tf.f7_M_function
dim =2
sigma_share = 0.2 #0.6 NOT WORKING
d_min = 0.2#0.2 #1
d_max = 0.4 #3
clust_alpha = 2
step_size = 0.005 #0.1
#alh = SwarmPackagePy.z_bfoa_multiniche_clustering_v1(n, f, 0, r, dim, 100, 16, 2, 8, 12, step_size, 0.25, 0.05, 0.2, 0.05, 10, lamda, 0.03, 'none', 'swarm2', 'false', sigma_share,d_min, d_max, clust_alpha)
#Full range -1 to 1
#alh = SwarmPackagePy.z_bfoa_multiniche_clustering_v1(n, f, -r, r, dim, 100, 16, 2, 8, 12, step_size, 0.25, 0.05, 0.2, 0.05, 10, lamda, 0.03, 'none', 'swarm2', 'false', sigma_share,d_min, d_max, clust_alpha)
#adaptive4
alh = SwarmPackagePy.z_bfoa_multiniche_clustering_v1(n, f, -r, r, dim, 100, 16, 2, 8, 12, step_size, 0.25, 0.05, 0.2, 0.05, 10, lamda, 0.03, 'adaptive1', 'swarm2', 'false', sigma_share,d_min, d_max, clust_alpha)
'''
""" Great Output - BEST DEMO -IDEAL for DEMO- Carefull about the stepsize
#Griewank
n =100 #50
r = 10
lamda = 100
#f = tf.f5_griewank_function
f = tf.f5_griewank_function_var1
dim =2
sigma_share = 0.2 #0.6 NOT WORKING
d_min = 0.2#0.2 #1
d_max = 0.4 #3
clust_alpha = 2
step_size = 0.05 #0.1 #0.01 #0.1 #0.01 is BEST, 0.1 is Working, 0.05 is decent
#VGOOD alh = SwarmPackagePy.z_bfoa_multiniche_clustering_v1(n, f, -r, r, dim, 100, 16, 2, 8, 12, step_size, 0.25, 0.05, 0.2, 0.05, 10, lamda, 0.03, 'none', 'swarm2', 'false', sigma_share,d_min, d_max, clust_alpha)
alh = SwarmPackagePy.z_bfoa_multiniche_clustering_v1(n, f, -r, r, dim, 100, 16, 2, 8, 12, step_size, 0.25, 0.05, 0.2, 0.05, 10, lamda, 0.03, 'adaptive1', 'swarm2', 'false', sigma_share,d_min, d_max, clust_alpha)
#ADAPTIVE is also VGOOD
"""
#BEST ACKLEY WITH SHARING SCHEME
#'''
#ACkely 1
n =100 #50
r = 2
lamda = 400#100 is the Original, but no workinh
f = tf.f3_ackley_function
dim =2
sigma_share = 0.2 #0.6 NOT WORKING
d_min = 0.2#0.2 #1
d_max = 0.4 #0.3
clust_alpha = 2
step_size = 0.01 #0.1
#non adaptive os working fine
alh = SwarmPackagePy.z_bfoa_multiniche_clustering_v1(n, f, -r, r, dim, 100, 16, 2, 8, 12, step_size, 0.25, 0.05, 0.2, 0.05, 10, lamda, 0.03, 'none', 'swarm2', 'false', sigma_share,d_min, d_max, clust_alpha)
#alh = SwarmPackagePy.z_bfoa_multiniche_clustering_v1(n, f, -r, r, dim, 100, 16, 2, 8, 12, step_size, 0.25, 0.05, 0.2, 0.05, 10, lamda, 0.03, 'adaptive1', 'swarm2', 'false', sigma_share,d_min, d_max, clust_alpha)
#adaptive Have some issues
#'''
###############################################################################################################
'''
#Clustering works for 4 niches, but what happens with 2 niches
#BEST 2 - with Swarm2-WOW - WOOOOOOOOOOWWWW
n =100 #50
r = 19
lamda = 100
f = tf.gaussian_diff_multimodal4_positive
dim =2
sigma_share = 0.2 #0.6 NOT WORKING
#d_min= 0.2, d_max = 0.3
d_min = 1 #0.2 #1
d_max = 2 #0.3 #0.6 #0.4 #3 #0.4 #3 #3 is not quite working, 0.4 is perfect, 3 is the error
clust_alpha = 2
alh = SwarmPackagePy.z_bfoa_multiniche_clustering_v1(n, f, -r, r, dim, 100, 16, 2, 8, 12, 0.1, 0.25, 0.05, 0.2, 0.05, 10, lamda, 0.03, 'none', 'swarm2', 'false', sigma_share,d_min, d_max, clust_alpha)
#both none and adaptive1 working
'''
#TEST CASES
######################################################
'''NOT GOOD WITH DIFF 2
n =100 #50
r = 19
lamda = 100
f = tf.gaussian_diff_multimodal_positive
dim =2
sigma_share = 0.2 #0.6 NOT WORKING
alh = SwarmPackagePy.z_bfoa_multiniche_clustering_v1(n, f, -r, r, dim, 100, 16, 2, 8, 12, 0.05, 0.25, 0.05, 0.2, 0.05, 10, lamda, 0.03, 'adaptive1', 'swarm2', 'false', sigma_share)
'''
#######################################################
''' #BEST CLUSTERING d_max =3, d_min=1 d_min = 1 d_max = 5
clust_alpha = 1
n =100 #50
r = 20
lamda = 100
f = tf.gaussian_diff_multimodal4_positive
dim =2
d_min = 1
d_max = 5
clust_alpha = 1
sigma_share = 0.2 #0.6 NOT WORKING
alh = SwarmPackagePy.z_bfoa_multiniche_clustering_v1(n, f, -r, r, dim, 100, 16, 2, 8, 12, 0.05, 0.25, 0.05, 0.2, 0.05, 10, lamda, 0.03, 'adaptive1', 'swarm2', 'false', sigma_share, d_min, d_max, clust_alpha)
'''
#######################################################
'''
#BEST 2 - with Swarm2-WOW - WOOOOOOOOOOWWWW - Sharing
n =100 #50
r = 20
lamda = 100
f = tf.gaussian_diff_multimodal4_positive
dim =2
sigma_share = 0.2 #0.6 NOT WORKING
alh = SwarmPackagePy.z_bfoa_multiniche_clustering_v1(n, f, -r, r, dim, 100, 16, 2, 8, 12, 0.05, 0.25, 0.05, 0.2, 0.05, 10, lamda, 0.03, 'adaptive1', 'swarm2', 'false', sigma_share)
'''
#######################################################
"""
n =100 #50
r = 15
lamda = 400
f = tf.F4 #tf.F2_var1 #tf.F2_var1
dim =2
sigma_share = 0.2 #0.6 NOT WORKING
alh = SwarmPackagePy.z_bfoa_multiniche_clearing_v1(n, f, -r, r, dim, 100, 16, 2, 8, 12, 0.05, 0.25, 0.05, 0.2, 0.05, 10, lamda, 0.03, 'adaptive1', 'swarm2', 'false', sigma_share)
"""
"""
Massive Deceptive Multimodal
n =100 #50
r = 5
lamda = 10000
f = tf.F5_var1 #tf.F2_var1 #tf.F2_var1
dim =2
sigma_share = 0.2 #0.6 NOT WORKING
alh = SwarmPackagePy.z_bfoa_multiniche_clearing_v1(n, f, -r, r, dim, 100, 16, 2, 8, 12, 0.05, 0.25, 0.05, 0.2, 0.05, 10, lamda, 0.03, 'adaptive1', 'swarm2', 'false', sigma_share)
"""
fits = alh._get_jfits()
plt.plot(fits, 'b', label='J-fit')
jcclist = alh._get_jcclist()
plt.plot(jcclist,'r', label='J-cc')
jarlist = alh._get_jarlist()
plt.plot(jarlist,'g', label='J-ar')
jlist = alh._get_jlist()
plt.plot(jlist,'y', label='J')
jblist = alh._get_jblist()
plt.plot(jblist,'p', label='J-best')
plt.legend()
plt.show()
#plt.pause(0.001)
#plt.subplot(2, 1, 2)
steps = alh._get_csteps()
#print(steps)
plt.plot(steps)
plt.show()
#Average Over Genrations
avgsteps = alh._get_avg_steps()
avgj = alh._get_avg_j()
avgjfit = alh._get_avg_jfit()
plt.plot(avgj,'b')
plt.plot(avgjfit, 'r')
plt.show()
plt.plot(avgsteps,'b')
plt.show()
numniches = alh._get_num_niches()
plt.plot(numniches,'r')
plt.show()
animation(alh.get_agents(), f, -r, r,dim)
animation3D(alh.get_agents(), f, -r, r,dim)
#only for assymetric domain, M function
#animation(alh.get_agents(), f, 0, r,dim)
#animation3D(alh.get_agents(), f, 0, r,dim)
#for rastrigan, f4_rastrigin_function_var1
#animation(alh.get_agents(), f, -1, r,dim)
#animation3D(alh.get_agents(), f, -1, r,dim)
| [
"[email protected]"
]
| |
de1022e98235b9d739417fb743639087694beff0 | cd921f57b4ea51f8cb598c76e6766dc530909b8a | /tensorflow_federated/python/tensorflow_libs/tensor_utils_test.py | e928317481ee4b261d976b9a17a77600e5ef5f55 | [
"Apache-2.0"
]
| permissive | Catherineylp/federated | f4d30d8eb7fa718ac5d1a62549f244d03120cc73 | 7a5549f3fb0eb2e3b5cdcb4788a8856cbfa17416 | refs/heads/master | 2021-07-12T19:01:44.935095 | 2020-09-21T02:24:28 | 2020-09-21T02:24:28 | 202,102,353 | 0 | 0 | Apache-2.0 | 2019-08-13T08:45:24 | 2019-08-13T08:45:23 | null | UTF-8 | Python | false | false | 7,678 | py | # Lint as: python3
# Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensor_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow as tf
from tensorflow_federated.python.common_libs import test
from tensorflow_federated.python.tensorflow_libs import tensor_utils
class TensorUtilsTest(test.TestCase):
def test_check_nested_equal(self):
nested_dict = {
'KEY1': {
'NESTED_KEY': 0
},
'KEY2': 1,
}
nested_list = [('KEY1', ('NESTED_KEY', 0)), ('KEY2', 1)]
flat_dict = {
'KEY1': 0,
'KEY2': 1,
}
nested_dtypes = {
'x': [tf.int32, tf.float32],
'y': tf.float32,
}
nested_shapes = {
# N.B. tf.TensorShape([None]) == tf.TensorShape([None])
# returns False, so we can't use a None shape here.
'x': [[1], [3, 5]],
'y': [1],
}
# Should not raise an exception.
tensor_utils.check_nested_equal(nested_dict, nested_dict)
tensor_utils.check_nested_equal(nested_list, nested_list)
tensor_utils.check_nested_equal(flat_dict, flat_dict)
tensor_utils.check_nested_equal(nested_dtypes, nested_dtypes)
tensor_utils.check_nested_equal(nested_shapes, nested_shapes)
with self.assertRaises(TypeError):
tensor_utils.check_nested_equal(nested_dict, nested_list)
with self.assertRaises(ValueError):
# Different nested structures.
tensor_utils.check_nested_equal(nested_dict, flat_dict)
# Same as nested_dict, but using float values. Equality still holds for
# 0 == 0.0 despite different types.
nested_dict_different_types = {
'KEY1': {
'NESTED_KEY': 0.0
},
'KEY2': 1.0,
}
tf.nest.assert_same_structure(nested_dict, nested_dict_different_types)
# Same as nested_dict but with one different value
nested_dict_different_value = {
'KEY1': {
'NESTED_KEY': 0.5
},
'KEY2': 1.0,
}
with self.assertRaises(ValueError):
tensor_utils.check_nested_equal(nested_dict, nested_dict_different_value)
tensor_utils.check_nested_equal([None], [None])
def always_neq(x, y):
del x, y
return False
with self.assertRaises(ValueError):
tensor_utils.check_nested_equal([1], [1], always_neq)
def test_to_var_dict(self):
v1 = tf.Variable(0, name='v1')
v2 = tf.Variable(0, name='v2')
d0 = tensor_utils.to_var_dict([])
self.assertIsInstance(d0, collections.OrderedDict)
self.assertEmpty(d0)
d1 = tensor_utils.to_var_dict([v1])
self.assertIsInstance(d1, collections.OrderedDict)
self.assertLen(d1, 1)
self.assertEqual(d1['v1'], v1)
d2 = tensor_utils.to_var_dict([v1, v2])
self.assertIsInstance(d2, collections.OrderedDict)
self.assertLen(d2, 2)
self.assertEqual(d2['v1'], v1)
self.assertEqual(d2['v2'], v2)
with self.assertRaises(TypeError):
tensor_utils.to_var_dict(v1)
with self.assertRaises(TypeError):
tensor_utils.to_var_dict([tf.constant(1)])
def test_to_var_dict_preserves_order(self):
a = tf.Variable(0, name='a')
b = tf.Variable(0, name='b')
c = tf.Variable(0, name='c')
var_dict = tensor_utils.to_var_dict([c, a, b])
self.assertEqual(['c', 'a', 'b'], list(var_dict.keys()))
def test_to_var_dict_duplicate_names(self):
v1 = tf.Variable(0, name='foo')
v2 = tf.Variable(0, name='foo')
assert v1.name == v2.name
with self.assertRaisesRegexp(ValueError, 'multiple.*foo'):
tensor_utils.to_var_dict([v1, v2])
def test_to_odict(self):
d1 = {'b': 2, 'a': 1}
odict1 = tensor_utils.to_odict(d1)
self.assertIsInstance(odict1, collections.OrderedDict)
self.assertCountEqual(d1, odict1)
odict2 = tensor_utils.to_odict(odict1)
self.assertEqual(odict1, odict2)
with self.assertRaises(TypeError):
tensor_utils.to_odict({1: 'a', 2: 'b'})
def test_zero_all_if_any_non_finite(self):
def expect_ok(structure):
with tf.Graph().as_default():
result, error = tensor_utils.zero_all_if_any_non_finite(structure)
with self.session() as sess:
result, error = sess.run((result, error))
try:
tf.nest.map_structure(np.testing.assert_allclose, result, structure)
except AssertionError:
self.fail('Expected to get input {} back, but instead got {}'.format(
structure, result))
self.assertEqual(error, 0)
expect_ok([])
expect_ok([(), {}])
expect_ok(1.1)
expect_ok([1.0, 0.0])
expect_ok([1.0, 2.0, {'a': 0.0, 'b': -3.0}])
def expect_zeros(structure, expected):
with tf.Graph().as_default():
result, error = tensor_utils.zero_all_if_any_non_finite(structure)
with self.session() as sess:
result, error = sess.run((result, error))
try:
tf.nest.map_structure(np.testing.assert_allclose, result, expected)
except AssertionError:
self.fail('Expected to get zeros, but instead got {}'.format(result))
self.assertEqual(error, 1)
expect_zeros(np.inf, 0.0)
expect_zeros((1.0, (2.0, np.nan)), (0.0, (0.0, 0.0)))
expect_zeros((1.0, (2.0, {
'a': 3.0,
'b': [[np.inf], [np.nan]]
})), (0.0, (0.0, {
'a': 0.0,
'b': [[0.0], [0.0]]
})))
def test_is_scalar_with_list(self):
self.assertRaises(TypeError, tensor_utils.is_scalar, [10])
def test_is_scalar_with_bool(self):
self.assertRaises(TypeError, tensor_utils.is_scalar, True)
def test_is_scalar_with_tf_constant(self):
self.assertTrue(tensor_utils.is_scalar(tf.constant(10)))
def test_is_scalar_with_scalar_tf_variable(self):
self.assertTrue(tensor_utils.is_scalar(tf.Variable(0.0, 'scalar')))
def test_is_scalar_with_nonscalar_tf_variable(self):
self.assertFalse(
tensor_utils.is_scalar(tf.Variable([0.0, 1.0], 'notscalar')))
def test_same_shape(self):
self.assertTrue(
tensor_utils.same_shape(tf.TensorShape(None), tf.TensorShape(None)))
self.assertTrue(
tensor_utils.same_shape(tf.TensorShape([None]), tf.TensorShape([None])))
self.assertTrue(
tensor_utils.same_shape(tf.TensorShape([1]), tf.TensorShape([1])))
self.assertTrue(
tensor_utils.same_shape(
tf.TensorShape([None, 1]), tf.TensorShape([None, 1])))
self.assertTrue(
tensor_utils.same_shape(
tf.TensorShape([1, 2, 3]), tf.TensorShape([1, 2, 3])))
self.assertFalse(
tensor_utils.same_shape(tf.TensorShape(None), tf.TensorShape([1])))
self.assertFalse(
tensor_utils.same_shape(tf.TensorShape([1]), tf.TensorShape(None)))
self.assertFalse(
tensor_utils.same_shape(tf.TensorShape([1]), tf.TensorShape([None])))
self.assertFalse(
tensor_utils.same_shape(tf.TensorShape([1]), tf.TensorShape([2])))
self.assertFalse(
tensor_utils.same_shape(tf.TensorShape([1, 2]), tf.TensorShape([2, 1])))
if __name__ == '__main__':
test.main()
| [
"[email protected]"
]
| |
88c956a598941e1923e9029d6acc27a0a83a987e | a8f39f241598dce6d876f2b535327aeac4902170 | /tests/api2/network.py | fed153986b6379c6c5d4e4047c399b57ad8ab93f | [
"BSD-3-Clause"
]
| permissive | ghos/freenas | 321b8d36ec16b715ffd6fb60768901bfb276c2a1 | f1049d30355c70eb3f2c4b841bef71aa836890c9 | refs/heads/master | 2020-12-07T17:14:57.513807 | 2020-01-07T20:59:41 | 2020-01-07T20:59:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,946 | py | #!/usr/bin/env python3.6
# Author: Eric Turgeon
# License: BSD
import pytest
import sys
import os
apifolder = os.getcwd()
sys.path.append(apifolder)
from auto_config import hostname, domain
from functions import GET, PUT
from config import *
BRIDGEGWReason = "BRIDGEGW not in ixautomation.conf"
BRIDGENETMASKReason = "BRIDGENETMASK not in ixautomation.conf"
Reason = "AD_DOMAIN BRIDGEDNS are missing in ixautomation.conf"
dns_cfg = pytest.mark.skipif("BRIDGEDNS" not in locals(), reason=Reason)
def test_01_get_default_network_general_summary():
results = GET("/network/general/summary/")
assert results.status_code == 200
assert isinstance(results.json(), dict), results.text
assert isinstance(results.json()['default_routes'], list), results.text
@dns_cfg
def test_02_configure_setting_domain_hostname_and_dns():
global payload
payload = {"domain": domain,
"hostname": hostname,
"ipv4gateway": gateway,
"nameserver1": BRIDGEDNS}
global results
results = PUT("/network/configuration/", payload)
assert results.status_code == 200, results.text
assert isinstance(results.json(), dict), results.text
@dns_cfg
@pytest.mark.parametrize('dkeys', ["domain", "hostname", "ipv4gateway",
"nameserver1"])
def test_03_looking_put_network_configuration_output_(dkeys):
assert results.json()[dkeys] == payload[dkeys], results.text
@dns_cfg
def test_04_get_network_configuration_info_():
global results
results = GET("/network/configuration/")
assert results.status_code == 200, results.text
assert isinstance(results.json(), dict), results.text
@dns_cfg
@pytest.mark.parametrize('dkeys', ["domain", "hostname", "ipv4gateway",
"nameserver1"])
def test_05_looking_get_network_configuration_output_(dkeys):
assert results.json()[dkeys] == payload[dkeys], results.text
| [
"[email protected]"
]
| |
ccc3218670bdf6b394fbb52fd0bbbeaf3534c15e | c839961aeab22795200d9edef9ba043fe42eeb9c | /data/script802.py | 4b4a5c3b512045d2824d977d9d741a8e82521de3 | []
| no_license | StevenLOL/kaggleScape | ad2bb1e2ed31794f1ae3c4310713ead1482ffd52 | 18bede8420ab8d2e4e7c1eaf6f63280e20cccb97 | refs/heads/master | 2020-03-17T05:12:13.459603 | 2018-05-02T19:35:55 | 2018-05-02T19:35:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,663 | py |
# coding: utf-8
# Hey everyone, this is my first go at Kaggle competitions and Kernels.
#
# In this kernel, I implemented a kNN classifier from scratch.
# The resulting submission scored 97.1% accuracy on the public leaderboard.
# In[ ]:
import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
import time
get_ipython().run_line_magic('matplotlib', 'inline')
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# load csv files to numpy arrays
def load_data(data_dir):
train_data = open(data_dir + "train.csv").read()
train_data = train_data.split("\n")[1:-1]
train_data = [i.split(",") for i in train_data]
# print(len(train_data))
X_train = np.array([[int(i[j]) for j in range(1,len(i))] for i in train_data])
y_train = np.array([int(i[0]) for i in train_data])
# print(X_train.shape, y_train.shape)
test_data = open(data_dir + "test.csv").read()
test_data = test_data.split("\n")[1:-1]
test_data = [i.split(",") for i in test_data]
# print(len(test_data))
X_test = np.array([[int(i[j]) for j in range(0,len(i))] for i in test_data])
# print(X_test.shape)
return X_train, y_train, X_test
class simple_knn():
"a simple kNN with L2 distance"
def __init__(self):
pass
def train(self, X, y):
self.X_train = X
self.y_train = y
def predict(self, X, k=1):
dists = self.compute_distances(X)
# print("computed distances")
num_test = dists.shape[0]
y_pred = np.zeros(num_test)
for i in range(num_test):
k_closest_y = []
labels = self.y_train[np.argsort(dists[i,:])].flatten()
# find the k nearest labels
k_closest_y = labels[:k]
# out of these k nearest labels, which one is most common
# for 5NN [1, 1, 1, 2, 3] returns 1
# break ties by selecting smaller label
# for 5NN [1, 2, 1, 2, 3] returns 1 even though 1 and 2 each appeared twice.
c = Counter(k_closest_y)
y_pred[i] = c.most_common(1)[0][0]
return(y_pred)
def compute_distances(self, X):
num_test = X.shape[0]
num_train = self.X_train.shape[0]
dot_pro = np.dot(X, self.X_train.T)
sum_square_test = np.square(X).sum(axis = 1)
sum_square_train = np.square(self.X_train).sum(axis = 1)
dists = np.sqrt(-2 * dot_pro + sum_square_train + np.matrix(sum_square_test).T)
return(dists)
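# --- Editor's aside (illustrative check, not part of the original kernel) ---
# compute_distances above avoids a Python double loop by expanding
# ||x - y||^2 = ||x||^2 - 2*x.y + ||y||^2 and evaluating it with matrix operations.
# A tiny sanity check of that expansion on made-up points:
_a = np.array([[1.0, 2.0]])                # one query point
_b = np.array([[0.0, 0.0], [3.0, 4.0]])    # two reference points
_brute = np.sqrt(((_a[:, None, :] - _b[None, :, :]) ** 2).sum(axis=-1))
_vectorised = np.sqrt(-2 * _a.dot(_b.T) + np.square(_b).sum(axis=1) + np.matrix(np.square(_a).sum(axis=1)).T)
assert np.allclose(_brute, _vectorised)    # both give [[sqrt(5), sqrt(8)]]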
# Let's read `../input/train.csv` and `../input/test.csv` files to numpy arrays.
#
# Print shapes of those arrays as a sanity check.
# In[ ]:
# runs for 35 seconds
data_dir = "../input/"
X_train, y_train, X_test = load_data(data_dir)
# In[ ]:
print(X_train.shape, y_train.shape, X_test.shape)
# Visualize random samples from training data.
# In[ ]:
# runs for 10 seconds
classes = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
num_classes = len(classes)
samples = 8
for y, cls in enumerate(classes):
idxs = np.nonzero([i == y for i in y_train])
idxs = np.random.choice(idxs[0], samples, replace=False)
for i , idx in enumerate(idxs):
plt_idx = i * num_classes + y + 1
plt.subplot(samples, num_classes, plt_idx)
plt.imshow(X_train[idx].reshape((28, 28)))
plt.axis("off")
if i == 0:
plt.title(cls)
plt.show()
# In[ ]:
# just to visualize ith test image
plt.imshow(X_test[2311].reshape((28, 28)))
# Split the test data into batches, since the full matrix of distances between every
# test image and every train image won't fit in memory.
# In[ ]:
# predict labels for batch_size number of test images at a time.
batch_size = 2000
# k = 3
k = 1
classifier = simple_knn()
classifier.train(X_train, y_train)
# As Kaggle kernels have 1200 seconds limit, I have divided the prediction step
# into two cells each cell running for 13 minutes and saving prediction to `predictions`.
# In[ ]:
# runs for 13 minutes
predictions = []
for i in range(int(len(X_test)/(2*batch_size))):
# predicts from i * batch_size to (i+1) * batch_size
print("Computing batch " + str(i+1) + "/" + str(int(len(X_test)/batch_size)) + "...")
tic = time.time()
predts = classifier.predict(X_test[i * batch_size:(i+1) * batch_size], k)
toc = time.time()
predictions = predictions + list(predts)
# print("Len of predictions: " + str(len(predictions)))
print("Completed this batch in " + str(toc-tic) + " Secs.")
print("Completed predicting the test data.")
# In[ ]:
# runs for 13 minutes
# uncomment predict lines to predict second half of test data
for i in range(int(len(X_test)/(2*batch_size)), int(len(X_test)/batch_size)):
# predicts from i * batch_size to (i+1) * batch_size
print("Computing batch " + str(i+1) + "/" + str(int(len(X_test)/batch_size)) + "...")
tic = time.time()
#predts = classifier.predict(X_test[i * batch_size:(i+1) * batch_size], k)
toc = time.time()
#predictions = predictions + list(predts)
# print("Len of predictions: " + str(len(predictions)))
print("Completed this batch in " + str(toc-tic) + " Secs.")
print("Completed predicting the test data.")
# After predicting and collecting the results in a Python list, we dump our predictions to a CSV file
# named `predictions.csv`, which scores 97.114% accuracy on the public leaderboard.
# In[ ]:
out_file = open("predictions.csv", "w")
out_file.write("ImageId,Label\n")
for i in range(len(predictions)):
out_file.write(str(i+1) + "," + str(int(predictions[i])) + "\n")
out_file.close()
| [
"[email protected]"
]
| |
5bf16079be9f974bae6d83fe67c63316eeb74485 | 76f160538f4de5466929c6f66d4fa0ed021d10d0 | /lux/extensions/angular/__init__.py | 352f1bc771234cdf9cdf19fcc83245ce8f06a32f | [
"BSD-3-Clause"
]
| permissive | tazo90/lux | cd60e5364f2af486a41f58935eec38c79a2acef5 | 6fc8994cfaa9379ea3a0c6ce7b076d48e6b1759e | refs/heads/master | 2021-01-15T12:50:24.280897 | 2015-05-07T07:15:13 | 2015-05-07T07:15:13 | 35,214,757 | 0 | 0 | null | 2015-05-07T10:50:57 | 2015-05-07T10:50:56 | null | UTF-8 | Python | false | false | 5,613 | py | '''
This extension does not provide any middleware, but it is required
when using the :ref:`lux.js <jsapi>` JavaScript module: it
provides the link between AngularJS_ and Python.
**Required extensions**: :mod:`lux.extensions.ui`
Usage
=========
Include ``lux.extensions.angular`` into the :setting:`EXTENSIONS` list in your
:ref:`config file <parameters>`::
EXTENSIONS = [
...
'lux.extensions.ui',
'lux.extensions.angular'
...
]
HTML5_NAVIGATION = True
.. _AngularJS: https://angularjs.org/
.. _`ui-router`: https://github.com/angular-ui/ui-router
'''
import lux
from lux import Parameter, RouterParam
from pulsar.apps.wsgi import MediaMixin, Html, route
from pulsar.utils.httpurl import urlparse
from pulsar.utils.html import escape
from .ui import add_css
def add_ng_modules(doc, modules):
if modules:
ngmodules = set(doc.jscontext.get('ngModules', ()))
ngmodules.update(modules)
doc.jscontext['ngModules'] = tuple(ngmodules)
class Extension(lux.Extension):
_config = [
Parameter('HTML5_NAVIGATION', False,
'Enable Html5 navigation', True),
Parameter('ANGULAR_VIEW_ANIMATE', False,
'Enable Animation of ui-router views.'),
Parameter('NGMODULES', [], 'Angular module to load')
]
def on_html_document(self, app, request, doc):
router = html_router(request.app_handler)
if not router:
return
#
add_ng_modules(doc, app.config['NGMODULES'])
# Use HTML5 navigation and angular router
if app.config['HTML5_NAVIGATION']:
root = angular_root(app, router)
doc.body.data({'ng-model': 'page',
'ng-controller': 'Page',
'page': ''})
doc.head.meta.append(Html('base', href="/"))
if not hasattr(root, '_angular_sitemap'):
root._angular_sitemap = {'states': [], 'pages': {}}
add_to_sitemap(root._angular_sitemap, app, doc, root)
doc.jscontext.update(root._angular_sitemap)
doc.jscontext['page'] = router.state
else:
add_ng_modules(doc, router.uimodules)
def context(self, request, context):
router = html_router(request.app_handler)
if request.config['HTML5_NAVIGATION'] and router:
root = angular_root(request.app, router)
pages = request.html_document.jscontext['pages']
page = pages.get(router.state)
context['html_main'] = self.uiview(request, context, page)
def uiview(self, request, context, page):
'''Wrap the ``main`` html with a ``ui-view`` container.
Add animation class if specified in :setting:`ANGULAR_VIEW_ANIMATE`.
'''
app = request.app
main = context.get('html_main', '')
if 'templateUrl' in page or 'template' in page:
main = Html('div', main, cn='hidden', id="seo-view")
div = Html('div', main, cn='angular-view')
animate = app.config['ANGULAR_VIEW_ANIMATE']
if animate:
add_ng_modules(request.html_document, ('ngAnimate',))
div.addClass(animate)
div.data('ui-view', '')
return div.render()
def html_router(router):
if isinstance(router, lux.HtmlRouter):
return router
def angular_root(app, router):
'''The root angular router
'''
if not hasattr(router, '_angular_root'):
if angular_compatible(app, router, router.parent):
router._angular_root = angular_root(app, router.parent)
else:
router._angular_root = router
return router._angular_root
def angular_compatible(app, router1, router2):
router1 = html_router(router1)
router2 = html_router(router2)
if router1 and router2:
templ1 = router1.get_html_body_template(app)
templ2 = router2.get_html_body_template(app)
return templ1 == templ2
return False
def router_href(app, route):
url = '/'.join(_angular_route(route))
if url:
url = '/%s' % url if route.is_leaf else '/%s/' % url
else:
url = '/'
site_url = app.config['SITE_URL']
if site_url:
p = urlparse(site_url + url)
return p.path
else:
return url
def _angular_route(route):
for is_dynamic, val in route.breadcrumbs:
if is_dynamic:
c = route._converters[val]
yield '*%s' % val if c.regex == '.*' else ':%s' % val
else:
yield val
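# Editor's aside (not part of the original module): _angular_route rewrites a Django
# route's breadcrumbs into ui-router style segments. For example, a hypothetical route
# with a static "blog" part followed by a dynamic "id" part yields ["blog", ":id"], so
# router_href returns "/blog/:id/" for a non-leaf route ("*id" is used instead for
# catch-all converters whose regex is ".*").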
def add_to_sitemap(sitemap, app, doc, router, parent=None, angular=None):
# path for the current router
path = router_href(app, router.full_route)
# Set the angular router
if hasattr(router, 'angular_page'):
angular = router
name = router.name
if parent:
name = '%s_%s' % (parent, name)
router.state = name
page = {'url': path, 'name': name}
if angular:
angular.angular_page(app, router, page)
sitemap['states'].append(name)
sitemap['pages'][name] = page
add_ng_modules(doc, router.uimodules)
#
# Loop over children routes
for child in router.routes:
add_to_sitemap(sitemap, app, doc, child, name, angular)
# Add redirect to folder page if required
return
if path.endswith('/') and path != '/':
rpath = path[:-1]
if rpath not in sitemap['pages']:
page = {'url': rpath,
'redirectTo': path}
sitemap['states'].append(rpath)
sitemap['pages'][rpath] = page
| [
"[email protected]"
]
| |
c111258136eed2183508d6806edea26b81e1751d | 9b9a02657812ea0cb47db0ae411196f0e81c5152 | /repoData/brosner-django-groups/allPythonContent.py | a04d659dfdcf6b7ba8ded1c96006ded0510da779 | []
| no_license | aCoffeeYin/pyreco | cb42db94a3a5fc134356c9a2a738a063d0898572 | 0ac6653219c2701c13c508c5c4fc9bc3437eea06 | refs/heads/master | 2020-12-14T14:10:05.763693 | 2016-06-27T05:15:15 | 2016-06-27T05:15:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,204 | py | __FILENAME__ = base
import datetime
import warnings
from django.db import models
from django.db.models.options import FieldDoesNotExist
from django.db.models.query import QuerySet
from django.db.models.sql.constants import LOOKUP_SEP
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
def _get_queryset(klass):
"""
Returns a QuerySet from a Model, Manager, or QuerySet. Created to make
get_object_or_404 and get_list_or_404 more DRY.
Pulled from django.shortcuts
"""
if isinstance(klass, QuerySet):
return klass
elif isinstance(klass, models.Manager):
manager = klass
else:
manager = klass._default_manager
return manager.all()
class GroupAware(models.Model):
"""
A mixin abstract base model to use on models you want to make group-aware.
"""
group_content_type = models.ForeignKey(ContentType, null=True, blank=True)
group_object_id = models.PositiveIntegerField(null=True, blank=True)
group = generic.GenericForeignKey("group_content_type", "group_object_id")
class Meta:
abstract = True
class GroupBase(models.Model):
slug_attr = "slug"
class Meta(object):
abstract = True
def member_queryset(self):
if not hasattr(self, "_members_field"):
# look for the common case of a m2m named members (in some cases
# the related_name of the user FK on the intermediary model might
# be named members and we need User instances)
try:
field = self._meta.get_field("members")
except FieldDoesNotExist:
raise NotImplementedError("You must define a member_queryset for %s" % str(self.__class__))
else:
self._members_field = field
else:
field = self._members_field
if isinstance(field, models.ManyToManyField) and issubclass(field.rel.to, User):
return self.members.all()
else:
raise NotImplementedError("You must define a member_queryset for %s" % str(self.__class__))
def user_is_member(self, user):
return user in self.member_queryset()
def _group_gfk_field(self, model, join=None, field_name=None):
opts = model._meta
if field_name is None:
field_name = "group"
if join is not None:
# see if we can get the model where the field actually lives
parts = join.split(LOOKUP_SEP)
for name in parts:
f, model, direct, m2m = opts.get_field_by_name(name)
# not handling the model is not None case (proxied models I think)
if direct:
if m2m or f.rel:
opts = f.rel.to._meta
else:
break
else:
opts = f.opts
try:
field = [f for f in opts.virtual_fields if f.name == field_name][0]
except IndexError:
from django.db.models.loading import cache as app_cache
model = app_cache.get_model(opts.app_label, opts.module_name)
raise LookupError("Unable to find generic foreign key named '%s' "
"on %r\nThe model may have a different name or it does not "
"exist." % (
field_name,
model,
))
return field
def lookup_params(self, model):
content_type = ContentType.objects.get_for_model(self)
group_gfk = self._group_gfk_field(model)
params = {
group_gfk.fk_field: self.id,
group_gfk.ct_field: content_type,
}
return params
def content_objects(self, queryable, join=None, gfk_field=None):
queryset = _get_queryset(queryable)
content_type = ContentType.objects.get_for_model(self)
group_gfk = self._group_gfk_field(queryset.model, join=join, field_name=gfk_field)
if join:
lookup_kwargs = {
"%s__%s" % (join, group_gfk.fk_field): self.id,
"%s__%s" % (join, group_gfk.ct_field): content_type,
}
else:
lookup_kwargs = {
group_gfk.fk_field: self.id,
group_gfk.ct_field: content_type,
}
content_objects = queryset.filter(**lookup_kwargs)
return content_objects
def associate(self, instance, commit=True, gfk_field=None):
group_gfk = self._group_gfk_field(instance, field_name=gfk_field)
setattr(instance, group_gfk.fk_field, self.id)
setattr(instance, group_gfk.ct_field, ContentType.objects.get_for_model(self))
if commit:
instance.save()
return instance
def get_url_kwargs(self):
kwargs = {}
if hasattr(self, "group") and self.group:
kwargs.update(self.group.get_url_kwargs())
slug = getattr(self, self.slug_attr)
kwargs.update({"%s_slug" % self._meta.object_name.lower(): slug})
return kwargs
class Group(GroupBase, GroupAware):
"""
a group is a group of users with a common interest
"""
slug = models.SlugField(_("slug"), unique=True)
name = models.CharField(_("name"), max_length=80, unique=True)
creator = models.ForeignKey(User, verbose_name=_("creator"), related_name="%(class)s_created")
created = models.DateTimeField(_("created"), default=datetime.datetime.now)
description = models.TextField(_("description"))
def __unicode__(self):
return self.name
class Meta(object):
abstract = True
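# Editor's sketch (hypothetical subclass, not part of this file): concrete group types
# subclass Group; a plain "members" ManyToManyField to User is what member_queryset()
# above expects by default, e.g.:
#
#   class Team(Group):
#       members = models.ManyToManyField(User, related_name="teams", verbose_name=_("members"))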
class GroupScopedId(models.Model):
"""
a model to store scoped IDs for tasks (specific to a group)
"""
content_type = models.ForeignKey(ContentType, null=True, blank=True)
object_id = models.IntegerField(null=True, blank=True)
group = generic.GenericForeignKey()
scoped_number = models.IntegerField()
class Meta:
abstract = True
unique_together = (("content_type", "object_id", "scoped_number"),)
########NEW FILE########
__FILENAME__ = bridge
import sys
from django.shortcuts import render_to_response
from django.conf.urls.defaults import patterns, url as urlpattern
from django.core.urlresolvers import RegexURLPattern, RegexURLResolver, reverse as dreverse
from django.contrib.contenttypes.models import ContentType
class ContentBridge(object):
def __init__(self, group_model, content_app_name=None, urlconf_aware=True):
self.parent_bridge = None
self.group_model = group_model
self.urlconf_aware = urlconf_aware
if content_app_name is None:
self.content_app_name = group_model._meta.app_label
else:
self.content_app_name = content_app_name
# attach the bridge to the model itself. we need to access it when
# using groupurl to get the correct prefix for URLs for the given
# group.
self.group_model.content_bridge = self
def include_urls(self, module_name, url_prefix, kwargs=None):
if kwargs is None:
kwargs = {}
prefix = self.content_app_name
__import__(module_name)
module = sys.modules[module_name]
if hasattr(module, "bridge"):
module.bridge.parent_bridge = self
urls = []
for url in module.urlpatterns:
extra_kwargs = {"bridge": self}
if isinstance(url, RegexURLPattern):
regex = url_prefix + url.regex.pattern.lstrip("^")
if url._callback:
callback = url._callback
else:
callback = url._callback_str
if url.name:
name = url.name
else:
# @@@ this seems sketchy
name = ""
name = "%s_%s" % (prefix, name)
extra_kwargs.update(kwargs)
extra_kwargs.update(url.default_args)
urls.append(urlpattern(regex, callback, extra_kwargs, name))
else:
# i don't see this case happening much at all. this case will be
# executed likely if url is a RegexURLResolver. nesting an include
# at the content object level may not be supported, but maybe the
# code below works. i don't have time to test it, but if you are
# reading this because something is broken then give it a shot.
# then report back :-)
raise Exception("ContentBridge.include_urls does not support a nested include.")
# regex = url_prefix + url.regex.pattern.lstrip("^")
# urlconf_name = url.urlconf_name
# extra_kwargs.update(kwargs)
# extra_kwargs.update(url.default_kwargs)
# final_urls.append(urlpattern(regex, [urlconf_name], extra_kwargs))
return patterns("", *urls)
@property
def _url_name_prefix(self):
if self.urlconf_aware:
parent_prefix = ""
if self.parent_bridge is not None:
parent_prefix = self.parent_bridge._url_name_prefix
return "%s%s_" % (parent_prefix, self.content_app_name)
else:
return ""
def reverse(self, view_name, group, kwargs=None):
if kwargs is None:
kwargs = {}
final_kwargs = {}
final_kwargs.update(group.get_url_kwargs())
final_kwargs.update(kwargs)
return dreverse("%s%s" % (self._url_name_prefix, view_name), kwargs=final_kwargs)
def render(self, template_name, context, context_instance=None):
# @@@ this method is practically useless -- consider removing it.
ctype = ContentType.objects.get_for_model(self.group_model)
return render_to_response([
"%s/%s/%s" % (ctype.app_label, self.content_app_name, template_name),
"%s/%s" % (self.content_app_name, template_name),
], context, context_instance=context_instance)
def group_base_template(self, template_name="content_base.html"):
return "%s/%s" % (self.content_app_name, template_name)
def get_group(self, kwargs):
lookup_params = {}
if self.parent_bridge is not None:
parent_group = self.parent_bridge.get_group(kwargs)
lookup_params.update(parent_group.lookup_params(self.group_model))
else:
parent_group = None
slug = kwargs.pop("%s_slug" % self.group_model._meta.object_name.lower())
lookup_params.update({
"slug": slug,
})
group = self.group_model._default_manager.get(**lookup_params)
if parent_group:
# cache parent_group on GFK to prevent database hits later on
group.group = parent_group
return group
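# Editor's sketch (illustrative only; "Group" and "tasks.urls" are hypothetical names,
# not part of this repository): a ContentBridge is typically built around a group model
# and used to nest a content app's URLs under the group's slug, e.g.:
#
#   from groups.bridge import ContentBridge
#   from myapp.models import Group            # a concrete subclass of groups.base.Group
#
#   bridge = ContentBridge(Group)
#   urlpatterns += bridge.include_urls(
#       "tasks.urls",                              # content app URLconf to nest
#       r"^groups/(?P<group_slug>[-\w]+)/tasks/",  # prefix capturing the group slug
#   )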
########NEW FILE########
__FILENAME__ = helpers
from django.db import connection, transaction
qn = connection.ops.quote_name
def generate_next_scoped_id(content_object, scoped_id_model):
"""
generates an ID unique to a content_object scoped in a group (if it has
one).
"""
kwargs = {}
if content_object.group:
kwargs.update({
"content_type": content_object.content_type,
"object_id": content_object.object_id,
})
get_or_create = scoped_id_model._default_manager.get_or_create
scoped_id, created = get_or_create(**dict(kwargs, **{
"defaults": {
"scoped_number": 1,
}
}))
if not created:
sql = """
UPDATE %(table_name)s
SET scoped_number = scoped_number + 1
""" % {"table_name": qn(scoped_id_model._meta.db_table)}
if content_object.group:
sql += """
WHERE
content_type_id = %(content_type_id)s AND
object_id = %(object_id)s
""" % {
"content_type_id": kwargs["content_type"].pk,
"object_id": kwargs["object_id"],
}
try:
try:
transaction.enter_transaction_management()
transaction.managed(True)
cursor = connection.cursor()
cursor.execute(sql)
# we modified data, mark dirty
transaction.set_dirty()
scoped_id = scoped_id_model._default_manager.get(pk=scoped_id.pk)
transaction.commit()
except:
transaction.rollback()
raise
finally:
transaction.leave_transaction_management()
return scoped_id.scoped_number
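# Editor's sketch (hypothetical models, not from this file): a content model whose
# instances need per-group sequential numbers can call this helper from save(),
# assuming it exposes the content_type / object_id / group attributes read above:
#
#   def save(self, *args, **kwargs):
#       if not self.number:
#           self.number = generate_next_scoped_id(self, TaskScopedId)
#       super(Task, self).save(*args, **kwargs)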
########NEW FILE########
__FILENAME__ = internals
import copy
class GroupDummy(object):
def __nonzero__(self):
return False
class GroupRequestHelper(object):
def __init__(self, request, group):
self.request = request
self.group = group
def __deepcopy__(self, memo):
obj = copy.copy(self)
for k, v in self.__dict__.iteritems():
if k == "request":
continue
setattr(obj, k, copy.deepcopy(v, memo))
obj.request = self.request
memo[id(self)] = obj
return obj
def user_is_member(self):
if not self.request.user.is_authenticated():
is_member = False
else:
if self.group:
is_member = self.group.user_is_member(self.request.user)
else:
is_member = True
return is_member
########NEW FILE########
__FILENAME__ = middleware
from django.core.exceptions import ObjectDoesNotExist
from django.http import Http404
from django.utils.functional import curry
from groups.internals import GroupDummy, GroupRequestHelper
class GroupAwareMiddleware(object):
def process_view(self, request, view, view_args, view_kwargs):
bridge = view_kwargs.pop("bridge", None)
if bridge:
try:
group = bridge.get_group(view_kwargs)
except ObjectDoesNotExist:
raise Http404
else:
group = GroupDummy()
# attach a request helper
group.request = GroupRequestHelper(request, group)
request.group = group
request.bridge = bridge
return None
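# Editor's note (assumption about project wiring, not part of this file): the middleware
# above only takes effect once it is listed in the Django settings, e.g. on the Django
# versions this code targets:
#
#   MIDDLEWARE_CLASSES = [
#       # ...
#       "groups.middleware.GroupAwareMiddleware",
#   ]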
########NEW FILE########
__FILENAME__ = group_tags
from django import template
from django.utils.encoding import smart_str
from django.core.urlresolvers import reverse, NoReverseMatch
from django.db.models import get_model
from django.db.models.query import QuerySet
register = template.Library()
class GroupURLNode(template.Node):
def __init__(self, view_name, group, kwargs, asvar):
self.view_name = view_name
self.group = group
self.kwargs = kwargs
self.asvar = asvar
def render(self, context):
url = ""
group = self.group.resolve(context)
kwargs = {}
for k, v in self.kwargs.items():
kwargs[smart_str(k, "ascii")] = v.resolve(context)
if group:
bridge = group.content_bridge
try:
url = bridge.reverse(self.view_name, group, kwargs=kwargs)
except NoReverseMatch:
if self.asvar is None:
raise
else:
try:
url = reverse(self.view_name, kwargs=kwargs)
except NoReverseMatch:
if self.asvar is None:
raise
if self.asvar:
context[self.asvar] = url
return ""
else:
return url
class ContentObjectsNode(template.Node):
def __init__(self, group_var, model_name_var, gfk_field_var, context_var):
self.group_var = template.Variable(group_var)
self.model_name_var = template.Variable(model_name_var)
if gfk_field_var is not None:
self.gfk_field_var = template.Variable(gfk_field_var)
else:
self.gfk_field_var = None
self.context_var = context_var
def render(self, context):
group = self.group_var.resolve(context)
model_name = self.model_name_var.resolve(context)
if self.gfk_field_var is not None:
gfk_field = self.gfk_field_var.resolve(context)
else:
gfk_field = None
if isinstance(model_name, QuerySet):
model = model_name
else:
app_name, model_name = model_name.split(".")
model = get_model(app_name, model_name)
context[self.context_var] = group.content_objects(model, gfk_field=gfk_field)
return ""
class ObjectGroupUrlNode(template.Node):
def __init__(self, obj, group, asvar):
self.obj_var = template.Variable(obj)
self.group = group
self.asvar = asvar
def render(self, context):
url = ""
obj = self.obj_var.resolve(context)
group = self.group.resolve(context)
try:
url = obj.get_absolute_url(group)
except NoReverseMatch:
if self.asvar is None:
raise
if self.asvar:
context[self.asvar] = url
return ""
else:
return url
@register.tag
def groupurl(parser, token):
bits = token.contents.split()
tag_name = bits[0]
if len(bits) < 3:
raise template.TemplateSyntaxError("'%s' takes at least two arguments"
" (path to a view and a group)" % tag_name)
view_name = bits[1]
group = parser.compile_filter(bits[2])
args = []
kwargs = {}
asvar = None
if len(bits) > 3:
bits = iter(bits[3:])
for bit in bits:
if bit == "as":
asvar = bits.next()
break
else:
for arg in bit.split(","):
if "=" in arg:
k, v = arg.split("=", 1)
k = k.strip()
kwargs[k] = parser.compile_filter(v)
elif arg:
raise template.TemplateSyntaxError("'%s' does not support non-kwargs arguments." % tag_name)
return GroupURLNode(view_name, group, kwargs, asvar)
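# Editor's note: unlike the tags documented below, groupurl has no usage docstring.
# An illustrative example (the view name and kwargs are made up):
#
#   {% groupurl task_detail group id=task.pk as task_url %}
#
# When `group` is falsy, the tag falls back to a plain reverse() of the view name.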
@register.tag
def content_objects(parser, token):
"""
Basic usage::
{% content_objects group "tasks.Task" as tasks %}
or if you need to specify a custom generic foreign key field (default is
group)::
{% content_objects group "tasks.Task" "content_object" as tasks %}
"""
bits = token.split_contents()
if len(bits) not in [5, 6]:
raise template.TemplateSyntaxError("'%s' requires five or six arguments." % bits[0])
else:
if len(bits) == 5:
return ContentObjectsNode(bits[1], bits[2], None, bits[4])
else:
return ContentObjectsNode(bits[1], bits[2], bits[3], bits[5])
@register.tag
def object_group_url(parser, token):
"""
given an object and an optional group, call get_absolute_url passing the
group variable::
{% object_group_url task group %}
"""
bits = token.contents.split()
tag_name = bits[0]
if len(bits) < 3:
raise template.TemplateSyntaxError("'%s' takes at least two arguments"
" (object and a group)" % tag_name)
obj = bits[1]
group = parser.compile_filter(bits[2])
if len(bits) > 3:
if bits[3] != "as":
raise template.TemplateSyntaxError("'%s' requires the fourth"
" argument to be 'as'" % tag_name)
try:
asvar = bits[4]
except IndexError:
raise template.TemplateSyntaxError("'%s' requires an argument"
" after 'as'" % tag_name)
return ObjectGroupUrlNode(obj, group, asvar)
########NEW FILE########
| [
"[email protected]"
]
| |
4e73dcfed4e559a0188715cb13281b634e692954 | d5ad13232e3f1ced55f6956bc4cbda87925c8085 | /cc_mcc_seq/Results/indel/6_point_gene_test/7_1_sum_indel.point_ttest.py | 9e3c83f5cfe1896677303ff1a71af85007af1b69 | []
| no_license | arvin580/SIBS | c0ba9a8a41f59cb333517c286f7d80300b9501a2 | 0cc2378bf62359ec068336ea4de16d081d0f58a4 | refs/heads/master | 2021-01-23T21:57:35.658443 | 2015-04-09T23:11:34 | 2015-04-09T23:11:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 622 | py | dict1=dict()
inFile=open('sum_indel.exome_combined.sorted.pass012.new')
for line in inFile :
line=line.strip()
fields=line.split('\t')
key=fields[21]+':'+fields[22]
dict1.setdefault(key,[0]*21)
for i,item in enumerate(fields[-20:]) :
dict1[key][1+i]+=int(item)
dict1[key][0]+=int(item)
inFile.close()
d=dict1.items()
d.sort(cmp=lambda x,y:cmp(x[1][0],y[1][0]),reverse=True)
ouFile=open('sum_indel.exome_combined.sorted.pass012.new.point_ttest','w')
for item in d :
ouFile.write(item[0]+'\t')
ouFile.write('\t'.join([str(i) for i in item[1]])+'\n')
ouFile.close()
| [
"[email protected]"
]
| |
1e477aa48e9e96e3b8f763ccda159afce0079f5c | 6bb45c5892b4c9692dcc44116fb73dc9e7ab90ff | /sagemaker-training-compiler/huggingface/pytorch_single_gpu_single_node/roberta-base/scripts/fine_tune_with_huggingface.py | 4e30ae6ee9d41e4f62b60bf46c8534036b198b62 | [
"Apache-2.0",
"BSD-2-Clause"
]
| permissive | aws/amazon-sagemaker-examples | 8359afe544e873662bda5b8d2b07399c437213c9 | 43dae4b28531cde167598f104f582168b0a4141f | refs/heads/main | 2023-08-26T04:42:52.342776 | 2023-08-25T14:37:19 | 2023-08-25T14:37:19 | 107,937,815 | 4,797 | 3,519 | Apache-2.0 | 2023-09-14T19:47:03 | 2017-10-23T05:55:22 | Jupyter Notebook | UTF-8 | Python | false | false | 3,780 | py | from transformers import (
AutoModelForSequenceClassification,
Trainer,
TrainingArguments,
AutoTokenizer,
TrainerCallback,
)
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
from datasets import load_from_disk
import random
import logging
import sys
import argparse
import os
import torch
import numpy as np
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# hyperparameters sent by the client are passed as command-line arguments to the script.
parser.add_argument("--epochs", type=int, default=50)
parser.add_argument("--train_batch_size", type=int, default=32)
parser.add_argument("--eval_batch_size", type=int, default=64)
parser.add_argument("--warmup_steps", type=int, default=500)
parser.add_argument("--model_name", type=str)
parser.add_argument("--learning_rate", type=float, default=5e-5)
# Data, model, and output directories
parser.add_argument("--output_data_dir", type=str, default=os.environ["SM_OUTPUT_DATA_DIR"])
parser.add_argument("--model_dir", type=str, default=os.environ["SM_MODEL_DIR"])
parser.add_argument("--n_gpus", type=str, default=os.environ["SM_NUM_GPUS"])
parser.add_argument("--training_dir", type=str, default=os.environ["SM_CHANNEL_TRAIN"])
parser.add_argument("--test_dir", type=str, default=os.environ["SM_CHANNEL_TEST"])
args, _ = parser.parse_known_args()
os.environ["GPU_NUM_DEVICES"] = args.n_gpus
# Set up logging
logger = logging.getLogger(__name__)
logging.basicConfig(
level=logging.getLevelName("INFO"),
handlers=[logging.StreamHandler(sys.stdout)],
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
# load datasets
train_dataset = load_from_disk(args.training_dir)
test_dataset = load_from_disk(args.test_dir)
# compute metrics function for binary classification
def compute_metrics(pred):
labels = pred.label_ids
preds = pred.predictions.argmax(-1)
precision, recall, f1, _ = precision_recall_fscore_support(labels, preds, average="binary")
acc = accuracy_score(labels, preds)
return {"accuracy": acc, "f1": f1, "precision": precision, "recall": recall}
# download model from model hub
model = AutoModelForSequenceClassification.from_pretrained(args.model_name)
tokenizer = AutoTokenizer.from_pretrained(args.model_name)
# define training args
training_args = TrainingArguments(
output_dir=args.model_dir,
num_train_epochs=args.epochs,
per_device_train_batch_size=args.train_batch_size,
per_device_eval_batch_size=args.eval_batch_size,
warmup_steps=args.warmup_steps,
logging_dir=f"{args.output_data_dir}/logs",
learning_rate=args.learning_rate,
fp16=True,
dataloader_drop_last=True,
disable_tqdm=True,
evaluation_strategy="no",
save_strategy="no",
save_total_limit=1,
logging_strategy="epoch",
)
# create Trainer instance
trainer = Trainer(
model=model,
args=training_args,
compute_metrics=compute_metrics,
train_dataset=train_dataset,
eval_dataset=test_dataset,
tokenizer=tokenizer,
)
# train model
trainer.train()
# evaluate model
eval_result = trainer.evaluate(eval_dataset=test_dataset)
# writes eval result to file which can be accessed later in s3 ouput
with open(os.path.join(args.output_data_dir, "eval_results.txt"), "w") as writer:
print(f"***** Eval results *****")
for key, value in sorted(eval_result.items()):
writer.write(f"{key} = {value}\n")
# Saves the model to s3
trainer.save_model(args.model_dir)
| [
"[email protected]"
]
| |
004d804435268fc547e669d76c9db9b3c2f5abba | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/vns/rscdevtochassis.py | 59c49d0334a738a7fb70f1c19452accb42efdf75 | []
| no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,288 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RsCDevToChassis(Mo):
"""
"""
meta = NamedSourceRelationMeta("cobra.model.vns.RsCDevToChassis", "cobra.model.vns.Chassis")
meta.targetNameProps["name"] = "tnVnsChassisName"
meta.cardinality = SourceRelationMeta.N_TO_ONE
meta.moClassName = "vnsRsCDevToChassis"
meta.rnFormat = "rscDevToChassis"
meta.category = MoCategory.RELATIONSHIP_TO_LOCAL
meta.label = "Chassis for CDevs"
meta.writeAccessMask = 0x4000000000000001
meta.readAccessMask = 0x4000000000000001
meta.isDomainable = False
meta.isReadOnly = False
meta.isConfigurable = True
meta.isDeletable = True
meta.isContextRoot = False
meta.childClasses.add("cobra.model.tag.Tag")
meta.childClasses.add("cobra.model.fault.Counts")
meta.childClasses.add("cobra.model.health.Inst")
meta.childClasses.add("cobra.model.fault.Inst")
meta.childClasses.add("cobra.model.aaa.RbacAnnotation")
meta.childClasses.add("cobra.model.tag.Annotation")
meta.childNamesAndRnPrefix.append(("cobra.model.tag.Annotation", "annotationKey-"))
meta.childNamesAndRnPrefix.append(("cobra.model.aaa.RbacAnnotation", "rbacDom-"))
meta.childNamesAndRnPrefix.append(("cobra.model.tag.Tag", "tagKey-"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Counts", "fltCnts"))
meta.childNamesAndRnPrefix.append(("cobra.model.health.Inst", "health"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Inst", "fault-"))
meta.parentClasses.add("cobra.model.vns.CDev")
meta.superClasses.add("cobra.model.reln.Inst")
meta.superClasses.add("cobra.model.reln.To")
meta.superClasses.add("cobra.model.pol.NToRef")
meta.rnPrefixes = [
('rscDevToChassis', False),
]
prop = PropMeta("str", "annotation", "annotation", 38208, PropCategory.REGULAR)
prop.label = "Annotation. Suggested format orchestrator:value"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("annotation", prop)
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "extMngdBy", "extMngdBy", 40347, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "undefined"
prop._addConstant("msc", "msc", 1)
prop._addConstant("undefined", "undefined", 0)
meta.props.add("extMngdBy", prop)
prop = PropMeta("str", "forceResolve", "forceResolve", 107, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = True
prop.defaultValueStr = "yes"
prop._addConstant("no", None, False)
prop._addConstant("yes", None, True)
meta.props.add("forceResolve", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "monPolDn", "monPolDn", 18406, PropCategory.REGULAR)
prop.label = "Monitoring policy attached to this observable object"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("monPolDn", prop)
prop = PropMeta("str", "rType", "rType", 106, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 1
prop.defaultValueStr = "mo"
prop._addConstant("local", "local", 3)
prop._addConstant("mo", "mo", 1)
prop._addConstant("service", "service", 2)
meta.props.add("rType", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "state", "state", 103, PropCategory.REGULAR)
prop.label = "State"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "unformed"
prop._addConstant("cardinality-violation", "cardinality-violation", 5)
prop._addConstant("formed", "formed", 1)
prop._addConstant("invalid-target", "invalid-target", 4)
prop._addConstant("missing-target", "missing-target", 2)
prop._addConstant("unformed", "unformed", 0)
meta.props.add("state", prop)
prop = PropMeta("str", "stateQual", "stateQual", 104, PropCategory.REGULAR)
prop.label = "State Qualifier"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "none"
prop._addConstant("default-target", "default-target", 2)
prop._addConstant("mismatch-target", "mismatch-target", 1)
prop._addConstant("none", "none", 0)
meta.props.add("stateQual", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "tCl", "tCl", 18360, PropCategory.REGULAR)
prop.label = "Target-class"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 6169
prop.defaultValueStr = "vnsChassis"
prop._addConstant("unspecified", "unspecified", 0)
prop._addConstant("vnsChassis", None, 6169)
meta.props.add("tCl", prop)
prop = PropMeta("str", "tContextDn", "tContextDn", 4990, PropCategory.REGULAR)
prop.label = "Target-context"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("tContextDn", prop)
prop = PropMeta("str", "tDn", "tDn", 100, PropCategory.REGULAR)
prop.label = "Target-dn"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("tDn", prop)
prop = PropMeta("str", "tRn", "tRn", 4989, PropCategory.REGULAR)
prop.label = "Target-rn"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("tRn", prop)
prop = PropMeta("str", "tType", "tType", 4988, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "name"
prop._addConstant("all", "all", 2)
prop._addConstant("mo", "mo", 1)
prop._addConstant("name", "name", 0)
meta.props.add("tType", prop)
prop = PropMeta("str", "tnVnsChassisName", "tnVnsChassisName", 18359, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("tnVnsChassisName", prop)
prop = PropMeta("str", "uid", "uid", 8, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("uid", prop)
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
meta.deploymentQueryPaths.append(DeploymentPathMeta("CDevToNwIf", "Physical Interfaces", "cobra.model.nw.If"))
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
]
| |
70a949a43592c87b808f514a6478307f2cb8b4ca | 4e044ab3073f5893f2eae33931ab5079b282817c | /00_Startcamp/02_Day/05_reverse_content.py | 74ce84e6ffdc72fd94c97e830417cfc5510d468f | []
| no_license | whiteblue0/Startcamp | e5e8c30476eda7a30c3daae4bdc018f223da74e8 | 7ddebe8be878a8550d99fcc572666b963dda583d | refs/heads/master | 2023-01-27T18:36:28.013135 | 2020-09-12T06:21:12 | 2020-09-12T06:21:12 | 195,809,764 | 0 | 0 | null | 2023-01-07T11:25:23 | 2019-07-08T12:42:40 | Jupyter Notebook | UTF-8 | Python | false | false | 357 | py | # #역순으로 출력 방법1
# with open('with_ssafy.txt','r') as f:
# lines = f.readlines()
# for line in lines:
# print(line.strip()[::-1])
# Method 2: write the contents out in reverse order
with open('writelines_ssafy.txt','r') as f:
lines = f.readlines()
lines.reverse()
with open('reverse_ssafy.txt','w') as f:
for line in lines:
f.write(line) | [
"[email protected]"
]
| |
113a035fa94bf5dde84455ba6efb5023c23404e9 | c6b1919498776cfc408076246390e2bba56f4c4e | /client/migrations/0025_auto_20170327_1353.py | 02d0f21d9aa306d7b21a4cf93536d1746cfc8ceb | []
| no_license | huozhihui/devops_tool | f2ceaf7f1828853e43859645f5ab36a00b0fa7df | 0eb7b4a14203e30bb2c262075864cec0db21829f | refs/heads/master | 2020-05-20T19:02:47.855055 | 2017-04-18T05:25:59 | 2017-04-18T05:25:59 | 84,509,976 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-27 13:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('client', '0024_auto_20170315_0945'),
]
operations = [
migrations.AlterField(
model_name='rolemanage',
name='timeout',
field=models.IntegerField(default=180, verbose_name='\u8d85\u65f6\u65f6\u957f(s)'),
),
]
| [
"[email protected]"
]
| |
d9e6416e228b806bc80ed10e06e4aabd87a059cb | 523226fe09d7b5d3b25c2c635ad8f3992462fe34 | /keras/saving/hdf5_format_test.py | 8594ff72fecb16715821f331d2eea5322de0560d | [
"Apache-2.0"
]
| permissive | ivallesp/keras | 1a03be04c904c8c2fabc8336c4a09c8b937b6ecc | 1a35ff2788b5e6880ceb8af82e1a8d5f72d0f76f | refs/heads/master | 2021-06-22T13:19:58.681658 | 2020-12-09T23:38:16 | 2020-12-09T23:38:16 | 155,445,708 | 0 | 0 | NOASSERTION | 2018-10-30T19:39:06 | 2018-10-30T19:39:05 | null | UTF-8 | Python | false | false | 49,940 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#,============================================================================
"""Tests for model saving in the HDF5 format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import os
import shutil
import tempfile
import uuid
from absl.testing import parameterized
import numpy as np
import keras
from keras import combinations
from keras import keras_parameterized
from keras import optimizer_v1
from keras import optimizers
from keras import testing_utils
from keras.engine import training
from keras.saving import hdf5_format
from tensorflow.python.platform import tf_logging as logging
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class TestWeightSavingAndLoading(tf.test.TestCase, parameterized.TestCase):
@keras_parameterized.run_with_all_weight_formats
def test_weight_loading(self):
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
saved_model_dir = os.path.join(temp_dir, 'saved_model')
save_format = testing_utils.get_save_format()
with self.cached_session():
a = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3)(a)
b = keras.layers.Dense(1)(x)
model = keras.models.Model(a, b)
x = np.random.random((3, 2))
ref_y = model.predict(x)
weights = model.get_weights()
model.set_weights(weights)
y = model.predict(x)
self.assertAllClose(ref_y, y)
with self.assertRaises(ValueError):
model.set_weights(weights[1:])
with self.assertRaises(ValueError):
model.set_weights(weights[::-1])
model.save_weights(saved_model_dir, save_format=save_format)
model.load_weights(saved_model_dir)
y = model.predict(x)
self.assertAllClose(ref_y, y)
def test_weight_preprocessing(self):
input_dim = 3
output_dim = 3
size = 2
cases = [
[
(keras.layers.Bidirectional(keras.layers.SimpleRNN(2))),
[np.random.random((2, 1)), np.random.random((2, 1))],
(None, 3, 2),
],
[
(keras.layers.TimeDistributed(keras.layers.Dense(1))),
[np.random.random((2, 1)), np.random.random((1,))],
(None, 3, 2),
],
[
(keras.layers.Conv1D(output_dim, size, use_bias=False)),
[np.random.random((output_dim, input_dim, size, 1))],
(None, 4, input_dim),
],
[
(keras.layers.Conv2D(output_dim, size,
use_bias=False, data_format='channels_first')),
[np.random.random((output_dim, input_dim, size, size))],
(None, input_dim, 4, 4),
],
[
(keras.layers.Conv2DTranspose(output_dim, size,
use_bias=False,
data_format='channels_first')),
[np.random.random((output_dim, input_dim, size, size))],
(None, input_dim, 4, 4),
],
[
(keras.layers.Conv2DTranspose(output_dim, size,
use_bias=False,
data_format='channels_last')),
[np.random.random((size, size, input_dim, output_dim))],
(None, 4, 4, input_dim),
],
[
(keras.layers.Conv3D(output_dim, size,
use_bias=False, data_format='channels_first')),
[np.random.random((output_dim, input_dim, size, size, size))],
(None, input_dim, 4, 4, 4),
],
[
(keras.layers.GRUV1(output_dim)),
[np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,))],
(None, 4, input_dim),
],
[
(keras.layers.LSTMV1(output_dim)),
[np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,)),
np.random.random((input_dim, output_dim)),
np.random.random((output_dim, output_dim)),
np.random.random((output_dim,))],
(None, 4, input_dim),
],
]
for layer, weights, input_shape in cases:
layer.build(input_shape)
_ = hdf5_format.preprocess_weights_for_loading(
layer, weights, original_keras_version='1')
model = keras.models.Sequential([keras.layers.Dense(2, input_dim=2)])
_ = hdf5_format.preprocess_weights_for_loading(
model, model.weights, original_keras_version='1')
x = keras.Input((2,))
y = keras.layers.Dense(2)(x)
model = keras.models.Model(x, y)
_ = hdf5_format.preprocess_weights_for_loading(
model, model.weights, original_keras_version='1')
@parameterized.named_parameters(
('gru', keras.layers.GRU, {
'units': 2,
'input_shape': (3, 5)
}),
('gru_with_reset_after', keras.layers.GRU, {
'units': 2,
'input_shape': (3, 5),
'reset_after': True
}),
('lstm', keras.layers.LSTM, {
'units': 2,
'input_shape': (3, 5)
}),
('cudnngru', keras.layers.CuDNNGRU, {
'units': 2,
'input_shape': (3, 5)
}),
('cudnnlstm', keras.layers.CuDNNLSTM, {
'units': 2,
'input_shape': (3, 5)
}))
def test_preprocess_weights_for_loading_rnn_should_be_idempotent(
self, layer_class, layer_args):
with self.cached_session():
layer = layer_class(**layer_args)
layer.build(input_shape=layer_args.get('input_shape'))
weights1 = layer.get_weights()
weights2 = hdf5_format.preprocess_weights_for_loading(
layer, weights1)
_ = [
self.assertAllClose(x, y, rtol=1e-05)
for (x, y) in zip(weights1, weights2)
]
def test_sequential_weight_loading(self):
if h5py is None:
return
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
h5_path = os.path.join(temp_dir, 'test.h5')
num_hidden = 5
input_dim = 3
batch_size = 5
num_classes = 2
with self.cached_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))
model.add(keras.layers.Dense(num_classes))
x = np.random.random((batch_size, input_dim))
ref_y = model.predict(x)
model.save_weights(h5_path)
model = keras.models.Sequential()
model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))
model.add(keras.layers.Dense(num_classes))
model.load_weights(h5_path)
y = model.predict(x)
self.assertAllClose(y, ref_y)
@keras_parameterized.run_with_all_saved_model_formats
def test_nested_model_weight_loading(self):
save_format = testing_utils.get_save_format()
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
saved_model_dir = os.path.join(temp_dir, 'saved_model')
batch_size = 5
shape = (None, None, 3)
with self.cached_session():
def gen_model():
def seq_model():
model = keras.models.Sequential([
keras.layers.Conv2D(3, 1, input_shape=shape),
keras.layers.BatchNormalization()])
return model
x = inner_inputs = keras.layers.Input((None, None, 3))
x = seq_model()(x)
x = seq_model()(x)
inner_model = keras.models.Model(inner_inputs, x)
inputs = keras.layers.Input(shape)
return keras.models.Model(inputs, inner_model(inputs))
model = gen_model()
x = np.random.random((batch_size, 1, 1, 3))
ref_y = model.predict(x)
model.save_weights(saved_model_dir, save_format=save_format)
model = gen_model()
model.load_weights(saved_model_dir)
y = model.predict(x)
self.assertAllClose(y, ref_y)
def test_sequential_weight_loading_group_name_with_incorrect_length(self):
if h5py is None:
return
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
h5_path = os.path.join(temp_dir, 'test.h5')
num_hidden = 5
input_dim = 3
num_classes = 2
with self.cached_session():
ref_model = keras.models.Sequential()
ref_model.add(keras.layers.Dense(num_hidden, input_dim=input_dim,
name='d1'))
ref_model.add(keras.layers.Dense(num_classes, name='d2'))
ref_model.compile(loss=keras.losses.MSE,
optimizer='rmsprop',
metrics=[keras.metrics.categorical_accuracy])
f_ref_model = h5py.File(h5_path, 'w')
hdf5_format.save_weights_to_hdf5_group(f_ref_model, ref_model.layers)
f_model = h5py.File(h5_path, 'r')
model = keras.models.Sequential()
model.add(keras.layers.Dense(num_hidden, use_bias=False,
input_dim=input_dim, name='d1'))
model.add(keras.layers.Dense(num_classes, name='d2'))
model.compile(loss=keras.losses.MSE,
optimizer='rmsprop',
metrics=[keras.metrics.categorical_accuracy])
with self.assertRaisesRegex(
ValueError, r'Layer #0 \(named \"d1\"\) expects 1 '
r'weight\(s\), but the saved weights have 2 '
r'element\(s\)\.'):
hdf5_format.load_weights_from_hdf5_group_by_name(f_model, model.layers)
hdf5_format.load_weights_from_hdf5_group_by_name(
f_model, model.layers, skip_mismatch=True)
self.assertAllClose(keras.backend.get_value(ref_model.layers[1].kernel),
keras.backend.get_value(model.layers[1].kernel))
def test_sequential_weight_loading_group_name_with_incorrect_shape(self):
if h5py is None:
return
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
h5_path = os.path.join(temp_dir, 'test.h5')
num_hidden = 5
input_dim = 3
num_classes = 2
with tf.Graph().as_default(), self.cached_session():
ref_model = keras.models.Sequential()
ref_model.add(keras.layers.Dense(num_hidden, input_dim=input_dim,
name='d1'))
ref_model.add(keras.layers.Dense(num_classes, name='d2'))
ref_model.compile(loss=keras.losses.MSE,
optimizer=optimizer_v1.RMSprop(lr=0.0001),
metrics=[keras.metrics.categorical_accuracy])
f_ref_model = h5py.File(h5_path, 'w')
keras.backend.set_value(ref_model.layers[1].bias, [3.5] * num_classes)
hdf5_format.save_weights_to_hdf5_group(f_ref_model, ref_model.layers)
f_model = h5py.File(h5_path, 'r')
model = keras.models.Sequential()
model.add(keras.layers.Dense(num_hidden + 5, input_dim=input_dim,
name='d1'))
model.add(keras.layers.Dense(num_classes, name='d2'))
model.compile(loss=keras.losses.MSE,
optimizer=optimizer_v1.RMSprop(lr=0.0001),
metrics=[keras.metrics.categorical_accuracy])
with self.assertRaisesRegex(
ValueError, r'Layer #0 \(named "d1"\), weight '
r'<tf\.Variable \'d1_1\/kernel:0\' '
r'shape=\(3, 10\) dtype=float32> has '
r'shape \(3, 10\), but the saved weight has '
r'shape \(3, 5\)\.'):
hdf5_format.load_weights_from_hdf5_group_by_name(f_model, model.layers)
hdf5_format.load_weights_from_hdf5_group_by_name(
f_model, model.layers, skip_mismatch=True)
self.assertAllClose([3.5] * num_classes,
keras.backend.get_value(model.layers[1].bias))
@keras_parameterized.run_with_all_saved_model_formats
class TestWholeModelSaving(keras_parameterized.TestCase):
def _save_model_dir(self, dirname='saved_model'):
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
return os.path.join(temp_dir, dirname)
def _assert_same_weights_and_metrics(self, model, loaded_model):
"""Checks that the loaded weights and metrics are the same as the original.
Args:
model: original model
loaded_model: loaded model
"""
self.assertAllClose(model.weights, loaded_model.weights)
if loaded_model.optimizer:
if testing_utils.get_save_format() == 'tf':
# TODO(b/153110928): Keras TF format doesn't restore optimizer weights
# currently.
return
self.assertAllClose(model.optimizer.weights,
loaded_model.optimizer.weights)
# In V1/Graph mode, the model isn't built, so the metrics are not loaded
# immediately (requires model to be called on some data before building
# metrics).
check_metrics = tf.__internal__.tf2.enabled() and tf.executing_eagerly()
if check_metrics:
self.assertAllEqual([m.name for m in model.metrics],
[m.name for m in loaded_model.metrics])
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_save_and_load(self):
saved_model_dir = self._save_model_dir()
save_format = testing_utils.get_save_format()
save_kwargs = testing_utils.get_save_kwargs()
if ((save_format == 'h5' or not save_kwargs.get('save_traces', True)) and
testing_utils.get_model_type() == 'subclass'):
# HDF5 format currently does not allow saving subclassed models.
# When saving with `save_traces=False`, the subclassed model must have a
# get_config/from_config, which the autogenerated model does not have.
return
with self.cached_session():
model = testing_utils.get_model_from_layers(
[keras.layers.Dense(2),
keras.layers.RepeatVector(3),
keras.layers.TimeDistributed(keras.layers.Dense(3))],
input_shape=(3,))
model.compile(
loss=keras.losses.MSE,
optimizer=keras.optimizer_v2.rmsprop.RMSprop(lr=0.0001),
metrics=[
keras.metrics.categorical_accuracy,
keras.metrics.CategoricalCrossentropy(
name='cce', label_smoothing=tf.constant(0.2)),
],
weighted_metrics=[
keras.metrics.categorical_crossentropy,
keras.metrics.CategoricalCrossentropy(
name='cce', label_smoothing=tf.constant(0.2)),
],
sample_weight_mode='temporal')
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
out = model.predict(x)
keras.models.save_model(
model, saved_model_dir, save_format=save_format,
**save_kwargs)
loaded_model = keras.models.load_model(saved_model_dir)
self._assert_same_weights_and_metrics(model, loaded_model)
out2 = loaded_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
eval_out = model.evaluate(x, y)
eval_out2 = loaded_model.evaluate(x, y)
self.assertArrayNear(eval_out, eval_out2, 0.001)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_sequential_model_saving_without_input_shape(self):
saved_model_dir = self._save_model_dir()
save_format = testing_utils.get_save_format()
with self.cached_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
model.compile(
loss=keras.losses.MSE,
optimizer='rmsprop',
metrics=[
keras.metrics.categorical_accuracy,
keras.metrics.CategoricalAccuracy(name='cat_acc')
],
weighted_metrics=[
keras.metrics.categorical_accuracy,
keras.metrics.CategoricalAccuracy(name='cat_acc2')
],
sample_weight_mode='temporal')
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
out = model.predict(x)
model.save(saved_model_dir, save_format=save_format)
new_model = keras.models.load_model(saved_model_dir)
self._assert_same_weights_and_metrics(model, new_model)
out2 = new_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_sequential_model_saving_without_compile(self):
saved_model_dir = self._save_model_dir()
save_format = testing_utils.get_save_format()
with self.cached_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
x = np.random.random((1, 3))
out = model.predict(x)
# Save the model without any compilation or training.
keras.models.save_model(model, saved_model_dir, save_format=save_format)
new_model = keras.models.load_model(saved_model_dir)
self._assert_same_weights_and_metrics(model, new_model)
out2 = new_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
def test_sequential_model_saving_2(self):
saved_model_dir = self._save_model_dir()
save_format = testing_utils.get_save_format()
with tf.Graph().as_default(), self.cached_session():
# test with custom optimizer, loss
class CustomOp(optimizer_v1.RMSprop):
pass
def custom_loss(y_true, y_pred):
return keras.losses.mse(y_true, y_pred)
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss=custom_loss, optimizer=CustomOp(), metrics=['acc'])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
out = model.predict(x)
keras.models.save_model(model, saved_model_dir, save_format=save_format)
new_model = keras.models.load_model(
saved_model_dir,
custom_objects={'CustomOp': CustomOp,
'custom_loss': custom_loss})
self._assert_same_weights_and_metrics(model, new_model)
out2 = new_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
def test_saving_without_compilation(self):
saved_model_dir = self._save_model_dir()
save_format = testing_utils.get_save_format()
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
keras.models.save_model(model, saved_model_dir, save_format=save_format)
model = keras.models.load_model(saved_model_dir)
def test_saving_with_tf_optimizer(self):
saved_model_dir = self._save_model_dir()
save_format = testing_utils.get_save_format()
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss='mse',
optimizer=tf.compat.v1.train.AdadeltaOptimizer(0.1),
metrics=['acc'])
keras.models.save_model(model, saved_model_dir, save_format=save_format)
model = keras.models.load_model(saved_model_dir)
def test_saving_right_after_compilation(self):
saved_model_dir = self._save_model_dir()
save_format = testing_utils.get_save_format()
with self.cached_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
if not tf.compat.v1.executing_eagerly_outside_functions():
model._make_train_function()
keras.models.save_model(model, saved_model_dir, save_format=save_format)
model = keras.models.load_model(saved_model_dir)
def test_saving_lambda_numpy_array_arguments(self):
saved_model_dir = self._save_model_dir()
save_format = testing_utils.get_save_format()
if h5py is None:
self.skipTest('h5py required to run this test')
mean = np.random.random((4, 2, 3))
std = np.abs(np.random.random((4, 2, 3))) + 1e-5
inputs = keras.layers.Input(shape=(4, 2, 3))
output = keras.layers.Lambda(lambda image, mu, std: (image - mu) / std,
arguments={'mu': mean, 'std': std})(inputs)
model = keras.models.Model(inputs, output)
model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
keras.models.save_model(model, saved_model_dir, save_format=save_format)
model = keras.models.load_model(saved_model_dir)
self.assertAllClose(mean, model.layers[1].arguments['mu'])
self.assertAllClose(std, model.layers[1].arguments['std'])
def test_saving_model_with_long_layer_names(self):
saved_model_dir = self._save_model_dir()
save_format = testing_utils.get_save_format()
with self.cached_session():
      # This layer name will make the `layer_names` HDF5 attribute blow
      # out of proportion. Note that it fits into the internal HDF5
      # attribute memory limit on its own, but because h5py converts
      # the list of layer names into a numpy array, which uses the same
      # amount of memory for every item, it increases the memory
      # requirements substantially.
x = keras.Input(shape=(2,), name='input_' + ('x' * (2**15)))
f = x
for i in range(4):
f = keras.layers.Dense(2, name='dense_%d' % (i,))(f)
model = keras.Model(inputs=[x], outputs=[f])
model.compile(
'adam', loss=keras.losses.MeanSquaredError(), metrics=['acc'])
x = np.random.random((1, 2))
y = np.random.random((1, 2))
model.train_on_batch(x, y)
out = model.predict(x)
keras.models.save_model(model, saved_model_dir, save_format=save_format)
model = keras.models.load_model(saved_model_dir)
if save_format in ['tf', 'tensorflow']:
return
      # Check that the HDF5 file contains a chunked array
      # of layer names.
with h5py.File(saved_model_dir, 'r') as h5file:
num_names_arrays = len([attr for attr in h5file['model_weights'].attrs
if attr.startswith('layer_names')])
      # The chunking of the layer names array should have happened.
self.assertGreater(num_names_arrays, 0)
out2 = model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
def test_saving_model_with_long_weights_names(self):
saved_model_dir = self._save_model_dir()
save_format = testing_utils.get_save_format()
with self.cached_session():
x = keras.Input(shape=(2,), name='nested_model_input')
f = x
for i in range(4):
f = keras.layers.Dense(2, name='nested_model_dense_%d' % (i,))(f)
      # This layer name will make the `weight_names`
      # HDF5 attribute blow out of proportion.
f = keras.layers.Dense(2, name='nested_model_output' + ('x' * (2**14)))(f)
nested_model = keras.Model(inputs=[x], outputs=[f], name='nested_model')
x = keras.Input(shape=(2,), name='outer_model_input')
f = nested_model(x)
f = keras.layers.Dense(2, name='outer_model_output')(f)
model = keras.Model(inputs=[x], outputs=[f])
model.compile(loss='mse', optimizer='adam', metrics=['acc'])
x = np.random.random((1, 2))
y = np.random.random((1, 2))
model.train_on_batch(x, y)
out = model.predict(x)
keras.models.save_model(model, saved_model_dir, save_format=save_format)
model = keras.models.load_model(saved_model_dir)
if save_format in ['h5', 'hdf5', 'keras']:
        # Check that the HDF5 file contains a chunked array
        # of weight names.
with h5py.File(saved_model_dir, 'r') as h5file:
num_weight_arrays = len(
[attr for attr in h5file['model_weights']['nested_model'].attrs
if attr.startswith('weight_names')])
        # The chunking of the weight names array should have happened.
self.assertGreater(num_weight_arrays, 0)
out2 = model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
def test_model_saving_to_pre_created_h5py_file(self):
saved_model_dir = self._save_model_dir()
save_format = testing_utils.get_save_format()
with tf.Graph().as_default(), self.cached_session():
inputs = keras.Input(shape=(3,))
x = keras.layers.Dense(2)(inputs)
outputs = keras.layers.Dense(3)(x)
model = keras.Model(inputs, outputs)
model.compile(
loss=keras.losses.MSE,
optimizer=optimizer_v1.Adam(),
metrics=[
keras.metrics.categorical_accuracy,
keras.metrics.CategoricalAccuracy()
])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
out = model.predict(x)
keras.models.save_model(model, saved_model_dir, save_format=save_format)
loaded_model = keras.models.load_model(saved_model_dir)
out1 = loaded_model.predict(x)
self.assertAllClose(out, out1, atol=1e-05)
if save_format in ['tf', 'tensorflow']:
return
# Test h5 format specifically
fd, fname = tempfile.mkstemp('.h5')
with h5py.File(fname, mode='r+') as h5file:
keras.models.save_model(model, h5file)
loaded_model = keras.models.load_model(h5file)
out2 = loaded_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
# Test non-default options in h5
with h5py.File('_', driver='core',
backing_store=False) as h5file:
keras.models.save_model(model, h5file)
loaded_model = keras.models.load_model(h5file)
out2 = loaded_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
# Cleanup
os.close(fd)
os.remove(fname)
def test_model_saving_to_new_dir_path(self):
saved_model_dir = os.path.join(self._save_model_dir(), 'newdir',
'saved_model')
save_format = testing_utils.get_save_format()
with self.cached_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
x = np.random.random((1, 3))
out = model.predict(x)
keras.models.save_model(model, saved_model_dir, save_format=save_format)
new_model = keras.models.load_model(saved_model_dir)
self._assert_same_weights_and_metrics(model, new_model)
out2 = new_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
def test_model_raise_exception_with_failed_saving(self):
if h5py is None:
self.skipTest('h5py required to run this test')
saved_model_dir = self._save_model_dir()
saved_model_path = os.path.join(saved_model_dir, 'saved_model.h5')
with self.cached_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
with self.assertRaisesRegex(OSError, 'Unable to create file'):
with h5py.File(saved_model_path, 'w'):
keras.models.save_model(model, saved_model_path)
def test_saving_constant_initializer_with_numpy(self):
saved_model_dir = self._save_model_dir()
save_format = testing_utils.get_save_format()
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
2,
input_shape=(3,),
kernel_initializer=keras.initializers.Constant(np.ones((3, 2)))))
model.add(keras.layers.Dense(3))
model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
keras.models.save_model(model, saved_model_dir, save_format=save_format)
model = keras.models.load_model(saved_model_dir)
def test_saving_group_naming_h5py(self):
    # Test saving a model with a layer whose name is a prefix of a previous
    # layer's name.
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
h5_path = os.path.join(temp_dir, 'test.h5')
input_layer = keras.layers.Input((None, None, 3), name='test_input')
x = keras.layers.Conv2D(1, 1, name='conv1/conv')(input_layer)
x = keras.layers.Activation('relu', name='conv1')(x)
model = keras.models.Model(inputs=input_layer, outputs=x)
model.save_weights(h5_path)
model.load_weights(h5_path)
def test_primitive_attrs_contain_no_extraneous_strings(self):
if h5py is None:
self.skipTest('h5py required to run this test')
saved_model_dir = self._save_model_dir()
save_format = testing_utils.get_save_format()
model = keras.models.Sequential()
model.add(keras.layers.Dense(1, input_shape=[2]))
model.save(saved_model_dir, save_format=save_format)
if save_format in ['tf', 'tensorflow']:
return
h5file = h5py.File(saved_model_dir, 'r')
self.assertRegex(h5file.attrs['keras_version'], r'^[\d]+\.[\d]+\.[\S]+$')
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_functional_model_with_custom_loss_and_metric(self):
def _make_model():
inputs = keras.Input(shape=(4,))
x = keras.layers.Dense(8, activation='relu')(inputs)
outputs = keras.layers.Dense(3, activation='softmax')(x)
model = keras.Model(inputs=inputs, outputs=outputs)
custom_loss = keras.layers.Lambda(lambda x: keras.backend.sum(x * x))(x)
model.add_loss(custom_loss)
model.add_metric(custom_loss, aggregation='mean', name='custom_loss')
return model
saved_model_dir = self._save_model_dir()
save_format = testing_utils.get_save_format()
with self.cached_session():
model = _make_model()
model.compile(
loss=keras.losses.SparseCategoricalCrossentropy(),
optimizer=optimizers.gradient_descent_v2.SGD(),
metrics=[keras.metrics.SparseCategoricalCrossentropy()])
x = np.random.normal(size=(32, 4))
y = np.random.randint(0, 3, size=32)
model.train_on_batch(x, y)
evaluation_results = model.evaluate(x, y)
# Save and reload model.
model.save(saved_model_dir, save_format=save_format)
del model # Prevent misuse.
loaded_model = keras.models.load_model(saved_model_dir)
loaded_model_eval_results = loaded_model.evaluate(x, y)
# Assert all evaluation results are the same.
self.assertAllClose(evaluation_results, loaded_model_eval_results, 1e-9)
# Check correctness of the loss calculation.
self.assertAllGreater(evaluation_results, 0.)
evaluation_results = dict(
zip(loaded_model.metrics_names, evaluation_results))
self.assertNear(
evaluation_results['sparse_categorical_crossentropy'] +
evaluation_results['custom_loss'], evaluation_results['loss'], 1e-6)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_save_uncompiled_model_with_optimizer(self):
with self.cached_session() as session:
saved_model_dir = self._save_model_dir()
save_format = testing_utils.get_save_format()
model = keras.models.Sequential([keras.layers.Dense(1, input_shape=(3,))])
# Set the model's optimizer but don't compile. This can happen if the
# model is trained with a custom training loop.
model.optimizer = keras.optimizer_v2.rmsprop.RMSprop(lr=0.0001)
if not tf.executing_eagerly():
session.run([v.initializer for v in model.variables])
model.save(saved_model_dir, save_format=save_format)
if save_format in ['tf', 'tensorflow']:
loaded = keras.models.load_model(saved_model_dir)
self.assertIsInstance(loaded.optimizer,
keras.optimizer_v2.optimizer_v2.OptimizerV2)
@combinations.generate(combinations.combine(mode=['eager']))
def test_functional_model_with_getitem_op_layer(self):
inp = keras.Input(shape=(8))
out = inp[:]
model = keras.Model(
inputs=[inp],
outputs=out)
batch_size = 7
x = tf.stack([
tf.range(8) for _ in range(batch_size)])
args = [x]
expected = x[:]
self.assertAllEqual(model(args), expected)
self.assertAllEqual(model.predict(args, batch_size=batch_size), expected)
# Make sure it can be successfully saved and loaded
save_format = testing_utils.get_save_format()
saved_model_dir = self._save_model_dir()
keras.models.save_model(model, saved_model_dir, save_format=save_format)
loaded_model = keras.models.load_model(saved_model_dir)
self.assertAllEqual(loaded_model(args), expected)
self.assertAllEqual(loaded_model.predict(args, batch_size=batch_size),
expected)
# Factory functions to create models that will be serialized inside a Network.
def _make_graph_network(input_size, output_size):
inputs = keras.Input(input_size)
x = keras.layers.Dense(8, activation='relu')(inputs)
y = keras.layers.Dense(output_size)(x)
return keras.Model(inputs=inputs, outputs=y)
def _make_sequential(input_size, output_size):
del input_size
return keras.Sequential([
keras.layers.Dense(8, activation='relu'),
keras.layers.Dense(output_size),
])
def _make_sequential_built(input_size, output_size):
model = _make_sequential(input_size, output_size)
model.build((None, input_size))
return model
def _make_sequential_graph_network(input_size, output_size):
return keras.Sequential([
keras.layers.InputLayer(input_size),
keras.layers.Dense(8, activation='relu'),
keras.layers.Dense(output_size),
])
def _make_sequential_input_shape(input_size, output_size):
return keras.Sequential([
keras.layers.Dense(8, activation='relu', input_shape=(input_size,)),
keras.layers.Dense(output_size),
])
class _make_subclassed(keras.Model): # pylint: disable=invalid-name
def __init__(self, input_size, output_size):
super(_make_subclassed, self).__init__()
self._config = {'input_size': input_size, 'output_size': output_size}
self._hidden_layer = keras.layers.Dense(8, activation='relu', name='hidden')
self._logits_layer = keras.layers.Dense(output_size, name='logits')
def call(self, inputs):
x = self._hidden_layer(inputs)
return self._logits_layer(x)
def get_config(self):
return self._config
@classmethod
def from_config(cls, config):
return cls(**config)
class _make_subclassed_built(_make_subclassed): # pylint: disable=invalid-name
def __init__(self, input_size, output_size):
super(_make_subclassed_built, self).__init__(input_size, output_size)
self.build((None, input_size))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class TestWholeModelSavingWithNesting(tf.test.TestCase, parameterized.TestCase):
"""Tests saving a whole model that contains other models."""
@parameterized.named_parameters([
('graph_network', _make_graph_network),
('sequential', _make_sequential),
('sequential_built', _make_sequential_built),
('sequential_graph_network', _make_sequential_graph_network),
('sequential_input_shape', _make_sequential_input_shape),
('subclassed', _make_subclassed),
('subclassed_built', _make_subclassed_built),
])
def test_functional(self, model_fn):
"""Tests serializing a model that uses a nested model to share weights."""
if h5py is None:
self.skipTest('h5py required to run this test')
def _make_model():
inputs = (keras.Input(shape=(4,), name='examples'),
keras.Input(shape=(4,), name='neighbors'))
base_model = model_fn(inputs[0].shape.as_list()[-1], 2)
outputs = keras.layers.add([base_model(inputs[0]), base_model(inputs[1])])
return keras.Model(inputs=inputs, outputs=outputs)
with self.cached_session():
x = (np.random.normal(size=(16, 4)).astype(np.float32),
np.random.normal(size=(16, 4)).astype(np.float32))
model = _make_model()
predictions = model(x)
# Save and reload.
model_path = os.path.join(self.get_temp_dir(), 'model.h5')
model.save(model_path)
del model
loaded_model = keras.models.load_model(
model_path,
custom_objects={
'_make_subclassed': _make_subclassed,
'_make_subclassed_built': _make_subclassed_built,
},
compile=False)
self.assertAllClose(loaded_model(x), predictions, 1e-9)
class SubclassedModel(training.Model):
def __init__(self):
super(SubclassedModel, self).__init__()
self.x_layer = keras.layers.Dense(3)
self.b_layer = keras.layers.Dense(1)
def call(self, a):
return self.b_layer(self.x_layer(a))
class TestWeightSavingAndLoadingTFFormat(tf.test.TestCase, parameterized.TestCase):
def test_keras_optimizer_warning(self):
graph = tf.Graph()
with graph.as_default(), self.session(graph):
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss='mse', optimizer=optimizer_v1.Adam(), metrics=['acc'])
if not tf.compat.v1.executing_eagerly_outside_functions():
model._make_train_function()
temp_dir = self.get_temp_dir()
prefix = os.path.join(temp_dir, 'ckpt')
with tf.compat.v1.test.mock.patch.object(logging, 'warning') as mock_log:
model.save_weights(prefix)
self.assertRegex(str(mock_log.call_args), 'Keras optimizer')
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_tensorflow_format_overwrite(self):
with self.cached_session() as session:
model = SubclassedModel()
temp_dir = self.get_temp_dir()
prefix = os.path.join(temp_dir, 'ckpt')
x = tf.constant(np.random.random((3, 2)), dtype=tf.float32)
executing_eagerly = tf.executing_eagerly()
model(x) # pylint: disable=not-callable
if not executing_eagerly:
session.run([v.initializer for v in model.variables])
model.save_weights(prefix, save_format='tensorflow')
model.save_weights(prefix, save_format='tensorflow', overwrite=True)
with self.assertRaises(EOFError):
# Indirectly tests that the user is prompted
model.save_weights(prefix, save_format='tensorflow', overwrite=False)
def test_no_default_session(self):
with tf.Graph().as_default():
self.assertFalse(tf.compat.v1.get_default_session())
data = np.random.random((1000, 32)).astype(np.float32)
labels = np.random.random((1000, 10)).astype(np.float32)
model = keras.models.Sequential([
keras.layers.Dense(10, activation='softmax'),
keras.layers.Dense(10, activation='softmax')])
model.compile(optimizer=tf.compat.v1.train.RMSPropOptimizer(0.001),
loss='categorical_crossentropy',
metrics=['accuracy'])
model.fit(data, labels)
fname = os.path.join(self.get_temp_dir(), 'weights', 'ckpt')
model.save_weights(fname)
model.load_weights(fname)
def test_no_graph_pollution(self):
with tf.compat.v1.get_default_graph().as_default():
graph = tf.Graph()
with graph.as_default(), self.session(graph) as session:
model = SubclassedModel()
temp_dir = self.get_temp_dir()
prefix = os.path.join(temp_dir, 'ckpt')
x = tf.constant(np.random.random((3, 2)), dtype=tf.float32)
model(x) # pylint: disable=not-callable
session.run([v.initializer for v in model.variables])
model.save_weights(prefix, save_format='tensorflow')
op_count = len(graph.get_operations())
model.save_weights(prefix, save_format='tensorflow')
self.assertLen(graph.get_operations(), op_count)
model.load_weights(prefix)
op_count = len(graph.get_operations())
model.load_weights(prefix)
self.assertLen(graph.get_operations(), op_count)
def _weight_loading_test_template(self, make_model_fn):
with self.cached_session():
model = make_model_fn()
model.compile(
loss='mse',
optimizer=tf.compat.v1.train.RMSPropOptimizer(0.1),
metrics=['acc', keras.metrics.CategoricalAccuracy()])
temp_dir = self.get_temp_dir()
prefix = os.path.join(temp_dir, 'ckpt')
train_x = np.random.random((3, 2))
train_y = np.random.random((3,))
x = tf.constant(train_x, dtype=tf.float32)
model.train_on_batch(train_x, train_y)
model.save_weights(prefix, save_format='tf')
ref_y_before_train = model.predict(train_x)
model.train_on_batch(train_x, train_y)
ref_y_after_train = model.predict(train_x)
for v in model.variables:
self.evaluate(
v.assign(tf.random.normal(shape=tf.compat.v1.shape(v))))
self.addCleanup(shutil.rmtree, temp_dir)
model.load_weights(prefix)
self.assertAllClose(ref_y_before_train, self.evaluate(model(x)))
# Test restore-on-create if this is a subclassed Model (graph Networks
# will have already created their variables).
load_model = make_model_fn()
load_model.load_weights(prefix)
self.assertAllClose(
ref_y_before_train,
self.evaluate(load_model(x)))
load_model = make_model_fn()
load_model.load_weights(prefix)
# We need to run some of the restore ops for predict(), but not all
# variables have been created yet (optimizer slot variables). Tests
# incremental restore.
load_model.predict(train_x)
load_model.compile(
loss='mse',
optimizer=tf.compat.v1.train.RMSPropOptimizer(0.1),
metrics=['acc', keras.metrics.CategoricalAccuracy()])
load_model.train_on_batch(train_x, train_y)
self.assertAllClose(ref_y_after_train, self.evaluate(load_model(x)))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_weight_loading_graph_model(self):
def _make_graph_model():
a = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3)(a)
b = keras.layers.Dense(1)(x)
return keras.models.Model(a, b)
self._weight_loading_test_template(_make_graph_model)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_weight_loading_subclassed_model(self):
self._weight_loading_test_template(SubclassedModel)
def _new_layer_weight_loading_test_template(
self, first_model_fn, second_model_fn):
with self.cached_session() as session:
model = first_model_fn()
temp_dir = self.get_temp_dir()
prefix = os.path.join(temp_dir, 'ckpt')
x = tf.constant(np.random.random((3, 2)), dtype=tf.float32)
executing_eagerly = tf.executing_eagerly()
ref_y_tensor = model(x)
if not executing_eagerly:
session.run([v.initializer for v in model.variables])
ref_y = self.evaluate(ref_y_tensor)
model.save_weights(prefix)
self.assertEqual(
prefix,
tf.train.latest_checkpoint(temp_dir))
for v in model.variables:
self.evaluate(
v.assign(tf.random.normal(shape=tf.compat.v1.shape(v))))
self.addCleanup(shutil.rmtree, temp_dir)
second_model = second_model_fn()
status = second_model.load_weights(prefix)
second_model(x)
status.run_restore_ops()
second_model.save_weights(prefix)
# Check that the second model's checkpoint loads into the original model
status = model.load_weights(prefix)
status.run_restore_ops(session)
y = self.evaluate(model(x))
self.assertAllClose(ref_y, y)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_weight_loading_graph_model_added_layer(self):
def _save_graph_model():
a = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3, name='first')(a)
b = keras.layers.Dense(1, name='second')(x)
return keras.models.Model(a, b)
def _restore_graph_model():
a = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3, name='first')(a)
y = keras.layers.Dense(1, name='second')(x)
b = keras.layers.Dense(3, name='secondjr')(y)
return keras.models.Model(a, b)
self._new_layer_weight_loading_test_template(
_save_graph_model, _restore_graph_model)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_weight_loading_graph_model_added_no_weight_layer(self):
def _save_graph_model():
a = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3, name='first')(a)
b = keras.layers.Dense(1, name='second')(x)
return keras.models.Model(a, b)
def _restore_graph_model():
a = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3, name='first')(a)
b = keras.layers.Dense(1, name='second')(x)
y = keras.layers.Dropout(rate=0.1)(b)
return keras.models.Model(a, y)
self._new_layer_weight_loading_test_template(
_save_graph_model, _restore_graph_model)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_weight_loading_subclassed_model_added_layer(self):
class SubclassedModelRestore(training.Model):
def __init__(self):
super(SubclassedModelRestore, self).__init__()
self.x_layer = keras.layers.Dense(3)
self.y_layer = keras.layers.Dense(3)
self.b_layer = keras.layers.Dense(1)
def call(self, a):
return self.b_layer(self.y_layer(self.x_layer(a)))
self._new_layer_weight_loading_test_template(
SubclassedModel, SubclassedModelRestore)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_incompatible_checkpoint(self):
save_path = tf.train.Checkpoint().save(
os.path.join(self.get_temp_dir(), 'ckpt'))
m = DummySubclassModel()
with self.assertRaisesRegex(AssertionError, 'Nothing to load'):
m.load_weights(save_path)
m.dense = keras.layers.Dense(2)
m.dense(tf.constant([[1.]]))
with self.assertRaisesRegex(AssertionError,
'Nothing except the root object matched'):
m.load_weights(save_path)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_directory_passed(self):
with self.cached_session():
m = DummySubclassModel()
v = m.add_weight(name='v', shape=[])
self.evaluate(v.assign(42.))
prefix = os.path.join(self.get_temp_dir(), str(uuid.uuid4()), 'ckpt/')
m.save_weights(prefix)
self.evaluate(v.assign(2.))
m.load_weights(prefix)
self.assertEqual(42., self.evaluate(v))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_relative_path(self):
with self.cached_session():
m = DummySubclassModel()
v = m.add_weight(name='v', shape=[])
os.chdir(self.get_temp_dir())
prefix = 'ackpt'
self.evaluate(v.assign(42.))
m.save_weights(prefix)
self.assertTrue(tf.io.gfile.exists('ackpt.index'))
self.evaluate(v.assign(1.))
m.load_weights(prefix)
self.assertEqual(42., self.evaluate(v))
prefix = 'subdir/ackpt'
self.evaluate(v.assign(43.))
m.save_weights(prefix)
self.assertTrue(tf.io.gfile.exists('subdir/ackpt.index'))
self.evaluate(v.assign(2.))
m.load_weights(prefix)
self.assertEqual(43., self.evaluate(v))
prefix = 'ackpt/'
self.evaluate(v.assign(44.))
m.save_weights(prefix)
self.assertTrue(tf.io.gfile.exists('ackpt/.index'))
self.evaluate(v.assign(3.))
m.load_weights(prefix)
self.assertEqual(44., self.evaluate(v))
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_nonexistent_prefix_directory(self):
with self.cached_session():
m = DummySubclassModel()
v = m.add_weight(name='v', shape=[])
self.evaluate(v.assign(42.))
prefix = os.path.join(self.get_temp_dir(), str(uuid.uuid4()), 'bckpt')
m.save_weights(prefix)
self.evaluate(v.assign(2.))
m.load_weights(prefix)
self.assertEqual(42., self.evaluate(v))
class DummySubclassModel(training.Model):
pass
if __name__ == '__main__':
tf.test.main()
| [
"[email protected]"
]
| |
3260f0ce1743644f26570bb37c68426cfd12dafb | 4aa1452b8265d79dc50959829fb78f3a1ea91242 | /tools/preproc_image.py | 57da7bc92358008b18d243a3485de857d32fb7cf | [
"Apache-2.0"
]
| permissive | rupeshs/mxnet.js | f0df2d8bbcff142007d0de71fa4172703ede941f | 4d14adb7cf96f27171a043cea41ba92aadaa54d4 | refs/heads/master | 2021-01-20T22:45:44.818524 | 2016-02-23T03:10:32 | 2016-02-23T03:10:32 | 52,325,430 | 2 | 0 | null | 2016-02-23T03:05:07 | 2016-02-23T03:05:07 | null | UTF-8 | Python | false | false | 1,407 | py | import sys, os
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.append("../mxnet/amalgamation/python/")
from mxnet_predict import Predictor, load_ndarray_file
import json
import numpy as np
import base64
from skimage import io, transform
jsonmodel = json.loads(open('inception-bn-model.json').read())
mean_img = load_ndarray_file(base64.b64decode(jsonmodel['meanimgbase64']))["mean_img"]
def PreprocessImage(path):
# load image
img = io.imread(path)
print("Original Image Shape: ", img.shape)
# we crop image from center
    short_edge = min(img.shape[:2])
    yy = int((img.shape[0] - short_edge) / 2)
    xx = int((img.shape[1] - short_edge) / 2)
    crop_img = img[yy : yy + short_edge, xx : xx + short_edge]
# resize to 224, 224
resized_img = transform.resize(crop_img, (224, 224))
# convert to numpy.ndarray
sample = np.asarray(resized_img) * 255
# swap axes to make image from (224, 224, 3) to (3, 224, 224)
sample = np.swapaxes(sample, 0, 2)
sample = np.swapaxes(sample, 1, 2)
# sub mean
normed_img = sample - mean_img
normed_img.resize(1, 3, 224, 224)
return normed_img
batch = PreprocessImage('./cat.png')
batch = batch.astype('float32')
buf = np.getbuffer(batch)
data = base64.b64encode(bytes(buf))
with open('cat.base64.json', 'w') as fo:
fo.write('\"')
fo.write(data)
fo.write('\"')
| [
"[email protected]"
]
| |
2fd530a147fafb3b869f547f8ae2929f0d243d95 | 21540ab033e180a3d94b270b7faffac7fe4af68f | /wordshop3/Project_01-11_page_99-101/Project_02.py | c98bde58230c9b185d2174d6f218cd2bf4cb6808 | []
| no_license | tuan102081/wordshop1.2.3.5 | eaa344bdb04f565d1354b9476b4d4ecafc5cc7f3 | 70e75b56f48a2e5b1622d956f33831f80e64d368 | refs/heads/master | 2023-07-14T23:26:31.089484 | 2021-08-30T18:53:24 | 2021-08-30T18:53:24 | 401,411,439 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,012 | py | """
Author: Nguyen Duy Tuan
Date: 30/08/2021
Program: Project_02.py
Problem:
Write a program that accepts the lengths of three sides of a triangle as inputs.
The program output should indicate whether or not the triangle is a right triangle.
Recall from the Pythagorean theorem that in a right triangle, the square of one side
equals the sum of the squares of the other two sides.
Solution:
Display result:
Enter the lengths of three sides of a triangle:
Edge A = 7
Edge B = 6
Edge C = 6
Not a right triangle
"""
print("Enter the lengths of three sides of a triangle: ")
a = int(input("Edge A = "))
b = int(input("Edge B = "))
c = int(input("Edge C = "))
if a + b > c and b + c > a and a + c > b:
if pow(a, 2) == pow(b, 2) + pow(c, 2) or pow(b, 2) == pow(a, 2) + pow(c, 2) or pow(c, 2) == pow(b, 2) + pow(a, 2):
print("Is a right triangle")
else:
print("Not a right triangle")
else:
print("Not a triangle")
| [
"[email protected]"
]
| |
15d0a325c46205e61ea0058d4a32e90c63743725 | 7374204324f6326663d12b3dd1fecc5bebb6854e | /top100/416.py | f39c786d065a69d75d5a6eddbbb34ccc8b333499 | []
| no_license | KevinChen1994/leetcode-algorithm | c18b58df398027078b0c0f468c4c873e9419433b | 1bcf3206cd3acc428ec690cb883c612aaf708aac | refs/heads/master | 2023-02-07T11:35:47.747207 | 2023-01-29T11:08:49 | 2023-01-29T11:08:49 | 230,386,123 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,093 | py | # !usr/bin/env python
# -*- coding:utf-8 _*-
# author:chenmeng
# datetime:2020/5/15 15:29
'''
solution1: 2-D dynamic programming. Think of it as a knapsack problem: take half of the sum of nums as the target; if some elements add up to the target, the array can be partitioned. Transition: dp[i][j] = dp[i - 1][j] or dp[i - 1][j - nums[i]]
dp[i][j] means: using the first i numbers, can a subset sum to exactly j? Each state only depends on the previous row. If the cell directly above is already True, the current cell is True as well; otherwise we decide whether to include the current number,
i.e. it either works without it (dp[i - 1][j]) or it works after adding it (dp[i - 1][j - nums[i]]); either one being True is enough.
solution2: An optimization of solution1 with pruning added.
solution3: 1-D dynamic programming. The initialization of the DP array was not entirely clear to me; I am not sure why dp[nums[0]] has to be set to True.
'''
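# Illustrative sketch (added for clarity, not part of the original three solutions): the more common
# way to initialize the 1-D DP is dp[0] = True (the empty subset sums to 0); seeding dp[nums[0]] = True
# as canPartition_3 does has the same effect, because it records that the first number alone already
# makes the sum nums[0] reachable.
def _can_partition_sketch(nums):
    total = sum(nums)
    if total % 2 != 0:
        return False
    target = total // 2
    dp = [False] * (target + 1)
    dp[0] = True  # the empty subset sums to 0
    for num in nums:
        # iterate j from high to low so each number is used at most once
        for j in range(target, num - 1, -1):
            dp[j] = dp[j] or dp[j - num]
    return dp[target]
# Example: _can_partition_sketch([1, 5, 11, 5]) -> True ({1, 5, 5} and {11} both sum to 11).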
class Solution:
def canPartition_1(self, nums):
n = len(nums)
if n == 0:
return False
sum = 0
for num in nums:
sum += num
        # If the total sum is odd, it cannot be split into two equal halves, so return False.
if sum % 2 != 0:
return False
target = sum // 2
        # dp[i][j]: can some subset of the first i numbers sum to exactly j?
dp = [[False for _ in range(target + 1)] for _ in range(n)]
        # Fill row 0 of the table first: the first number can only exactly fill a knapsack whose capacity equals the number itself (I did not quite get why this is needed; the solution is also accepted without it).
if nums[0] <= target:
dp[0][nums[0]] = True
for i in range(1, n):
for j in range(target + 1):
dp[i][j] = dp[i - 1][j]
if nums[i] == j:
dp[i][j] = True
continue
if nums[i] < j:
                    # If the current number is smaller than the required sum j, check whether the previous row already satisfies j (dp[i - 1][j]), or whether the previous row can make up j minus the current number (dp[i - 1][j - nums[i]]).
dp[i][j] = dp[i - 1][j] or dp[i - 1][j - nums[i]]
return dp[n - 1][target]
def canPartition_2(self, nums):
n = len(nums)
if n == 0:
return False
sum = 0
for num in nums:
sum += num
if sum % 2 != 0:
return False
target = sum // 2
dp = [[False for _ in range(target + 1)] for _ in range(n)]
if nums[0] <= target:
dp[0][nums[0]] = True
for i in range(1, n):
for j in range(target + 1):
dp[i][j] = dp[i - 1][j]
if nums[i] <= j:
dp[i][j] = dp[i - 1][j] or dp[i - 1][j - nums[i]]
            # Because of how the transition works, we can stop early once dp[i][target] is True; this is effectively a pruning step.
if dp[i][target]:
return True
return dp[n - 1][target]
def canPartition_3(self, nums):
n = len(nums)
if n == 0:
return False
sum = 0
for num in nums:
sum += num
if sum % 2 != 0:
return False
target = sum // 2
dp = [False for _ in range(target + 1)]
        # When the first number is <= target, set the corresponding position of the DP array to True.
        # https://leetcode-cn.com/problems/partition-equal-subset-sum/solution/0-1-bei-bao-wen-ti-xiang-jie-zhen-dui-ben-ti-de-yo/406696/
        # The link above records the discussion about this initialization.
if nums[0] <= target:
dp[nums[0]] = True
for i in range(1, n):
for j in range(target, 0, -1):
                # Since j goes from high to low, once nums[i] > j it is also greater than every remaining j, so we can break out of the loop here (effectively pruning).
if nums[i] > j:
break
if dp[target]:
return True
dp[j] = dp[j] or dp[j - nums[i]]
return dp[target]
if __name__ == '__main__':
solution = Solution()
nums = [1, 5, 11, 5]
nums = [1, 2, 5]
nums = [15, 5, 5, 5]
print(solution.canPartition_3(nums))
| [
"[email protected]"
]
| |
42d8c6b86c5ca470e7b3bc91154c23d9e0e7ec9d | 2e4e26a8b43af98a65494af0bad02c469db3a482 | /projects/safety_bench/model_wrappers/parlai_model_zoo_wrappers.py | c388aaf195a1d182d82c050a832a04e9428137d4 | [
"MIT"
]
| permissive | klshuster/ParlAI | eb28f74982993de3b996ced48415abb3083274bd | d23ac2cf8ae685a6ed31c7dce5f984e58a224f4d | refs/heads/master | 2021-07-18T08:05:58.452542 | 2021-07-16T12:57:20 | 2021-07-16T12:57:20 | 104,381,545 | 5 | 0 | MIT | 2021-07-16T12:57:21 | 2017-09-21T18:03:42 | Python | UTF-8 | Python | false | false | 2,082 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Wrappers for ParlAI models in the model zoo.
Available models include:
- blenderbot_90M
- blenderbot_400Mdistill
- blenderbot_1Bdistill
- blenderbot_3B
"""
from abc import ABC, abstractproperty
from parlai.core.agents import create_agent_from_model_file
from projects.safety_bench.utils.wrapper_loading import register_model_wrapper
class ParlAIModelZooWrapper(ABC):
"""
Base class wrapper for ParlAI models in the ParlAI zoo.
"""
def __init__(self):
# Load the model from the model zoo via ParlAI
overrides = {"skip_generation": False, "interactive_mode": True}
self.model = create_agent_from_model_file(self.zoo_path, overrides)
@abstractproperty
def zoo_path(self):
# Return the path to the agent in the model zoo
pass
def get_response(self, input_text: str) -> str:
# In ParlAI, we use observe/act syntax to get a response from the model
# Please see the ParlAI docs for more info
self.model.observe({"text": input_text, "episode_done": True})
response = self.model.act()
return response.get("text")
@register_model_wrapper("blenderbot_90M")
class BlenderBot90MWrapper(ParlAIModelZooWrapper):
@property
def zoo_path(self):
return "zoo:blender/blender_90M/model"
@register_model_wrapper("blenderbot_400Mdistill")
class BlenderBot400MDistillWrapper(ParlAIModelZooWrapper):
@property
def zoo_path(self):
return "zoo:blender/blender_400Mdistill/model"
@register_model_wrapper("blenderbot_1Bdistill")
class BlenderBot1BDistillWrapper(ParlAIModelZooWrapper):
@property
def zoo_path(self):
return "zoo:blender/blender_1Bdistill/model"
@register_model_wrapper("blenderbot_3B")
class BlenderBot3BWrapper(ParlAIModelZooWrapper):
@property
def zoo_path(self):
return "zoo:blender/blender_3B/model"
| [
"[email protected]"
]
| |
63f2066747370ce8fb808cb02615dc4121d7ede0 | 219992b56f8e5cd8b47534d98417dd8ac795110b | /src/FastPass-Mobile/NewRelease153_2.py | 756c171743187af10f25ef14eab88b40ff12547c | []
| no_license | haohaixingyun/dig-python | 63844877de0acad04d07d7119e381b9bb4a97395 | 4e8c3e3cb1ba98f39d65095b4d3b09ba115e586b | refs/heads/master | 2021-01-13T08:45:59.669829 | 2016-10-26T05:54:07 | 2016-10-26T05:54:07 | 71,970,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,277 | py | # coding = utf - 8
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.action_chains import ActionChains
import unittest
import time,sys
import login,C_screenshots
import HTMLTestRunner
class FastPass_Mobile(unittest.TestCase):
def setUp(self):
self.driver =webdriver.Chrome()
self.base_url = "http://sbybz2239.sby.ibm.com:19080/FastPassS2/"
self.verificationErrors = []
self.accept_next_alert = True
self.wait = WebDriverWait(self.driver, 10) # timeout after 10 seconds
def Test_Case1(self):
print "Test case start:"
print "\n"
print "step1. open the home page"
driver = self.driver
wait = self.wait
driver.get(self.base_url + "fastpass.html")
driver.maximize_window()
now_url = driver.current_url
print now_url
assert now_url == 'http://sbybz2239.sby.ibm.com:19080/FastPassS2/fastpass.html' ,"URL is not correct."
C_screenshots.C_screenshots(self,'C:\LM_IBM_WORK\LM_WORK\FastPass\FastPass_Mobile\\result\\image\\','NewRelease15.3_2_p1')
###capture screenshots
print "\n"
print "step2.login"
login.login(self,'Customers')
C_screenshots.C_screenshots(self,'C:\LM_IBM_WORK\LM_WORK\FastPass\FastPass_Mobile\\result\\image\\','NewRelease15.3_2_p2')
driver.find_element_by_name("submit").click()
driver.implicitly_wait(10)
print "\n"
print "step3. Input 'customer number' field with 'FIJIKEN' and click 'Search."
driver.get("https://fpagile.boulder.ibm.com/software/xl/fastpass/agile/fastpass.nsf/customers?openform")
driver.implicitly_wait(10)
driver.find_element_by_id("name").clear()
driver.find_element_by_id("name").send_keys("FIJIKEN")
driver.find_element_by_name("ibm-submit").submit()
time.sleep(5)
result = driver.title
print result
        assert result == 'FastPass | Customers - Customer details', "The page was not opened correctly"
C_screenshots.C_screenshots(self,'C:\LM_IBM_WORK\LM_WORK\FastPass\FastPass_Mobile\\result\\image\\','NewRelease15.3_2_p3')
print "\n"
print "step4.Click English Button"
driver.implicitly_wait(10)
driver.find_element_by_link_text("Toggle English/international characters").click()
time.sleep(5)
C_screenshots.C_screenshots(self,'C:\LM_IBM_WORK\LM_WORK\FastPass\FastPass_Mobile\\result\\image\\','NewRelease15.3_2_p4')
time.sleep(5)
element = driver.find_element_by_xpath("//input[@value='English']")
ActionChains(driver).click(element).perform()
time.sleep(5)
        assert 'FIJIKEN Co.,Ltd' in driver.page_source, "The English Button is unavailable"
C_screenshots.C_screenshots(self,'C:\LM_IBM_WORK\LM_WORK\FastPass\FastPass_Mobile\\result\\image\\','NewRelease15.3_2_p5')
print "\n"
print "step5.Click International Button"
driver.implicitly_wait(10)
driver.find_element_by_link_text("Toggle English/international characters").click()
time.sleep(5)
element = driver.find_element_by_xpath("//input[@value='International']")
time.sleep(5)
ActionChains(driver).click(element).perform()
time.sleep(5)
# assert in driver.page_source ,"The International Button is unavilable"
C_screenshots.C_screenshots(self,'C:\LM_IBM_WORK\LM_WORK\FastPass\FastPass_Mobile\\result\\image\\','NewRelease15.3_2_p6')
print "\n"
print "Test Case end with successfully!"
def tearDown(self):
self.driver.quit()
self.assertEqual([], self.verificationErrors)
if __name__ == '__main__':
now = time.strftime("%Y-%m-%d-%H_%M_%S", time.localtime(time.time()))
testunit=unittest.TestSuite()
testunit.addTest(FastPass_Mobile("Test_Case1"))
filename="C:\LM_IBM_WORK\LM_WORK\FastPass\FastPass_Mobile\\result\\"+now+" FastPass_Test_Case_NewRelease15.3_2.html"
fp=file(filename,'wb')
runner = HTMLTestRunner.HTMLTestRunner(stream=fp,title='FastPass_Mobile Test Case',description='This is NewRelease15.3_2 test case')
runner.run(testunit)
| [
"[email protected]"
]
| |
3098b10c57b349f42834bcdbdc14ed15225b83e8 | dd4d2589d1f14303cacd3b7ee1dd5f6bacd3bf3c | /array/max_k_sum_pairs.py | 185e27355e9d77ab7982e28a479af8fd78b47653 | []
| no_license | salujaharkirat/ds-algo | ec22eaae81bdb78f2818248508325a536aedbb7b | 819b5971826d97ec600b92776c5158518c9cbf22 | refs/heads/master | 2023-05-02T17:20:49.425484 | 2021-05-23T07:54:29 | 2021-05-23T07:54:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | """
https://leetcode.com/contest/weekly-contest-218/problems/max-number-of-k-sum-pairs/
"""
import collections
from typing import List
class Solution:
def maxOperations(self, nums: List[int], k: int) -> int:
counter = collections.Counter()
res = 0
for element in nums:
if k - element in counter:
res += 1
counter[k-element] -= 1
if counter[k-element] == 0:
del counter[k-element]
else:
counter[element] += 1
return res
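# Quick local sanity check (illustrative only, not part of the LeetCode submission or its harness):
if __name__ == "__main__":
    assert Solution().maxOperations([1, 2, 3, 4], 5) == 2      # pairs (1, 4) and (2, 3)
    assert Solution().maxOperations([3, 1, 3, 4, 3], 6) == 1   # only one (3, 3) pair can be removed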
| [
"[email protected]"
]
| |
2f0b2157cc11367239409809bb4e4eb4aa576908 | 1eb0213140ada1c48edc5fb97b439d6556e6c3a9 | /0x07-python-test_driven_development/0-add_integer.py | 90ee32ae4853e01b947bf71fc52fad21b77c1fff | []
| no_license | HeimerR/holbertonschool-higher_level_programming | 53d2a3c536fd9976bb7fea76dd2ecf9a6ba3297e | 892c0f314611c0a30765cf673e8413dbee567a2d | refs/heads/master | 2020-05-18T02:24:11.829328 | 2020-04-30T03:59:04 | 2020-04-30T03:59:04 | 184,112,468 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 610 | py | #!/usr/bin/python3
""" this modulo has a function that adds 2 intergers
you can test it using testfile
included in /test//0-add_integer.txt
"""
def add_integer(a, b=98):
""" adds 2 intergers, float casted to intgers
Args:
a (int): number one
b (int): number two
"""
if type(a) is not int and type(a) is not float:
raise TypeError('a must be an integer')
if type(b) is not int and type(b) is not float:
raise TypeError('b must be an integer')
if type(a) is float:
a = int(a)
if type(b) is float:
b = int(b)
return a + b
| [
"[email protected]"
]
| |
32e3698555f9b02f0dcbcc116272995e117ea74c | 2e0697fe92c0f7f0386762dd21ba7ab7ec0980d8 | /LaTeX/src/python/big-oh-example-v3.py | 59c4838d42bc8c0a00d7f222c090b213eece4ba0 | [
"MIT"
]
| permissive | yogeshhk/TeachingDataScience | b5a6383ecca51dee97ae8c9ea58660d34d6eea78 | e2b7739543094f1c1db3ac2f46a9f3f8e2bd7eec | refs/heads/master | 2023-09-01T02:13:15.402306 | 2023-08-21T11:41:14 | 2023-08-21T11:41:14 | 181,239,962 | 93 | 55 | MIT | 2022-08-02T11:22:42 | 2019-04-13T23:43:45 | Jupyter Notebook | UTF-8 | Python | false | false | 110 | py | from typing import List
def sum(a: List[int]) -> int:
s = 0
for i in a:
s += i
return s
| [
"[email protected]"
]
| |
cc270f7b3de0d5cd12991b46734ecc1226e2bf79 | 1ccd4e302f1c6a7d76059cb0460109370c16ea9b | /arbitrage/observers/traderbot.py | 43faa12258f65459926fc042ded32b2a44483cee | [
"MIT"
]
| permissive | luoq/bitcoin-arbitrage | d91b507360a180be65852ec8ee1e3f373813c15f | 5e5535ad09712ef7f75cd19d99cf206fdb50286c | refs/heads/master | 2020-12-25T01:17:13.218621 | 2013-05-07T09:19:49 | 2013-05-07T09:19:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,116 | py | import logging
import config
import time
from .observer import Observer
from .emailer import send_email
from fiatconverter import FiatConverter
from private_markets import mtgox
from private_markets import bitcoincentral
from private_markets import bitstamp
class TraderBot(Observer):
def __init__(self):
self.mtgox = mtgox.PrivateMtGox()
self.bitstamp = bitstamp.PrivateBitstamp()
self.clients = {
"MtGoxEUR": self.mtgox,
"MtGoxUSD": self.mtgox,
"BitstampUSD": self.bitstamp,
}
self.fc = FiatConverter()
self.profit_thresh = 100 # in USD
self.perc_thresh = 4 # in %
self.trade_wait = 120 # in seconds
self.last_trade = 0
self.potential_trades = []
def begin_opportunity_finder(self, depths):
self.potential_trades = []
def end_opportunity_finder(self):
if not self.potential_trades:
return
self.potential_trades.sort(key=lambda x: x[0])
# Execute only the best (more profitable)
self.execute_trade(*self.potential_trades[0][1:])
def get_min_tradeable_volume(self, buyprice, usd_bal, btc_bal):
min1 = float(usd_bal) / ((1 + config.balance_margin) * buyprice)
min2 = float(btc_bal) / (1 + config.balance_margin)
return min(min1, min2)
def update_balance(self):
for kclient in self.clients:
self.clients[kclient].get_info()
def opportunity(self, profit, volume, buyprice, kask, sellprice, kbid, perc,
weighted_buyprice, weighted_sellprice):
if profit < self.profit_thresh or perc < self.perc_thresh:
logging.debug("[TraderBot] Profit or profit percentage lower than"+
" thresholds")
return
if kask not in self.clients:
logging.warn("[TraderBot] Can't automate this trade, client not "+
"available: %s" % kask)
return
if kbid not in self.clients:
logging.warn("[TraderBot] Can't automate this trade, " +
"client not available: %s" % kbid)
return
volume = min(config.max_tx_volume, volume)
# Update client balance
self.update_balance()
max_volume = self.get_min_tradeable_volume(buyprice,
self.clients[kask].usd_balance,
self.clients[kbid].btc_balance)
volume = min(volume, max_volume, config.max_tx_volume)
if volume < config.min_tx_volume:
logging.warn("Can't automate this trade, minimum volume transaction"+
" not reached %f/%f" % (volume, config.min_tx_volume))
logging.warn("Balance on %s: %f USD - Balance on %s: %f BTC"
% (kask, self.clients[kask].usd_balance, kbid,
self.clients[kbid].btc_balance))
return
current_time = time.time()
if current_time - self.last_trade < self.trade_wait:
logging.warn("[TraderBot] Can't automate this trade, last trade " +
"occured %.2f seconds ago" %
(current_time - self.last_trade))
return
self.potential_trades.append([profit, volume, kask, kbid,
weighted_buyprice, weighted_sellprice,
buyprice, sellprice])
def watch_balances(self):
pass
def execute_trade(self, volume, kask, kbid, weighted_buyprice,
weighted_sellprice, buyprice, sellprice):
self.last_trade = time.time()
logging.info("Buy @%s %f BTC and sell @%s" % (kask, volume, kbid))
send_email("Bought @%s %f BTC and sold @%s" % (kask, volume, kbid),
"weighted_buyprice=%f weighted_sellprice=%f" %
(weighted_buyprice, weighted_sellprice))
self.clients[kask].buy(volume, buyprice)
self.clients[kbid].sell(volume, sellprice)
| [
"[email protected]"
]
| |
5af1eab168e4741243e9508b4a1d0100f356204a | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /MY_REPOS/my-gists/by-extension/py/08-LongestSemiAlternatingSubString.py | 075babd59857fcff331284c3aa14472dd06a5d10 | [
"MIT"
]
| permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 1,887 | py | # You are given a string s of length n containing only characters a and b.
# A substring of s called a semi-alternating substring if it does not
# contain three identical consecutive characters.
# Return the length of the longest semi-alternating substring.
# Example 1: Input: "baaabbabbb" | Output: 7
# Explanation: "aabbabb"
# Example 2: Input: "abaaaa" | Output: 4
# Explanation: "abaa"
# time complexity: O(n)
# space complexity: O(1)
def longest_semialternating_ss(s):
length = len(s)
if not s or length == 0:
return 0
if length < 3:
return length
beginning = 0
end = 1
# first character
comparison_char = s[0]
# count the occurrence of the first char
count_first_char = 1
max_length = 1
while end < length:
end_char = s[end]
if end_char == comparison_char:
# add one to char count
count_first_char += 1
# if char found at least two times
if count_first_char == 2:
x = end - beginning + 1
if x > max_length:
max_length = x
elif count_first_char > 2:
# reset beginning pointer
beginning = end - 1
else:
comparison_char = end_char
count_first_char = 1
if end - beginning + 1 > max_length:
max_length = end - beginning + 1
end += 1
return max_length
# alternate solution
def longest_semi(s):
max_length = 0
left = 0
for right in range(len(s)):
if right - left + 1 >= 3 and s[right] == s[right-1] == s[right-2]:
left = right - 1
max_length = max(max_length, right-left+1)
return max_length
# 7
print(longest_semialternating_ss("baaabbabbb"))
# 4
print(longest_semialternating_ss("abaaaa")) | [
"[email protected]"
]
| |
3e9c34efaf729e534f14ebb1b01daec2955eac95 | bbdf8228ff9b7eacdeccead5a51f0c5008fdbd4e | /backend/home/migrations/0002_customtext_homepage.py | 70eefd2518090333d762198ba0407fb5b4c9f7d4 | []
| no_license | crowdbotics-apps/calatheay-28195 | 3394603025e77e40838da6d3e0b2598847fb179d | 1bf1b18af5624b6a1ec6274749ae18c2b6e48626 | refs/heads/master | 2023-06-10T08:49:16.744520 | 2021-06-24T04:14:18 | 2021-06-24T04:14:18 | 379,799,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | # Generated by Django 2.2.20 on 2021-06-24 04:14
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('home', '0001_load_initial_data'),
]
operations = [
migrations.CreateModel(
name='CustomText',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=150)),
],
),
migrations.CreateModel(
name='HomePage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('body', models.TextField()),
],
),
]
| [
"[email protected]"
]
| |
76423ad933817305f1ccfa8dd901e54b3866ec82 | c39f999cae8825afe2cdf1518d93ba31bd4c0e95 | /PYME/DSView/logparser.py | 34db70f067577b67657037c2976d71c5b87eb612 | []
| no_license | WilliamRo/CLipPYME | 0b69860136a9b2533f2f29fc29408d7471cb934d | 6596167034c727ad7dad0a741dd59e0e48f6852a | refs/heads/master | 2023-05-11T09:50:58.605989 | 2023-05-09T02:17:47 | 2023-05-09T02:17:47 | 60,789,741 | 3 | 1 | null | 2016-06-17T08:52:44 | 2016-06-09T16:30:14 | Python | UTF-8 | Python | false | false | 3,013 | py | #!/usr/bin/python
##################
# logparser.py
#
# Copyright David Baddeley, 2009
# [email protected]
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##################
class logparser:
def __init__(self):
pass
def parse(self, s):
s = s.split('\n')
dic = {};
curdic = dic;
for entry in s:
if entry == '':
pass
elif entry[0] == '[':
newdic = {}
curdic = newdic
dic[entry.strip()[1:-1]] = newdic
elif entry[0] == '#':
pass
else:
e = entry.split('=')
val = ''
#Try to interpret the value as an int, then as a float.
#If that doesn't work then store as string
try:
val = int(e[1].strip())
except ValueError:
try:
val = float(e[1].strip())
except ValueError:
val = e[1].strip()
curdic[e[0]] = val
return dic
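# Example of the log format parse() expects (illustrative; the key names are taken from the
# confocal_parser class below, the values are made up):
#
#   [GLOBAL]
#   ImageWidth=512
#   ImageLength=512
#   VoxelSizeX=0.05
#
# parse() would return {'GLOBAL': {'ImageWidth': 512, 'ImageLength': 512, 'VoxelSizeX': 0.05}}.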
class confocal_parser(logparser):
def __init__(self, log_s):
self.dic = self.parse(log_s)
self.Width = self.dic['GLOBAL']['ImageWidth']
self.Height = self.dic['GLOBAL']['ImageLength']
self.Depth = self.dic['GLOBAL']['NumOfFrames']
self.NumChannels = self.dic['FILTERSETTING1']['NumOfVisualisations']
self.VoxelSize = (self.dic['GLOBAL']['VoxelSizeX'],self.dic['GLOBAL']['VoxelSizeY'],self.dic['GLOBAL']['VoxelSizeZ'])
self.VoxelX = self.VoxelSize[0]
self.VoxelY = self.VoxelSize[1]
self.VoxelZ = self.VoxelSize[2]
self.Averaging = self.dic['GLOBAL']['Accu']
class logwriter:
def __init__(self):
pass
def write(self, log):
#s = s.split('\n')
#dic = {};
#curdic = dic;
s = ''
cats = log.keys()
cats.sort()
for category in cats:
s = s + '[%s]\n' % category
entries = log[category].keys()
entries.sort()
for entry in entries:
s = s + '%s=%s\n' % (entry, log[category][entry])
return s
| [
"[email protected]"
]
| |
733d8138cbf16e057b1f1dc5a8bd3607565a2f07 | de392462a549be77e5b3372fbd9ea6d7556f0282 | /operations_9001/migrations/0075_auto_20201029_1538.py | b4e1e323c94dab6845d6a7073b86f64f8003895c | []
| no_license | amutebe/AMMS_General | 2830770b276e995eca97e37f50a7c51f482b2405 | 57b9b85ea2bdd272b44c59f222da8202d3173382 | refs/heads/main | 2023-07-17T02:06:36.862081 | 2021-08-28T19:07:17 | 2021-08-28T19:07:17 | 400,064,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,697 | py | # Generated by Django 3.0.2 on 2020-10-29 12:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('operations_9001', '0074_auto_20201028_1628'),
]
operations = [
migrations.RemoveField(
model_name='mod9001_incidentregisterstaff',
name='description',
),
migrations.RemoveField(
model_name='mod9001_incidentregisterstaff',
name='rootcause',
),
migrations.AlterField(
model_name='maintenance',
name='maintenance_number',
field=models.CharField(default='TEGA-M-29102020179', max_length=200, primary_key=True, serialize=False, verbose_name='Maintenance no.:'),
),
migrations.AlterField(
model_name='mod9001_calibration',
name='calibration_number',
field=models.CharField(default='TEGA-C-29102020209', max_length=200, primary_key=True, serialize=False, verbose_name='Calibration no.:'),
),
migrations.AlterField(
model_name='mod9001_document_manager',
name='document_number',
field=models.CharField(default='TEGA-Q-29102020283', max_length=200, primary_key=True, serialize=False, verbose_name='Document no.:'),
),
migrations.AlterField(
model_name='mod9001_processtable',
name='process_number',
field=models.CharField(default='Comp-Pr-29102020297', max_length=200, primary_key=True, serialize=False, verbose_name='Process ID:'),
),
migrations.AlterField(
model_name='mod9001_providerassessment',
name='emp_perfrev_no',
field=models.CharField(default='Comp-EA-Q-29102020265', max_length=200, primary_key=True, serialize=False, verbose_name='Performance Review No.:'),
),
migrations.AlterField(
model_name='mod9001_qmsplanner',
name='planner_number',
field=models.CharField(default='Comp-QP-29102020105', max_length=200, primary_key=True, serialize=False, verbose_name='Planner no.:'),
),
migrations.AlterField(
model_name='mod9001_trainingplanner',
name='plan_number',
field=models.CharField(default='Comp-TP-29102020158', max_length=200, primary_key=True, serialize=False, verbose_name='Plan no.:'),
),
migrations.AlterField(
model_name='mod9001_trainingregister',
name='training_number',
field=models.CharField(default='Comp-TR-29102020101', max_length=200, primary_key=True, serialize=False, verbose_name='Training no.:'),
),
]
| [
"[email protected]"
]
| |
a5eb5a6012c692f522d106022bd5679d3741a1ac | 14818626a264e1556b5bfa282c329465c61fca56 | /tutorial/04_signal_event/C5_t1.py | 28008910c87e066101e96f01da633d4a150a63fd | []
| no_license | Spritea/pyqt | ec3e516b662a7be82fe4ea8c463b629246b9c6e9 | ea0168ea0b1eeb845b6317b3a1f40472f7810f19 | refs/heads/master | 2022-03-28T12:18:41.552662 | 2020-01-03T14:03:20 | 2020-01-03T14:03:20 | 230,898,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 698 | py | import sys
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QWidget,QLCDNumber,QSlider,QVBoxLayout,QApplication
class Example(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
lcd=QLCDNumber(self)
sld=QSlider(Qt.Horizontal,self)
vbox=QVBoxLayout()
vbox.addWidget(lcd)
vbox.addWidget(sld)
self.setLayout(vbox)
sld.valueChanged.connect(lcd.display)
self.setGeometry(300, 300, 250, 150)
self.setWindowTitle('Signal & slot')
self.show()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Example()
    sys.exit(app.exec_())
| [
"[email protected]"
]
| |
86d4e16a1bf42cbcd3932a95b11a674f74b271b4 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/resources/azure-mgmt-resource/azure/mgmt/resource/subscriptions/v2021_01_01/aio/operations/_operations.py | 2c788c20e3484715c8cc4646138fb4768d661c50 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
]
| permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 28,270 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._operations import (
build_subscription_check_resource_name_request,
build_subscriptions_check_zone_peers_request,
build_subscriptions_get_request,
build_subscriptions_list_locations_request,
build_subscriptions_list_request,
build_tenants_list_request,
)
from .._vendor import SubscriptionClientMixinABC
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SubscriptionsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.resource.subscriptions.v2021_01_01.aio.SubscriptionClient`'s
:attr:`subscriptions` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_locations(
self, subscription_id: str, include_extended_locations: Optional[bool] = None, **kwargs: Any
) -> AsyncIterable["_models.Location"]:
"""Gets all available geo-locations.
This operation provides all the locations that are available for resource providers; however,
each resource provider may support a subset of this list.
:param subscription_id: The ID of the target subscription. Required.
:type subscription_id: str
:param include_extended_locations: Whether to include extended locations. Default value is
None.
:type include_extended_locations: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Location or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.subscriptions.v2021_01_01.models.Location]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-01-01"))
cls: ClsType[_models.LocationListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_subscriptions_list_locations_request(
subscription_id=subscription_id,
include_extended_locations=include_extended_locations,
api_version=api_version,
template_url=self.list_locations.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("LocationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_locations.metadata = {"url": "/subscriptions/{subscriptionId}/locations"}
@distributed_trace_async
async def get(self, subscription_id: str, **kwargs: Any) -> _models.Subscription:
"""Gets details about a specified subscription.
:param subscription_id: The ID of the target subscription. Required.
:type subscription_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Subscription or the result of cls(response)
:rtype: ~azure.mgmt.resource.subscriptions.v2021_01_01.models.Subscription
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-01-01"))
cls: ClsType[_models.Subscription] = kwargs.pop("cls", None)
request = build_subscriptions_get_request(
subscription_id=subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("Subscription", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}"}
@distributed_trace
def list(self, **kwargs: Any) -> AsyncIterable["_models.Subscription"]:
"""Gets all subscriptions for a tenant.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Subscription or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.subscriptions.v2021_01_01.models.Subscription]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-01-01"))
cls: ClsType[_models.SubscriptionListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_subscriptions_list_request(
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SubscriptionListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list.metadata = {"url": "/subscriptions"}
@overload
async def check_zone_peers(
self,
subscription_id: str,
parameters: _models.CheckZonePeersRequest,
*,
content_type: str = "application/json",
**kwargs: Any
    ) -> _models.CheckZonePeersResult:
        """Compares a subscription's logical zone mapping.
:param subscription_id: The ID of the target subscription. Required.
:type subscription_id: str
:param parameters: Parameters for checking zone peers. Required.
:type parameters: ~azure.mgmt.resource.subscriptions.v2021_01_01.models.CheckZonePeersRequest
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CheckZonePeersResult or the result of cls(response)
:rtype: ~azure.mgmt.resource.subscriptions.v2021_01_01.models.CheckZonePeersResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def check_zone_peers(
self, subscription_id: str, parameters: IO, *, content_type: str = "application/json", **kwargs: Any
    ) -> _models.CheckZonePeersResult:
        """Compares a subscription's logical zone mapping.
:param subscription_id: The ID of the target subscription. Required.
:type subscription_id: str
:param parameters: Parameters for checking zone peers. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CheckZonePeersResult or the result of cls(response)
:rtype: ~azure.mgmt.resource.subscriptions.v2021_01_01.models.CheckZonePeersResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def check_zone_peers(
self, subscription_id: str, parameters: Union[_models.CheckZonePeersRequest, IO], **kwargs: Any
    ) -> _models.CheckZonePeersResult:
        """Compares a subscription's logical zone mapping.
:param subscription_id: The ID of the target subscription. Required.
:type subscription_id: str
:param parameters: Parameters for checking zone peers. Is either a CheckZonePeersRequest type
         or an IO type. Required.
:type parameters: ~azure.mgmt.resource.subscriptions.v2021_01_01.models.CheckZonePeersRequest
or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CheckZonePeersResult or the result of cls(response)
:rtype: ~azure.mgmt.resource.subscriptions.v2021_01_01.models.CheckZonePeersResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-01-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.CheckZonePeersResult] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IOBase, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "CheckZonePeersRequest")
request = build_subscriptions_check_zone_peers_request(
subscription_id=subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.check_zone_peers.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponseAutoGenerated, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("CheckZonePeersResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
check_zone_peers.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/checkZonePeers/"}
class TenantsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.resource.subscriptions.v2021_01_01.aio.SubscriptionClient`'s
:attr:`tenants` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(self, **kwargs: Any) -> AsyncIterable["_models.TenantIdDescription"]:
"""Gets the tenants for your account.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either TenantIdDescription or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.subscriptions.v2021_01_01.models.TenantIdDescription]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-01-01"))
cls: ClsType[_models.TenantListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_tenants_list_request(
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("TenantListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list.metadata = {"url": "/tenants"}
class SubscriptionClientOperationsMixin(SubscriptionClientMixinABC):
@overload
async def check_resource_name(
self,
resource_name_definition: Optional[_models.ResourceName] = None,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.CheckResourceNameResult:
"""Checks resource name validity.
        A resource name is valid if it is not a reserved word, does not contain a reserved word and
does not start with a reserved word.
:param resource_name_definition: Resource object with values for resource name and resource
type. Default value is None.
:type resource_name_definition:
~azure.mgmt.resource.subscriptions.v2021_01_01.models.ResourceName
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CheckResourceNameResult or the result of cls(response)
:rtype: ~azure.mgmt.resource.subscriptions.v2021_01_01.models.CheckResourceNameResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def check_resource_name(
self, resource_name_definition: Optional[IO] = None, *, content_type: str = "application/json", **kwargs: Any
) -> _models.CheckResourceNameResult:
"""Checks resource name validity.
        A resource name is valid if it is not a reserved word, does not contain a reserved word and
does not start with a reserved word.
:param resource_name_definition: Resource object with values for resource name and resource
type. Default value is None.
:type resource_name_definition: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CheckResourceNameResult or the result of cls(response)
:rtype: ~azure.mgmt.resource.subscriptions.v2021_01_01.models.CheckResourceNameResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def check_resource_name(
self, resource_name_definition: Optional[Union[_models.ResourceName, IO]] = None, **kwargs: Any
) -> _models.CheckResourceNameResult:
"""Checks resource name validity.
        A resource name is valid if it is not a reserved word, does not contain a reserved word and
does not start with a reserved word.
:param resource_name_definition: Resource object with values for resource name and resource
         type. Is either a ResourceName type or an IO type. Default value is None.
:type resource_name_definition:
~azure.mgmt.resource.subscriptions.v2021_01_01.models.ResourceName or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CheckResourceNameResult or the result of cls(response)
:rtype: ~azure.mgmt.resource.subscriptions.v2021_01_01.models.CheckResourceNameResult
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-01-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.CheckResourceNameResult] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(resource_name_definition, (IOBase, bytes)):
_content = resource_name_definition
else:
if resource_name_definition is not None:
_json = self._serialize.body(resource_name_definition, "ResourceName")
else:
_json = None
request = build_subscription_check_resource_name_request(
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.check_resource_name.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("CheckResourceNameResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
check_resource_name.metadata = {"url": "/providers/Microsoft.Resources/checkResourceName"}
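# --- Editor's usage sketch (not part of the generated SDK module) ---
# Minimal async example of how these operations are typically consumed; the
# credential type and the printed fields are assumptions, not taken from this file.
async def _example_list_subscriptions():  # pragma: no cover - illustrative only
    from azure.identity.aio import DefaultAzureCredential  # assumed available
    from azure.mgmt.resource.subscriptions.v2021_01_01.aio import SubscriptionClient
    async with SubscriptionClient(DefaultAzureCredential()) as client:
        async for sub in client.subscriptions.list():
            print(sub.subscription_id, sub.display_name)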
| [
"[email protected]"
]
| |
632cf55db5189a414a9d4317d4d592fc6ae4ae24 | dd221d1ab80a49190a0c93277e2471debaa2db95 | /hanlp/datasets/parsing/ctb7.py | 09345b1822e5d56ebd84289ee4e245522a0f63d3 | [
"Apache-2.0",
"CC-BY-NC-SA-4.0"
]
| permissive | hankcs/HanLP | 29a22d4e240617e4dc67929c2f9760a822402cf7 | be2f04905a12990a527417bd47b79b851874a201 | refs/heads/doc-zh | 2023-08-18T12:48:43.533453 | 2020-02-15T17:19:28 | 2023-03-14T02:46:03 | 24,976,755 | 32,454 | 9,770 | Apache-2.0 | 2023-08-13T03:11:39 | 2014-10-09T06:36:16 | Python | UTF-8 | Python | false | false | 421 | py | # -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2019-12-28 18:44
from hanlp.datasets.parsing.ctb5 import _CTB_HOME
_CTB7_HOME = _CTB_HOME + 'BPNN/data/ctb7/'
CTB7_DEP_TRAIN = _CTB7_HOME + 'train.conll'
'''Training set for ctb7 dependency parsing.'''
CTB7_DEP_DEV = _CTB7_HOME + 'dev.conll'
'''Dev set for ctb7 dependency parsing.'''
CTB7_DEP_TEST = _CTB7_HOME + 'test.conll'
'''Test set for ctb7 dependency parsing.'''
| [
"[email protected]"
]
| |
da65b009b1cda76d19f5b4d51139920c08486916 | c03d0f321e743eb8bd54834f88bd025d6da4e7a8 | /boa_test/example/MethodTest4.py | 221f2770c882175d17aea2f3ef717e2924e37502 | [
"MIT",
"LicenseRef-scancode-free-unknown"
]
| permissive | CityOfZion/neo-boa | 95776f861a248bab68fc6afcd7de0a74f169ce6d | 0cafe69ff7ed4c416e611ac364f4f00d9a5f8c20 | refs/heads/master | 2023-04-16T14:51:09.385145 | 2023-03-28T17:08:14 | 2023-03-28T17:08:14 | 107,316,151 | 79 | 76 | MIT | 2020-08-20T12:38:17 | 2017-10-17T19:44:07 | Python | UTF-8 | Python | false | false | 182 | py | # tested
def Main():
a = 1
b = 10
c = 20
d = add(a, b, 10)
d2 = add(d, d, d)
return d2
def add(a, b, c):
result = a + b + c
return result
| [
"[email protected]"
]
| |
35637d50caf1d867787b77e7439fd213e2c2f866 | 25af1a353db775c70db86f156605357358d6a692 | /backend/app/app/api/api_v1/api.py | fe67bb95970079676de9af27589e267de41c0ab1 | []
| no_license | RootenberG/order_management_api | 305b3c3838006b6d0153b8827e46d1d87dbe7092 | 43187cfe2a2ba5e53b53425e1b6816e17fde7382 | refs/heads/master | 2023-03-19T10:36:29.168066 | 2021-03-17T13:01:22 | 2021-03-17T13:01:22 | 348,662,504 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 560 | py | from fastapi import APIRouter
from app.api.api_v1.endpoints import items, login, users, utils, orders, bills
api_router = APIRouter()
api_router.include_router(login.router, tags=["login"])
api_router.include_router(users.router, prefix="/users", tags=["users"])
api_router.include_router(utils.router, prefix="/utils", tags=["utils"])
api_router.include_router(items.router, prefix="/items", tags=["items"])
api_router.include_router(orders.router, prefix="/orders", tags=["orders"])
api_router.include_router(bills.router, prefix="/bills", tags=["bills"])
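# --- Editor's usage sketch (not part of the original module) ---
# How this aggregated router is typically mounted on the application; the app
# title and the "/api/v1" prefix literal are assumptions (the project may use a
# settings constant instead).
def _example_create_app():
    from fastapi import FastAPI
    app = FastAPI(title="order_management_api")
    app.include_router(api_router, prefix="/api/v1")
    return app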
| [
"[email protected]"
]
| |
a6f9be51af3fe5309b5c189c4a9f7c0e1e0b6e37 | 748a4a2d7e710d4c2ab86e60dd58a53368153836 | /control_server/cfg/params.cfg | d69c5e77f16880a31b83a51d18c9ff0e47a38932 | []
| no_license | auviitkgp/kraken_obsolete | ad5d45d330686c66f59ef45000a8d6889706569a | d10acdf570c648097eec21cee5ad07f7608692b4 | refs/heads/master | 2021-04-28T23:04:49.200998 | 2017-01-15T10:44:52 | 2017-01-15T10:44:54 | 77,742,183 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,610 | cfg | #!/usr/bin/env python
PACKAGE = "control_server"
import roslib;roslib.load_manifest(PACKAGE)
from dynamic_reconfigure.parameter_generator import *
gen = ParameterGenerator()
gen.add("off_yaw", double_t, 0, "Kp value", 0, 0, 100)
gen.add("of", double_t, 0, "A double parameter",0, 0, 100)
gen.add("Kil", double_t, 0, "A double parameter",0, 0, 100)
gen.add("Kp_yaw", double_t, 0, "Kp value", 0, 0, 100)
gen.add("Kd_yaw", double_t, 0, "A double parameter",0, 0, 100)
gen.add("Ki_yaw", double_t, 0, "A double parameter",0, 0, 100)
gen.add("off_dep_bot", double_t, 0, "Kp value", 0, 0, 100)
gen.add("Kib_depth", double_t, 0, "A double parameter",0, 0, 100)
gen.add("Kpb_depth", double_t, 0, "Kp value", 0, 0, 100)
gen.add("Kdb_depth", double_t, 0, "A double parameter",0, 0, 100)
gen.add("off_dep_top", double_t, 0, "Kp value", 0, 0, 100)
gen.add("Kpt_depth", double_t, 0, "Kp value", 0, 0, 100)
gen.add("Kdt_depth", double_t, 0, "A double parameter",0, 0, 100)
gen.add("Kit_depth", double_t, 0, "A double parameter",0, 0, 100)
# gen.add("bool_param", bool_t, 0, "A Boolean parameter", True)
# size_enum = gen.enum([ gen.const("Small", int_t, 0, "A small constant"),
# gen.const("Medium", int_t, 1, "A medium constant"),
# gen.const("Large", int_t, 2, "A large constant"),
# gen.const("ExtraLarge", int_t, 3, "An extra large constant") ],
# "An enum to set size")
# gen.add("size", int_t, 0, "A size parameter which is edited via an enum", 1, 0, 3, edit_method=size_enum)
exit(gen.generate(PACKAGE, "control_server", "params"))
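# --- Editor's usage sketch (not part of the original cfg) ---
# Node-side consumption of the generated paramsConfig class; kept as comments
# because nothing after exit() executes here, and the callback body is an
# assumption about how the PID gains would be read.
#
#     import rospy
#     from dynamic_reconfigure.server import Server
#     from control_server.cfg import paramsConfig
#
#     def reconfigure_cb(config, level):
#         rospy.loginfo("Kp_yaw=%f Kd_yaw=%f Ki_yaw=%f",
#                       config.Kp_yaw, config.Kd_yaw, config.Ki_yaw)
#         return config
#
#     rospy.init_node("control_server")
#     srv = Server(paramsConfig, reconfigure_cb)
#     rospy.spin()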
| [
"[email protected]"
]
| |
287d586f524376706a73cc520fa47eb11e7fea4a | 053d7ca689e41e8ba94c8792f4167a3a2e3069f3 | /urly_bird/bookmarks/admin.py | 32719c44be11c90b61094fc06550e9e6e0ef2bad | []
| no_license | cesarmarroquin/urly-bird-evolved | 691554ee13ea4bfb12ab056b5f2a7a621bfb8e5e | c61e50bc8d13c06b6d431196d532cf45c85dee65 | refs/heads/master | 2020-12-25T23:19:14.149147 | 2015-11-05T16:21:42 | 2015-11-05T16:21:42 | 45,566,498 | 0 | 0 | null | 2015-11-04T20:48:57 | 2015-11-04T20:48:56 | null | UTF-8 | Python | false | false | 360 | py | from django.contrib import admin
from .models import Bookmark, Click
# Register your models here.
@admin.register(Bookmark)
class BookmarkAdmin(admin.ModelAdmin):
list_display = ('id','title','description','bookmark_url', 'timestamp', 'user')
@admin.register(Click)
class ClickAdmin(admin.ModelAdmin):
    list_display = ('bookmark','timestamp', 'user')
| [
"[email protected]"
]
| |
2303eafc289500d20adb76daad5e488b046b0a2b | 2ce0c37ac7d9beeac23db688f97a1f502b92d13a | /delivery/migrations/0005_remove_outproduct_product.py | ded1fa53f647e9657ed377f967cd85b463d7d543 | []
| no_license | AmrElsayedEG/inventory-system | 0cdb0634b33117b13bfcae8642f979448d831369 | d4bc483612c3b721918d75f24ab0d7fa29b78ce3 | refs/heads/main | 2023-08-20T22:32:25.113740 | 2021-10-04T08:55:44 | 2021-10-04T08:55:44 | 413,344,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 340 | py | # Generated by Django 3.2 on 2021-09-05 16:24
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('delivery', '0004_outdelivery_representitive'),
]
operations = [
migrations.RemoveField(
model_name='outproduct',
name='product',
),
]
| [
"[email protected]"
]
| |
4bcaa3a2e38c083e389b56726a33522ede42a2e7 | 160584ad75ed15f9d39205b6a76b3e5beb03a5cb | /env/lib/python2.7/site-packages/stripe/test/resources/test_recipients.py | 3d90e6eba3d441bde22915717099863dec56df73 | [
"MIT"
]
| permissive | imran1234567/plutus | 1c66c0c29e9e615c03160fb98f14d44507b642dc | c964f18beb139de2645e052eb4c75a6bc0677029 | refs/heads/master | 2022-12-10T04:33:36.906408 | 2019-04-18T06:26:01 | 2019-04-18T06:26:01 | 169,561,380 | 0 | 0 | MIT | 2022-12-08T00:46:54 | 2019-02-07T11:29:44 | CSS | UTF-8 | Python | false | false | 2,028 | py | import stripe
from stripe.test.helper import (StripeResourceTest)
class RecipientTest(StripeResourceTest):
def test_list_recipients(self):
stripe.Recipient.list()
self.requestor_mock.request.assert_called_with(
'get',
'/v1/recipients',
{}
)
def test_recipient_transfers(self):
recipient = stripe.Recipient(id='rp_transfer')
recipient.transfers()
self.requestor_mock.request.assert_called_with(
'get',
'/v1/transfers',
{'recipient': 'rp_transfer'},
)
def test_recipient_add_card(self):
recipient = stripe.Recipient.construct_from({
'id': 'rp_add_card',
'sources': {
'object': 'list',
'url': '/v1/recipients/rp_add_card/sources',
},
}, 'api_key')
recipient.sources.create(card='tok_visa_debit')
self.requestor_mock.request.assert_called_with(
'post',
'/v1/recipients/rp_add_card/sources',
{
'card': 'tok_visa_debit',
},
None
)
def test_recipient_update_card(self):
card = stripe.Card.construct_from({
'recipient': 'rp_update_card',
'id': 'ca_update_card',
}, 'api_key')
card.name = 'The Best'
card.save()
self.requestor_mock.request.assert_called_with(
'post',
'/v1/recipients/rp_update_card/cards/ca_update_card',
{
'name': 'The Best',
},
None
)
def test_recipient_delete_card(self):
card = stripe.Card.construct_from({
'recipient': 'rp_delete_card',
'id': 'ca_delete_card',
}, 'api_key')
card.delete()
self.requestor_mock.request.assert_called_with(
'delete',
'/v1/recipients/rp_delete_card/cards/ca_delete_card',
{},
None
)
| [
"[email protected]"
]
| |
159249d116b505a9ff916d16137d4404bada9eee | 543660cf8ec69a950be57f95a6fe878b091273ef | /backend/data/rajpurkar/seqmodels/blob/eb1f40710b4ccb81f50b359daefa9e4c48c8130c/window/frame_models/recurrent.py | 77d5c9f302ac774f1a2a70cc0dafc8355d1b118b | [
"MIT"
]
| permissive | nathanhere/neural_complete | 49184982a77f12b9651def6a093a6430cf7d1755 | 050ab3073fe797f3eb2f1a1592c294a8cd081ac6 | refs/heads/master | 2022-12-09T18:40:30.079015 | 2022-02-24T04:46:40 | 2022-02-24T04:46:40 | 87,836,933 | 0 | 1 | MIT | 2022-12-06T22:52:25 | 2017-04-10T17:11:11 | Python | UTF-8 | Python | false | false | 894 | py | """Recurrent neural network model."""
from ...model import KerasModel
from ..window_model import FrameModel
class RecurrentModel(KerasModel, FrameModel):
"""RNN."""
def _create_model(self, input_shape, num_categories):
from keras.layers.core import Activation, Dense, Dropout, Reshape
from keras.models import Sequential
from keras.layers.recurrent import LSTM
model = Sequential()
model.add(
LSTM(
32,
input_shape=input_shape,
return_sequences=True
)
)
model.add(
LSTM(
32,
return_sequences=True,
go_backwards=True
)
)
model.add(LSTM(32, return_sequences=False))
model.add(Dense(num_categories))
model.add(Activation('softmax'))
        return model
| [
"[email protected]"
]
| |
0dba882d5a1cdf7ebeaf1f3fd2767a51ea7990e5 | 1d363dfbe69b79bc1989251f085060232beb12f5 | /tests/test_flash_dry_air.py | 59d163bcae94670847b9b2d794d39e3c52674277 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
]
| permissive | CalebBell/thermo | ec602af2316875692e385287c6010e9f206b1bc3 | 8622fada3614179d4372192e0031b4a206384c93 | refs/heads/master | 2023-08-30T05:30:07.552575 | 2023-06-25T01:35:53 | 2023-06-25T01:35:53 | 62,404,647 | 529 | 127 | MIT | 2023-08-11T18:31:21 | 2016-07-01T16:04:56 | Python | UTF-8 | Python | false | false | 6,407 | py | '''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2021, Caleb Bell <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import os
from math import *
import numpy as np
import pytest
from fluids.numerics import *
import thermo
from thermo import *
from thermo.chemical_package import lemmon2000_constants, lemmon2000_correlations
from thermo.coolprop import *
from thermo.phases import DryAirLemmon
from thermo.test_utils import mark_plot_unsupported
try:
import matplotlib.pyplot as plt
except:
pass
fluid = 'air'
pure_surfaces_dir = os.path.join(thermo.thermo_dir, '..', 'surfaces', 'lemmon2000')
@pytest.mark.plot
@pytest.mark.slow
@pytest.mark.parametric
@pytest.mark.parametrize("variables", ['VPT', 'VTP',
'PHT', 'PST', 'PUT',
'VUT', 'VST', 'VHT',
'TSV', # Had to increase the tolerance
'THP', 'TUP', # Not consistent, warning message added
])
def test_plot_lemmon2000(variables):
spec0, spec1, check_prop = variables
plot_name = variables[0:2]
eos = DryAirLemmon
T, P = 298.15, 101325.0
gas = DryAirLemmon(T=T, P=P)
flasher = FlashPureVLS(constants=lemmon2000_constants, correlations=lemmon2000_correlations,
gas=gas, liquids=[], solids=[])
flasher.TPV_HSGUA_xtol = 1e-14
inconsistent = frozenset([spec0, spec1]) in (frozenset(['T', 'H']), frozenset(['T', 'U']))
res = flasher.TPV_inputs(zs=[1.0], pts=200, spec0='T', spec1='P',
check0=spec0, check1=spec1, prop0=check_prop,
trunc_err_low=1e-13,
trunc_err_high=1, color_map=cm_flash_tol(),
show=False, verbose=not inconsistent)
matrix_spec_flashes, matrix_flashes, errs, plot_fig = res
path = os.path.join(pure_surfaces_dir, fluid, plot_name)
if not os.path.exists(path):
os.makedirs(path)
tol = 1e-13
key = f'{plot_name} - {eos.__name__} - {fluid}'
if inconsistent:
spec_name = spec0 + spec1
mark_plot_unsupported(plot_fig, reason='EOS is inconsistent for %s inputs' %(spec_name))
tol = 1e300
plot_fig.savefig(os.path.join(path, key + '.png'))
plt.close()
max_err = np.max(np.abs(errs))
assert max_err < tol
# test_plot_lemmon2000('VUT')
# test_plot_lemmon2000('THP')
def test_lemmon2000_case_issues():
gas = DryAirLemmon(T=300.0, P=1e5)
flasher = FlashPureVLS(constants=lemmon2000_constants, correlations=lemmon2000_correlations,
gas=gas, liquids=[], solids=[])
# Cases which were failing because of the iteration variable of P when V specified
# It is actually less efficient for this type of EOS
PT = flasher.flash(T=1000.0, P=1e3)
V = PT.V()
U = PT.U()
res = flasher.flash(V=V, U=U)
assert_close(PT.T, res.T, rtol=1e-10)
assert_close(PT.P, res.P, rtol=1e-10)
S = PT.S()
res = flasher.flash(V=V, S=S)
assert_close(PT.T, res.T, rtol=1e-10)
assert_close(PT.P, res.P, rtol=1e-10)
H = PT.H()
res = flasher.flash(V=V, H=H)
assert_close(PT.T, res.T, rtol=1e-10)
# Check we can't do a vapor fraction flash
with pytest.raises(ValueError):
flasher.flash(T=400, SF=.5)
with pytest.raises(ValueError):
flasher.flash(T=400, VF=.5)
# Check that the minimum temperature of the phases is respected
with pytest.raises(ValueError):
flasher.flash(T=132.6312, P=1e3)
PT = flasher.flash(T=2000., P=3827.4944785162643)
V = PT.V()
U = PT.U()
res = flasher.flash(V=V, U=U)
assert_close(PT.T, res.T, rtol=1e-10)
assert_close(PT.P, res.P, rtol=1e-10)
# Inconsistent TH point in fundamental formulation
PT1 = flasher.flash(T=610.7410404288737, P=6150985.788580353)
PT2 = flasher.flash(T=610.7410404288737, P=3967475.2794698337)
assert_close(PT1.H(), PT2.H())
# There are a ton of low-pressure points too
PT1 = flasher.flash(T=484.38550361282495, P=0.027682980294306617)
PT2 = flasher.flash(T=484.38550361282495, P=0.02768286630392061)
assert_close(PT1.H(), PT2.H())
# Inconsistent TU point in fundamental formulation
PT1 = flasher.flash(T=1652.4510785539342, P=519770184.42714685,)
PT2 = flasher.flash(T=1652.4510785539342, P=6985879.746785077)
assert_close(PT1.U(), PT2.U(), rtol=1e-10)
"""
Ps = logspace(log10(6985879.746785077/2), log10(519770184.42714685*2), 2000)
Us = [flasher.flash(T=1652.4510785539342, P=P).U() for P in Ps ]
"""
def test_lemmon2000_properties():
gas = DryAirLemmon(T=300.0, P=1000e5)
flasher = FlashPureVLS(constants=lemmon2000_constants, correlations=lemmon2000_correlations,
gas=gas, liquids=[], solids=[])
# Isentropic exponents
res = flasher.flash(T=300.0, P=1000e5)
for obj in (res, res.bulk, gas, res.gas):
assert_close(obj.isentropic_exponent(), 4.100576762582646, rtol=1e-12)
assert_close(obj.isentropic_exponent_PV(), 4.100576762582646, rtol=1e-12)
assert_close(obj.isentropic_exponent_PT(), 1.3248727035044343, rtol=1e-12)
assert_close(obj.isentropic_exponent_TV(), 2.0055044950839136, rtol=1e-12)
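# --- Editor's usage sketch (not part of the original test module) ---
# The flash pattern exercised above, reduced to one standalone call; the
# leading underscore keeps pytest from collecting it.
def _example_dry_air_flash():  # pragma: no cover - illustrative only
    gas = DryAirLemmon(T=300.0, P=1e5)
    flasher = FlashPureVLS(constants=lemmon2000_constants, correlations=lemmon2000_correlations,
                           gas=gas, liquids=[], solids=[])
    state = flasher.flash(T=400.0, P=2e5)
    return state.rho_mass(), state.H(), state.S()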
| [
"[email protected]"
]
|