repo_name | ref | path | copies | content
---|---|---|---|---
rven/odoo
|
refs/heads/14.0-fix-partner-merge-mail-activity
|
addons/payment_transfer/__init__.py
|
15
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from . import models
from . import controllers
from odoo.addons.payment.models.payment_acquirer import create_missing_journal_for_acquirers
from odoo.addons.payment import reset_payment_provider
def uninstall_hook(cr, registry):
reset_payment_provider(cr, registry, 'transfer')
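A hypothetical sketch of how such a hook is usually wired up: Odoo looks up the `uninstall_hook` entry in the addon's `__manifest__.py` by name and calls it with `(cr, registry)` when the module is uninstalled. The manifest values below are illustrative, not taken from the actual addon.

```python
# Hypothetical __manifest__.py sketch: 'uninstall_hook' names the function above.
{
    'name': 'Transfer Payment Acquirer',   # illustrative name, not verified
    'depends': ['payment'],
    'data': [],
    'uninstall_hook': 'uninstall_hook',    # called as uninstall_hook(cr, registry)
}
```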
|
cxxgtxy/tensorflow
|
refs/heads/master
|
tensorflow/contrib/graph_editor/tests/edit_test.py
|
132
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.contrib.graph_editor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import graph_editor as ge
from tensorflow.contrib.graph_editor.tests import match
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class EditTest(test.TestCase):
"""edit module test.
Generally the tests are in two steps:
- modify an existing graph.
- then make sure it has the expected topology using the graph matcher.
"""
def setUp(self):
self.graph = ops.Graph()
with self.graph.as_default():
self.a = constant_op.constant([1., 1.], shape=[2], name="a")
with ops.name_scope("foo"):
self.b = constant_op.constant([2., 2.], shape=[2], name="b")
self.c = math_ops.add(self.a, self.b, name="c")
self.d = constant_op.constant([3., 3.], shape=[2], name="d")
with ops.name_scope("bar"):
self.e = math_ops.add(self.c, self.d, name="e")
self.f = math_ops.add(self.c, self.d, name="f")
self.g = math_ops.add(self.c, self.a, name="g")
with ops.control_dependencies([self.c.op]):
self.h = math_ops.add(self.f, self.g, name="h")
def test_detach(self):
"""Test for ge.detach."""
sgv = ge.sgv(self.c.op, self.a.op)
control_outputs = ge.ControlOutputs(self.graph)
ge.detach(sgv, control_ios=control_outputs)
# make sure the detached graph is as expected.
self.assertTrue(
match.OpMatcher("^foo/c$").input_ops("a", "geph__b_0")(self.c.op))
def test_connect(self):
"""Test for ge.connect."""
with self.graph.as_default():
x = constant_op.constant([1., 1.], shape=[2], name="x")
y = constant_op.constant([2., 2.], shape=[2], name="y")
z = math_ops.add(x, y, name="z")
sgv = ge.sgv(x.op, y.op, z.op)
ge.connect(sgv, ge.sgv(self.e.op).remap_inputs([0]))
self.assertTrue(
match.OpMatcher("^foo/bar/e$").input_ops("^z$", "foo/d$")(self.e.op))
def test_bypass(self):
"""Test for ge.bypass."""
ge.bypass(ge.sgv(self.f.op).remap_inputs([0]))
self.assertTrue(
match.OpMatcher("^foo/bar/h$").input_ops("^foo/c$", "foo/bar/g$")(
self.h.op))
if __name__ == "__main__":
test.main()
|
nouyang/Metrify-Hardware
|
refs/heads/master
|
FTDItest eagle+fw/rx.py
|
1
|
#!/usr/bin/env python
#
# rx.py
#
# rx.py serial_port port_speed
#
# Neil Gershenfeld
# CBA MIT 7/27/07
#
# (c) Massachusetts Institute of Technology 2007
# Permission granted for experimental and personal use;
# license for commercial sale available from MIT.
#
import serial, sys
if (len(sys.argv) != 3):
print "command line: rx.py serial_port port_speed"
sys.exit()
port = sys.argv[1]
speed = int(sys.argv[2])
ser = serial.Serial(port,speed)
ser.setDTR()
ser.flushInput()
count = 0
while 1:
count += 1
x = ser.read()
print '%d:'%count,x,' (dec %d'%ord(x),' hex %x)'%ord(x)
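The script above targets Python 2 (print statements, old pyserial calls). A minimal Python 3 sketch of the same receive loop, assuming pyserial 3.x, might look like:

```python
#!/usr/bin/env python3
# Hedged Python 3 sketch of the same byte-by-byte receive loop (assumes pyserial 3.x).
import sys
import serial

if len(sys.argv) != 3:
    print("command line: rx.py serial_port port_speed")
    sys.exit(1)

port, speed = sys.argv[1], int(sys.argv[2])
ser = serial.Serial(port, speed)
ser.dtr = True              # equivalent of the old ser.setDTR()
ser.reset_input_buffer()    # equivalent of the old ser.flushInput()

count = 0
while True:
    count += 1
    x = ser.read()          # one byte, returned as bytes
    print('%d: %r (dec %d hex %x)' % (count, x, x[0], x[0]))
```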
|
bartvm/powerline
|
refs/heads/develop
|
powerline/renderers/shell/zsh.py
|
38
|
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
from powerline.renderers.shell import ShellRenderer
class ZshPromptRenderer(ShellRenderer):
'''Powerline zsh prompt segment renderer.'''
escape_hl_start = '%{'
escape_hl_end = '%}'
character_translations = ShellRenderer.character_translations.copy()
character_translations[ord('%')] = '%%'
renderer = ZshPromptRenderer
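A standalone illustration (not powerline's own API) of what the `%` translation achieves: zsh treats `%` as a prompt escape, so literal percent signs emitted by segments must be doubled.

```python
# Standalone illustration: escaping '%' the way character_translations does.
translations = {ord('%'): '%%'}
segment_text = 'cpu 42%'
print(segment_text.translate(translations))  # -> cpu 42%%
```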
|
hsaputra/tensorflow
|
refs/heads/master
|
tensorflow/contrib/metrics/python/ops/metric_ops.py
|
3
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains metric-computing operations on streamed tensors.
Module documentation, including "@@" callouts, should be put in
third_party/tensorflow/contrib/metrics/__init__.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as collections_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import metrics_impl
from tensorflow.python.ops import nn
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.util.deprecation import deprecated
# Epsilon constant used to represent extremely small quantity.
_EPSILON = 1e-7
def _safe_div(numerator, denominator, name):
"""Divides two values, returning 0 if the denominator is <= 0.
Args:
numerator: A real `Tensor`.
denominator: A real `Tensor`, with dtype matching `numerator`.
name: Name for the returned op.
Returns:
0 if `denominator` <= 0, else `numerator` / `denominator`
"""
return array_ops.where(
math_ops.greater(denominator, 0),
math_ops.truediv(numerator, denominator),
0,
name=name)
def streaming_true_positives(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Sum the weights of true_positives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions
must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.true_positives(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_true_negatives(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Sum the weights of true_negatives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions
must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.true_negatives(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_false_positives(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Sum the weights of false positives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions
must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.false_positives(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_false_negatives(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the total number of false negatives.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions
must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
value_tensor: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the error from a batch of data.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `predictions`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
return metrics.false_negatives(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_mean(values,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the (weighted) mean of the given values.
The `streaming_mean` function creates two local variables, `total` and `count`
that are used to compute the average of `values`. This average is ultimately
returned as `mean` which is an idempotent operation that simply divides
`total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean`.
`update_op` increments `total` with the reduced sum of the product of `values`
and `weights`, and it increments `count` with the reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A `Tensor` of arbitrary dimensions.
weights: `Tensor` whose rank is either 0, or the same rank as `values`, and
must be broadcastable to `values` (i.e., all dimensions must be either
`1`, or the same as the corresponding `values` dimension).
metrics_collections: An optional list of collections that `mean`
should be added to.
updates_collections: An optional list of collections that `update_op`
should be added to.
name: An optional variable_scope name.
Returns:
mean: A `Tensor` representing the current mean, the value of `total` divided
by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
return metrics.mean(
values=values,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
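A minimal usage sketch (assumes TensorFlow 1.x; the metric's `total` and `count` are local variables, so they must be initialized with `tf.local_variables_initializer()`):

```python
import tensorflow as tf
from tensorflow.contrib import metrics as contrib_metrics

values = tf.constant([0.9, 0.2, 0.8, 0.4])
mean, update_op = contrib_metrics.streaming_mean(values)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)      # accumulate one batch into total/count
    print(sess.run(mean))    # (0.9 + 0.2 + 0.8 + 0.4) / 4 = 0.575
```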
def streaming_mean_tensor(values,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the element-wise (weighted) mean of the given tensors.
In contrast to the `streaming_mean` function which returns a scalar with the
mean, this function returns an average tensor with the same shape as the
input tensors.
The `streaming_mean_tensor` function creates two local variables,
`total_tensor` and `count_tensor` that are used to compute the average of
`values`. This average is ultimately returned as `mean` which is an idempotent
operation that simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean`.
`update_op` increments `total` with the reduced sum of the product of `values`
and `weights`, and it increments `count` with the reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A `Tensor` of arbitrary dimensions.
weights: `Tensor` whose rank is either 0, or the same rank as `values`, and
must be broadcastable to `values` (i.e., all dimensions must be either
`1`, or the same as the corresponding `values` dimension).
metrics_collections: An optional list of collections that `mean`
should be added to.
updates_collections: An optional list of collections that `update_op`
should be added to.
name: An optional variable_scope name.
Returns:
mean: A float `Tensor` representing the current mean, the value of `total`
divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
return metrics.mean_tensor(
values=values,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(
None, 'Please switch to tf.metrics.accuracy. Note that the order of the '
'labels and predictions arguments has been switched.')
def streaming_accuracy(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Calculates how often `predictions` matches `labels`.
The `streaming_accuracy` function creates two local variables, `total` and
`count` that are used to compute the frequency with which `predictions`
matches `labels`. This frequency is ultimately returned as `accuracy`: an
idempotent operation that simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `accuracy`.
Internally, an `is_correct` operation computes a `Tensor` with elements 1.0
where the corresponding elements of `predictions` and `labels` match and 0.0
otherwise. Then `update_op` increments `total` with the reduced sum of the
product of `weights` and `is_correct`, and it increments `count` with the
reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of any shape.
labels: The ground truth values, a `Tensor` whose shape matches
`predictions`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `accuracy` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
accuracy: A `Tensor` representing the accuracy, the value of `total` divided
by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `accuracy`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.accuracy(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
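A minimal usage sketch (TensorFlow 1.x; per the deprecation note above, `tf.metrics.accuracy` takes `labels` first, while this wrapper takes `predictions` first):

```python
import tensorflow as tf
from tensorflow.contrib import metrics as contrib_metrics

predictions = tf.constant([1, 0, 1, 1])
labels = tf.constant([1, 0, 0, 1])
accuracy, update_op = contrib_metrics.streaming_accuracy(predictions, labels)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)         # one batch: 3 of 4 predictions match
    print(sess.run(accuracy))   # -> 0.75
```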
def streaming_precision(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the precision of the predictions with respect to the labels.
The `streaming_precision` function creates two local variables,
`true_positives` and `false_positives`, that are used to compute the
precision. This value is ultimately returned as `precision`, an idempotent
operation that simply divides `true_positives` by the sum of `true_positives`
and `false_positives`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision`. `update_op` weights each prediction by the corresponding value in
`weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `bool` `Tensor` of arbitrary shape.
labels: The ground truth values, a `bool` `Tensor` whose dimensions must
match `predictions`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `precision` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
precision: Scalar float `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately and whose value matches
`precision`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.precision(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_recall(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the recall of the predictions with respect to the labels.
The `streaming_recall` function creates two local variables, `true_positives`
and `false_negatives`, that are used to compute the recall. This value is
ultimately returned as `recall`, an idempotent operation that simply divides
`true_positives` by the sum of `true_positives` and `false_negatives`.
For estimation of the metric over a stream of data, the function creates an
`update_op` that updates these variables and returns the `recall`. `update_op`
weights each prediction by the corresponding value in `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `bool` `Tensor` of arbitrary shape.
labels: The ground truth values, a `bool` `Tensor` whose dimensions must
match `predictions`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `recall` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
recall: Scalar float `Tensor` with the value of `true_positives` divided
by the sum of `true_positives` and `false_negatives`.
update_op: `Operation` that increments `true_positives` and
`false_negatives` variables appropriately and whose value matches
`recall`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.recall(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_false_positive_rate(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the false positive rate of predictions with respect to labels.
The `false_positive_rate` function creates two local variables,
`false_positives` and `true_negatives`, that are used to compute the
false positive rate. This value is ultimately returned as
`false_positive_rate`, an idempotent operation that simply divides
`false_positives` by the sum of `false_positives` and `true_negatives`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`false_positive_rate`. `update_op` weights each prediction by the
corresponding value in `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`false_positive_rate` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
false_positive_rate: Scalar float `Tensor` with the value of
`false_positives` divided by the sum of `false_positives` and
`true_negatives`.
update_op: `Operation` that increments `false_positives` and
`true_negatives` variables appropriately and whose value matches
`false_positive_rate`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'false_positive_rate',
(predictions, labels, weights)):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=math_ops.cast(predictions, dtype=dtypes.bool),
labels=math_ops.cast(labels, dtype=dtypes.bool),
weights=weights)
false_p, false_positives_update_op = metrics.false_positives(
labels=labels,
predictions=predictions,
weights=weights,
metrics_collections=None,
updates_collections=None,
name=None)
true_n, true_negatives_update_op = metrics.true_negatives(
labels=labels,
predictions=predictions,
weights=weights,
metrics_collections=None,
updates_collections=None,
name=None)
def compute_fpr(fp, tn, name):
return array_ops.where(
math_ops.greater(fp + tn, 0), math_ops.div(fp, fp + tn), 0, name)
fpr = compute_fpr(false_p, true_n, 'value')
update_op = compute_fpr(false_positives_update_op, true_negatives_update_op,
'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, fpr)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return fpr, update_op
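A minimal usage sketch (assumes this contrib module is importable as `tensorflow.contrib.metrics` in a TensorFlow 1.x build that includes it):

```python
import tensorflow as tf
from tensorflow.contrib import metrics as contrib_metrics

predictions = tf.constant([True, True, False, False])
labels = tf.constant([True, False, False, False])
fpr, update_op = contrib_metrics.streaming_false_positive_rate(predictions, labels)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)
    print(sess.run(fpr))  # fp=1, tn=2 -> 1 / (1 + 2) ≈ 0.333
```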
def streaming_false_negative_rate(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the false negative rate of predictions with respect to labels.
The `false_negative_rate` function creates two local variables,
`false_negatives` and `true_positives`, that are used to compute the
false negative rate. This value is ultimately returned as
`false_negative_rate`, an idempotent operation that simply divides
`false_negatives` by the sum of `false_negatives` and `true_positives`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`false_negative_rate`. `update_op` weights each prediction by the
corresponding value in `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
be cast to `bool`.
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`false_negative_rate` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
false_negative_rate: Scalar float `Tensor` with the value of
`false_negatives` divided by the sum of `false_negatives` and
`true_positives`.
update_op: `Operation` that increments `false_negatives` and
`true_positives` variables appropriately and whose value matches
`false_negative_rate`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'false_negative_rate',
(predictions, labels, weights)):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=math_ops.cast(predictions, dtype=dtypes.bool),
labels=math_ops.cast(labels, dtype=dtypes.bool),
weights=weights)
false_n, false_negatives_update_op = metrics.false_negatives(
labels,
predictions,
weights,
metrics_collections=None,
updates_collections=None,
name=None)
true_p, true_positives_update_op = metrics.true_positives(
labels,
predictions,
weights,
metrics_collections=None,
updates_collections=None,
name=None)
def compute_fnr(fn, tp, name):
return array_ops.where(
math_ops.greater(fn + tp, 0), math_ops.div(fn, fn + tp), 0, name)
fnr = compute_fnr(false_n, true_p, 'value')
update_op = compute_fnr(false_negatives_update_op, true_positives_update_op,
'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, fnr)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return fnr, update_op
def _streaming_confusion_matrix_at_thresholds(predictions,
labels,
thresholds,
weights=None,
includes=None):
"""Computes true_positives, false_negatives, true_negatives, false_positives.
This function creates up to four local variables, `true_positives`,
`true_negatives`, `false_positives` and `false_negatives`.
`true_positive[i]` is defined as the total weight of values in `predictions`
above `thresholds[i]` whose corresponding entry in `labels` is `True`.
`false_negatives[i]` is defined as the total weight of values in `predictions`
at most `thresholds[i]` whose corresponding entry in `labels` is `True`.
`true_negatives[i]` is defined as the total weight of values in `predictions`
at most `thresholds[i]` whose corresponding entry in `labels` is `False`.
`false_positives[i]` is defined as the total weight of values in `predictions`
above `thresholds[i]` whose corresponding entry in `labels` is `False`.
For estimation of these metrics over a stream of data, for each metric the
function respectively creates an `update_op` operation that updates the
variable and returns its value.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `Tensor` whose shape matches `predictions`. `labels` will be cast
to `bool`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions
must be either `1`, or the same as the corresponding `labels`
dimension).
includes: Tuple of keys to return, from 'tp', 'fn', 'tn', 'fp'. If `None`,
default to all four.
Returns:
values: Dict of variables of shape `[len(thresholds)]`. Keys are from
`includes`.
update_ops: Dict of operations that increments the `values`. Keys are from
`includes`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`includes` contains invalid keys.
"""
all_includes = ('tp', 'fn', 'tn', 'fp')
if includes is None:
includes = all_includes
else:
for include in includes:
if include not in all_includes:
raise ValueError('Invalid key: %s.' % include)
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions, labels, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
num_thresholds = len(thresholds)
# Reshape predictions and labels.
predictions_2d = array_ops.reshape(predictions, [-1, 1])
labels_2d = array_ops.reshape(
math_ops.cast(labels, dtype=dtypes.bool), [1, -1])
# Use static shape if known.
num_predictions = predictions_2d.get_shape().as_list()[0]
# Otherwise use dynamic shape.
if num_predictions is None:
num_predictions = array_ops.shape(predictions_2d)[0]
thresh_tiled = array_ops.tile(
array_ops.expand_dims(array_ops.constant(thresholds), [1]),
array_ops.stack([1, num_predictions]))
# Tile the predictions after thresholding them across different thresholds.
pred_is_pos = math_ops.greater(
array_ops.tile(array_ops.transpose(predictions_2d), [num_thresholds, 1]),
thresh_tiled)
if ('fn' in includes) or ('tn' in includes):
pred_is_neg = math_ops.logical_not(pred_is_pos)
# Tile labels by number of thresholds
label_is_pos = array_ops.tile(labels_2d, [num_thresholds, 1])
if ('fp' in includes) or ('tn' in includes):
label_is_neg = math_ops.logical_not(label_is_pos)
if weights is not None:
broadcast_weights = weights_broadcast_ops.broadcast_weights(
math_ops.to_float(weights), predictions)
weights_tiled = array_ops.tile(
array_ops.reshape(broadcast_weights, [1, -1]), [num_thresholds, 1])
thresh_tiled.get_shape().assert_is_compatible_with(
weights_tiled.get_shape())
else:
weights_tiled = None
values = {}
update_ops = {}
if 'tp' in includes:
true_positives = metrics_impl.metric_variable(
[num_thresholds], dtypes.float32, name='true_positives')
is_true_positive = math_ops.to_float(
math_ops.logical_and(label_is_pos, pred_is_pos))
if weights_tiled is not None:
is_true_positive *= weights_tiled
update_ops['tp'] = state_ops.assign_add(true_positives,
math_ops.reduce_sum(
is_true_positive, 1))
values['tp'] = true_positives
if 'fn' in includes:
false_negatives = metrics_impl.metric_variable(
[num_thresholds], dtypes.float32, name='false_negatives')
is_false_negative = math_ops.to_float(
math_ops.logical_and(label_is_pos, pred_is_neg))
if weights_tiled is not None:
is_false_negative *= weights_tiled
update_ops['fn'] = state_ops.assign_add(false_negatives,
math_ops.reduce_sum(
is_false_negative, 1))
values['fn'] = false_negatives
if 'tn' in includes:
true_negatives = metrics_impl.metric_variable(
[num_thresholds], dtypes.float32, name='true_negatives')
is_true_negative = math_ops.to_float(
math_ops.logical_and(label_is_neg, pred_is_neg))
if weights_tiled is not None:
is_true_negative *= weights_tiled
update_ops['tn'] = state_ops.assign_add(true_negatives,
math_ops.reduce_sum(
is_true_negative, 1))
values['tn'] = true_negatives
if 'fp' in includes:
false_positives = metrics_impl.metric_variable(
[num_thresholds], dtypes.float32, name='false_positives')
is_false_positive = math_ops.to_float(
math_ops.logical_and(label_is_neg, pred_is_pos))
if weights_tiled is not None:
is_false_positive *= weights_tiled
update_ops['fp'] = state_ops.assign_add(false_positives,
math_ops.reduce_sum(
is_false_positive, 1))
values['fp'] = false_positives
return values, update_ops
def streaming_true_positives_at_thresholds(predictions,
labels,
thresholds,
weights=None):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights=weights, includes=('tp',))
return values['tp'], update_ops['tp']
def streaming_false_negatives_at_thresholds(predictions,
labels,
thresholds,
weights=None):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights=weights, includes=('fn',))
return values['fn'], update_ops['fn']
def streaming_false_positives_at_thresholds(predictions,
labels,
thresholds,
weights=None):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights=weights, includes=('fp',))
return values['fp'], update_ops['fp']
def streaming_true_negatives_at_thresholds(predictions,
labels,
thresholds,
weights=None):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights=weights, includes=('tn',))
return values['tn'], update_ops['tn']
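These four wrappers return one value per threshold. A minimal usage sketch (TensorFlow 1.x; a prediction counts as positive when it is strictly greater than the threshold):

```python
import tensorflow as tf
from tensorflow.contrib import metrics as contrib_metrics

predictions = tf.constant([0.2, 0.6, 0.8, 0.4])
labels = tf.constant([False, True, True, False])
tp, tp_update = contrib_metrics.streaming_true_positives_at_thresholds(
    predictions, labels, thresholds=[0.5, 0.7])

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(tp_update)
    print(sess.run(tp))  # -> [2., 1.]: both positives exceed 0.5, only 0.8 exceeds 0.7
```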
def streaming_curve_points(labels=None,
predictions=None,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
curve='ROC',
name=None):
"""Computes curve (ROC or PR) values for a prespecified number of points.
The `streaming_curve_points` function creates four local variables,
`true_positives`, `true_negatives`, `false_positives` and `false_negatives`
that are used to compute the curve values. To discretize the curve, a linearly
spaced set of thresholds is used to compute pairs of recall and precision
values.
For best results, `predictions` should be distributed approximately uniformly
in the range [0, 1] and not peaked around 0 or 1.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: A `Tensor` whose shape matches `predictions`. Will be cast to
`bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use when discretizing the roc
curve.
metrics_collections: An optional list of collections that `auc` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
curve: Specifies the name of the curve to be computed, 'ROC' [default] or
'PR' for the Precision-Recall-curve.
name: An optional variable_scope name.
Returns:
points: A `Tensor` with shape [num_thresholds, 2] that contains points of
the curve.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
TODO(chizeng): Consider rewriting this method to make use of logic within the
precision_recall_at_equal_thresholds method (to improve run time).
"""
with variable_scope.variable_scope(name, 'curve_points',
(labels, predictions, weights)):
if curve != 'ROC' and curve != 'PR':
raise ValueError('curve must be either ROC or PR, %s unknown' % (curve))
kepsilon = _EPSILON # to account for floating point imprecisions
thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)
for i in range(num_thresholds - 2)]
thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]
values, update_ops = _streaming_confusion_matrix_at_thresholds(
labels=labels,
predictions=predictions,
thresholds=thresholds,
weights=weights)
# Add epsilons to avoid dividing by 0.
epsilon = 1.0e-6
def compute_points(tp, fn, tn, fp):
"""Computes the roc-auc or pr-auc based on confusion counts."""
rec = math_ops.div(tp + epsilon, tp + fn + epsilon)
if curve == 'ROC':
fp_rate = math_ops.div(fp, fp + tn + epsilon)
return fp_rate, rec
else: # curve == 'PR'.
prec = math_ops.div(tp + epsilon, tp + fp + epsilon)
return rec, prec
xs, ys = compute_points(values['tp'], values['fn'], values['tn'],
values['fp'])
points = array_ops.stack([xs, ys], axis=1)
update_op = control_flow_ops.group(*update_ops.values())
if metrics_collections:
ops.add_to_collections(metrics_collections, points)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return points, update_op
@deprecated(
None, 'Please switch to tf.metrics.auc. Note that the order of the '
'labels and predictions arguments has been switched.')
def streaming_auc(predictions,
labels,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
curve='ROC',
name=None):
"""Computes the approximate AUC via a Riemann sum.
The `streaming_auc` function creates four local variables, `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` that are used to
compute the AUC. To discretize the AUC curve, a linearly spaced set of
thresholds is used to compute pairs of recall and precision values. The area
under the ROC-curve is therefore computed using the height of the recall
values by the false positive rate, while the area under the PR-curve is
computed using the height of the precision values by the recall.
This value is ultimately returned as `auc`, an idempotent operation that
computes the area under a discretized curve of precision versus recall values
(computed using the aforementioned variables). The `num_thresholds` variable
controls the degree of discretization with larger numbers of thresholds more
closely approximating the true AUC. The quality of the approximation may vary
dramatically depending on `num_thresholds`.
For best results, `predictions` should be distributed approximately uniformly
in the range [0, 1] and not peaked around 0 or 1. The quality of the AUC
approximation may be poor if this is not the case.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `auc`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use when discretizing the roc
curve.
metrics_collections: An optional list of collections that `auc` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
curve: Specifies the name of the curve to be computed, 'ROC' [default] or
'PR' for the Precision-Recall-curve.
name: An optional variable_scope name.
Returns:
auc: A scalar `Tensor` representing the current area-under-curve.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables
appropriately and whose value matches `auc`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.auc(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
num_thresholds=num_thresholds,
curve=curve,
updates_collections=updates_collections,
name=name)
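A minimal usage sketch (TensorFlow 1.x; with only four predictions the 200-threshold Riemann sum is a rough approximation):

```python
import tensorflow as tf
from tensorflow.contrib import metrics as contrib_metrics

predictions = tf.constant([0.1, 0.4, 0.35, 0.8])
labels = tf.constant([False, False, True, True])
auc, update_op = contrib_metrics.streaming_auc(predictions, labels)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)
    print(sess.run(auc))  # roughly 0.75, the exact ROC AUC for this tiny batch
```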
def _compute_dynamic_auc(labels, predictions, curve='ROC'):
"""Computes the apporixmate AUC by a Riemann sum with data-derived thresholds.
Computes the area under the ROC or PR curve using each prediction as a
threshold. This could be slow for large batches, but has the advantage of not
having its results degrade depending on the distribution of predictions.
Args:
labels: A `Tensor` of ground truth labels with the same shape as
`predictions` with values of 0 or 1 and type `int64`.
predictions: A 1-D `Tensor` of predictions whose values are `float64`.
curve: The name of the curve to be computed, 'ROC' for the Receiver
Operating Characteristic or 'PR' for the Precision-Recall curve.
Returns:
A scalar `Tensor` containing the area-under-curve value for the input.
"""
# Count the total number of positive and negative labels in the input.
size = array_ops.size(predictions)
total_positive = math_ops.cast(math_ops.reduce_sum(labels), dtypes.int32)
def continue_computing_dynamic_auc():
"""Continues dynamic auc computation, entered if labels are not all equal.
Returns:
A scalar `Tensor` containing the area-under-curve value.
"""
# Sort the predictions descending, and the corresponding labels as well.
ordered_predictions, indices = nn.top_k(predictions, k=size)
ordered_labels = array_ops.gather(labels, indices)
# Get the counts of the unique ordered predictions.
_, _, counts = array_ops.unique_with_counts(ordered_predictions)
# Compute the indices of the split points between different predictions.
splits = math_ops.cast(
array_ops.pad(math_ops.cumsum(counts), paddings=[[1, 0]]), dtypes.int32)
# Count the positives to the left of the split indices.
positives = math_ops.cast(
array_ops.pad(math_ops.cumsum(ordered_labels), paddings=[[1, 0]]),
dtypes.int32)
true_positives = array_ops.gather(positives, splits)
if curve == 'ROC':
# Count the negatives to the left of every split point and the total
# number of negatives for computing the FPR.
false_positives = math_ops.subtract(splits, true_positives)
total_negative = size - total_positive
x_axis_values = math_ops.truediv(false_positives, total_negative)
y_axis_values = math_ops.truediv(true_positives, total_positive)
elif curve == 'PR':
x_axis_values = math_ops.truediv(true_positives, total_positive)
# For conformance, set precision to 1 when the number of positive
# classifications is 0.
y_axis_values = array_ops.where(
math_ops.greater(splits, 0),
math_ops.truediv(true_positives, splits),
array_ops.ones_like(true_positives, dtype=dtypes.float64))
# Calculate trapezoid areas.
heights = math_ops.add(y_axis_values[1:], y_axis_values[:-1]) / 2.0
widths = math_ops.abs(
math_ops.subtract(x_axis_values[1:], x_axis_values[:-1]))
return math_ops.reduce_sum(math_ops.multiply(heights, widths))
# If all the labels are the same, AUC isn't well-defined (but raising an
# exception seems excessive) so we return 0, otherwise we finish computing.
return control_flow_ops.cond(
math_ops.logical_or(
math_ops.equal(total_positive, 0),
math_ops.equal(total_positive, size)
),
true_fn=lambda: array_ops.constant(0, dtypes.float64),
false_fn=continue_computing_dynamic_auc)
def streaming_dynamic_auc(labels,
predictions,
curve='ROC',
metrics_collections=(),
updates_collections=(),
name=None):
"""Computes the apporixmate AUC by a Riemann sum with data-derived thresholds.
USAGE NOTE: this approach requires storing all of the predictions and labels
for a single evaluation in memory, so it may not be usable when the evaluation
batch size and/or the number of evaluation steps is very large.
Computes the area under the ROC or PR curve using each prediction as a
threshold. This has the advantage of being resilient to the distribution of
predictions by aggregating across batches, accumulating labels and predictions
and performing the final calculation using all of the concatenated values.
Args:
labels: A `Tensor` of ground truth labels with the same shape as
`predictions` and with values of 0 or 1 whose values are castable to `int64`.
predictions: A `Tensor` of predictions whose values are castable to
`float64`. Will be flattened into a 1-D `Tensor`.
curve: The name of the curve for which to compute AUC, 'ROC' for the
Receiver Operating Characteristic or 'PR' for the Precision-Recall curve.
metrics_collections: An optional iterable of collections that `auc` should
be added to.
updates_collections: An optional iterable of collections that `update_op`
should be added to.
name: An optional name for the variable_scope that contains the metric
variables.
Returns:
auc: A scalar `Tensor` containing the current area-under-curve value.
update_op: An operation that concatenates the input labels and predictions
to the accumulated values.
Raises:
ValueError: If `labels` and `predictions` have mismatched shapes or if
`curve` isn't a recognized curve type.
"""
if curve not in ['PR', 'ROC']:
raise ValueError('curve must be either ROC or PR, %s unknown' % curve)
with variable_scope.variable_scope(name, default_name='dynamic_auc'):
labels.get_shape().assert_is_compatible_with(predictions.get_shape())
predictions = array_ops.reshape(
math_ops.cast(predictions, dtypes.float64), [-1])
labels = array_ops.reshape(math_ops.cast(labels, dtypes.int64), [-1])
with ops.control_dependencies([
check_ops.assert_greater_equal(
labels,
array_ops.zeros_like(labels, dtypes.int64),
message='labels must be 0 or 1, at least one is <0'),
check_ops.assert_less_equal(
labels,
array_ops.ones_like(labels, dtypes.int64),
message='labels must be 0 or 1, at least one is >1')
]):
preds_accum, update_preds = streaming_concat(predictions,
name='concat_preds')
labels_accum, update_labels = streaming_concat(labels,
name='concat_labels')
update_op = control_flow_ops.group(update_labels, update_preds)
auc = _compute_dynamic_auc(labels_accum, preds_accum, curve=curve)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
if metrics_collections:
ops.add_to_collections(metrics_collections, auc)
return auc, update_op
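A minimal usage sketch (TensorFlow 1.x; the metric concatenates all labels and predictions seen so far, so memory grows with the number of evaluated examples):

```python
import tensorflow as tf
from tensorflow.contrib import metrics as contrib_metrics

labels = tf.constant([0, 0, 1, 1])
predictions = tf.constant([0.1, 0.4, 0.35, 0.8])
auc, update_op = contrib_metrics.streaming_dynamic_auc(labels, predictions)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)      # accumulate this batch's labels and predictions
    print(sess.run(auc))     # exact ROC AUC over everything seen so far: 0.75
```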
def precision_recall_at_equal_thresholds(labels,
predictions,
weights=None,
num_thresholds=None,
use_locking=None,
name=None):
"""A helper method for creating metrics related to precision-recall curves.
These values are true positives, false negatives, true negatives, false
positives, precision, and recall. This function returns a data structure that
contains ops within it.
Unlike _streaming_confusion_matrix_at_thresholds (which exhibits O(T * N)
space and run time), this op exhibits O(T + N) space and run time, where T is
the number of thresholds and N is the size of the predictions tensor. Hence,
it may be advantageous to use this function when `predictions` is big.
For instance, prefer this method for per-pixel classification tasks, for which
the predictions tensor may be very large.
Each number in `predictions`, a float in `[0, 1]`, is compared with its
corresponding label in `labels`, and counts as a single tp/fp/tn/fn value at
each threshold. This is then multiplied with `weights` which can be used to
reweight certain values, or more commonly used for masking values.
Args:
labels: A bool `Tensor` whose shape matches `predictions`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
weights: Optional; If provided, a `Tensor` that has the same dtype as,
and broadcastable to, `predictions`. This tensor is multplied by counts.
num_thresholds: Optional; Number of thresholds, evenly distributed in
`[0, 1]`. Should be `>= 2`. Defaults to 201. Note that the number of bins
is 1 less than `num_thresholds`. Using an even `num_thresholds` value
instead of an odd one may yield unfriendly edges for bins.
use_locking: Optional; If True, the op will be protected by a lock.
Otherwise, the behavior is undefined, but may exhibit less contention.
Defaults to True.
name: Optional; variable_scope name. If not provided, the string
'precision_recall_at_equal_thresholds' is used.
Returns:
result: A named tuple (See PrecisionRecallData within the implementation of
this function) with properties that are variables of shape
`[num_thresholds]`. The names of the properties are tp, fp, tn, fn,
precision, recall, thresholds.
update_op: An op that accumulates values.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`includes` contains invalid keys.
"""
# Disable the invalid-name checker so that we can capitalize the name.
# pylint: disable=invalid-name
PrecisionRecallData = collections_lib.namedtuple(
'PrecisionRecallData',
['tp', 'fp', 'tn', 'fn', 'precision', 'recall', 'thresholds'])
# pylint: enable=invalid-name
if num_thresholds is None:
num_thresholds = 201
if weights is None:
weights = 1.0
if use_locking is None:
use_locking = True
check_ops.assert_type(labels, dtypes.bool)
dtype = predictions.dtype
with variable_scope.variable_scope(name,
'precision_recall_at_equal_thresholds',
(labels, predictions, weights)):
# Make sure that predictions are within [0.0, 1.0].
with ops.control_dependencies([
check_ops.assert_greater_equal(
predictions,
math_ops.cast(0.0, dtype=predictions.dtype),
message='predictions must be in [0, 1]'),
check_ops.assert_less_equal(
predictions,
math_ops.cast(1.0, dtype=predictions.dtype),
message='predictions must be in [0, 1]')
]):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=predictions,
labels=labels,
weights=weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
# We cast to float to ensure we have 0.0 or 1.0.
f_labels = math_ops.cast(labels, dtype)
# Get weighted true/false labels.
true_labels = f_labels * weights
false_labels = (1.0 - f_labels) * weights
# Flatten predictions and labels.
predictions = array_ops.reshape(predictions, [-1])
true_labels = array_ops.reshape(true_labels, [-1])
false_labels = array_ops.reshape(false_labels, [-1])
# To compute TP/FP/TN/FN, we are measuring a binary classifier
# C(t) = (predictions >= t)
# at each threshold 't'. So we have
# TP(t) = sum( C(t) * true_labels )
# FP(t) = sum( C(t) * false_labels )
#
# But, computing C(t) requires computation for each t. To make it fast,
# observe that C(t) is a cumulative integral, and so if we have
# thresholds = [t_0, ..., t_{n-1}]; t_0 < ... < t_{n-1}
# where n = num_thresholds, and if we can compute the bucket function
# B(i) = Sum( (predictions == t), t_i <= t < t{i+1} )
# then we get
# C(t_i) = sum( B(j), j >= i )
# which is the reversed cumulative sum in tf.cumsum().
#
# We can compute B(i) efficiently by taking advantage of the fact that
# our thresholds are evenly distributed, in that
# width = 1.0 / (num_thresholds - 1)
# thresholds = [0.0, 1*width, 2*width, 3*width, ..., 1.0]
# Given a prediction value p, we can map it to its bucket by
# bucket_index(p) = floor( p * (num_thresholds - 1) )
# so we can use tf.scatter_add() to update the buckets in one pass.
#
# This implementation exhibits a run time and space complexity of O(T + N),
# where T is the number of thresholds and N is the size of predictions.
# Metrics that rely on _streaming_confusion_matrix_at_thresholds instead
# exhibit a complexity of O(T * N).
# Compute the bucket indices for each prediction value.
bucket_indices = math_ops.cast(
math_ops.floor(predictions * (num_thresholds - 1)), dtypes.int32)
with ops.name_scope('variables'):
tp_buckets_v = metrics_impl.metric_variable(
[num_thresholds], dtype, name='tp_buckets')
fp_buckets_v = metrics_impl.metric_variable(
[num_thresholds], dtype, name='fp_buckets')
with ops.name_scope('update_op'):
update_tp = state_ops.scatter_add(
tp_buckets_v, bucket_indices, true_labels, use_locking=use_locking)
update_fp = state_ops.scatter_add(
fp_buckets_v, bucket_indices, false_labels, use_locking=use_locking)
# Set up the cumulative sums to compute the actual metrics.
tp = math_ops.cumsum(tp_buckets_v, reverse=True, name='tp')
fp = math_ops.cumsum(fp_buckets_v, reverse=True, name='fp')
# fn = sum(true_labels) - tp
# = sum(tp_buckets) - tp
# = tp[0] - tp
# Similarly,
# tn = fp[0] - fp
tn = fp[0] - fp
fn = tp[0] - tp
# We use a minimum to prevent division by 0.
epsilon = 1e-7
precision = tp / math_ops.maximum(epsilon, tp + fp)
recall = tp / math_ops.maximum(epsilon, tp + fn)
result = PrecisionRecallData(
tp=tp,
fp=fp,
tn=tn,
fn=fn,
precision=precision,
recall=recall,
thresholds=math_ops.lin_space(0.0, 1.0, num_thresholds))
update_op = control_flow_ops.group(update_tp, update_fp)
return result, update_op
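A minimal usage sketch (TensorFlow 1.x; `labels` must be `bool`, and each returned field holds one value per threshold):

```python
import tensorflow as tf
from tensorflow.contrib import metrics as contrib_metrics

labels = tf.constant([True, False, True, False])
predictions = tf.constant([0.9, 0.3, 0.6, 0.7])
result, update_op = contrib_metrics.precision_recall_at_equal_thresholds(
    labels=labels, predictions=predictions, num_thresholds=5)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(update_op)
    # One precision/recall value per threshold in [0.0, 0.25, 0.5, 0.75, 1.0].
    print(sess.run(result.precision))
    print(sess.run(result.recall))
```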
def streaming_specificity_at_sensitivity(predictions,
labels,
sensitivity,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the specificity at a given sensitivity.
The `streaming_specificity_at_sensitivity` function creates four local
variables, `true_positives`, `true_negatives`, `false_positives` and
`false_negatives` that are used to compute the specificity at the given
sensitivity value. The threshold for the given sensitivity value is computed
and used to evaluate the corresponding specificity.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`specificity`. `update_op` increments the `true_positives`, `true_negatives`,
`false_positives` and `false_negatives` counts with the weight of each case
found in the `predictions` and `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
For additional information about specificity and sensitivity, see the
following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
sensitivity: A scalar value in range `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use for matching the given
sensitivity.
metrics_collections: An optional list of collections that `specificity`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
specificity: A scalar `Tensor` representing the specificity at the given
`sensitivity` value.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables
appropriately and whose value matches `specificity`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`sensitivity` is not between 0 and 1, or if either `metrics_collections`
or `updates_collections` are not a list or tuple.
"""
return metrics.specificity_at_sensitivity(
sensitivity=sensitivity,
num_thresholds=num_thresholds,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_sensitivity_at_specificity(predictions,
labels,
specificity,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the sensitivity at a given specificity.
The `streaming_sensitivity_at_specificity` function creates four local
variables, `true_positives`, `true_negatives`, `false_positives` and
`false_negatives` that are used to compute the sensitivity at the given
specificity value. The threshold for the given specificity value is computed
and used to evaluate the corresponding sensitivity.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`sensitivity`. `update_op` increments the `true_positives`, `true_negatives`,
`false_positives` and `false_negatives` counts with the weight of each case
found in the `predictions` and `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
For additional information about specificity and sensitivity, see the
following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
specificity: A scalar value in range `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use for matching the given
specificity.
metrics_collections: An optional list of collections that `sensitivity`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
sensitivity: A scalar `Tensor` representing the sensitivity at the given
`specificity` value.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables
appropriately and whose value matches `sensitivity`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`specificity` is not between 0 and 1, or if either `metrics_collections`
or `updates_collections` are not a list or tuple.
"""
return metrics.sensitivity_at_specificity(
specificity=specificity,
num_thresholds=num_thresholds,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(
None, 'Please switch to tf.metrics.precision_at_thresholds. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_precision_at_thresholds(predictions,
labels,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes precision values for different `thresholds` on `predictions`.
The `streaming_precision_at_thresholds` function creates four local variables,
`true_positives`, `true_negatives`, `false_positives` and `false_negatives`
for various values of thresholds. `precision[i]` is defined as the total
weight of values in `predictions` above `thresholds[i]` whose corresponding
entry in `labels` is `True`, divided by the total weight of values in
`predictions` above `thresholds[i]` (`true_positives[i] / (true_positives[i] +
false_positives[i])`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
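A minimal usage sketch (the values below are illustrative and the
`tf.contrib.metrics` export path is assumed):
```python
import tensorflow as tf
predictions = tf.constant([0.2, 0.6, 0.8])
labels = tf.constant([False, False, True])
precision, update_op = tf.contrib.metrics.streaming_precision_at_thresholds(
    predictions, labels, thresholds=[0.3, 0.5, 0.7])
with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  sess.run(update_op)
  print(sess.run(precision))  # ~[0.5, 0.5, 1.0] for this toy batch.
```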
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `precision` should
be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
precision: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables that
are used in the computation of `precision`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.precision_at_thresholds(
thresholds=thresholds,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(
None, 'Please switch to tf.metrics.recall_at_thresholds. Note that the '
'order of the labels and predictions arguments has been switched.')
def streaming_recall_at_thresholds(predictions,
labels,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes various recall values for different `thresholds` on `predictions`.
The `streaming_recall_at_thresholds` function creates four local variables,
`true_positives`, `true_negatives`, `false_positives` and `false_negatives`
for various values of thresholds. `recall[i]` is defined as the total weight
of values in `predictions` above `thresholds[i]` whose corresponding entry in
`labels` is `True`, divided by the total weight of `True` values in `labels`
(`true_positives[i] / (true_positives[i] + false_negatives[i])`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `recall`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `recall` should be
added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
recall: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that increments the `true_positives`,
`true_negatives`, `false_positives` and `false_negatives` variables that
are used in the computation of `recall`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.recall_at_thresholds(
thresholds=thresholds,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_false_positive_rate_at_thresholds(predictions,
labels,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes various fpr values for different `thresholds` on `predictions`.
The `streaming_false_positive_rate_at_thresholds` function creates two
local variables, `false_positives` and `true_negatives`, for various threshold
values. `false_positive_rate[i]` is defined as the total weight
of values in `predictions` above `thresholds[i]` whose corresponding entry in
`labels` is `False`, divided by the total weight of `False` values in `labels`
(`false_positives[i] / (false_positives[i] + true_negatives[i])`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`false_positive_rate`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
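A minimal usage sketch (the values below are illustrative and the
`tf.contrib.metrics` export path is assumed):
```python
import tensorflow as tf
predictions = tf.constant([0.2, 0.7, 0.9])
labels = tf.constant([False, False, True])
fpr, update_op = (
    tf.contrib.metrics.streaming_false_positive_rate_at_thresholds(
        predictions, labels, thresholds=[0.5]))
with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  sess.run(update_op)
  print(sess.run(fpr))  # ~[0.5]: one of the two negatives scores above 0.5.
```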
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`false_positive_rate` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
false_positive_rate: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that increments the `false_positives` and
`true_negatives` variables that are used in the computation of
`false_positive_rate`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'false_positive_rate_at_thresholds',
(predictions, labels, weights)):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights, includes=('fp', 'tn'))
# Avoid division by zero.
epsilon = _EPSILON
def compute_fpr(fp, tn, name):
return math_ops.div(fp, epsilon + fp + tn, name='fpr_' + name)
fpr = compute_fpr(values['fp'], values['tn'], 'value')
update_op = compute_fpr(update_ops['fp'], update_ops['tn'], 'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, fpr)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return fpr, update_op
def streaming_false_negative_rate_at_thresholds(predictions,
labels,
thresholds,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes various fnr values for different `thresholds` on `predictions`.
The `streaming_false_negative_rate_at_thresholds` function creates two
local variables, `false_negatives` and `true_positives`, for various threshold
values. `false_negative_rate[i]` is defined as the total weight
of values in `predictions` at or below `thresholds[i]` whose corresponding
entry in `labels` is `True`, divided by the total weight of `True` values in
`labels` (`false_negatives[i] / (false_negatives[i] + true_positives[i])`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`false_negative_rate`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
labels: A `bool` `Tensor` whose shape matches `predictions`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`false_negative_rate` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
false_negative_rate: A float `Tensor` of shape `[len(thresholds)]`.
update_op: An operation that increments the `false_negatives` and
`true_positives` variables that are used in the computation of
`false_negative_rate`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
with variable_scope.variable_scope(name, 'false_negative_rate_at_thresholds',
(predictions, labels, weights)):
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights, includes=('fn', 'tp'))
# Avoid division by zero.
epsilon = _EPSILON
def compute_fnr(fn, tp, name):
return math_ops.div(fn, epsilon + fn + tp, name='fnr_' + name)
fnr = compute_fnr(values['fn'], values['tp'], 'value')
update_op = compute_fnr(update_ops['fn'], update_ops['tp'], 'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, fnr)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return fnr, update_op
def _at_k_name(name, k=None, class_id=None):
if k is not None:
name = '%s_at_%d' % (name, k)
else:
name = '%s_at_k' % (name)
if class_id is not None:
name = '%s_class%d' % (name, class_id)
return name
@deprecated('2016-11-08', 'Please use `streaming_sparse_recall_at_k`, '
'and reshape labels from [batch_size] to [batch_size, 1].')
def streaming_recall_at_k(predictions,
labels,
k,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the recall@k of the predictions with respect to dense labels.
The `streaming_recall_at_k` function creates two local variables, `total` and
`count`, that are used to compute the recall@k frequency. This frequency is
ultimately returned as `recall_at_<k>`: an idempotent operation that simply
divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`recall_at_<k>`. Internally, an `in_top_k` operation computes a `Tensor` with
shape [batch_size] whose elements indicate whether or not the corresponding
label is in the top `k` `predictions`. Then `update_op` increments `total`
with the reduced sum of `weights` where `in_top_k` is `True`, and it
increments `count` with the reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A float `Tensor` of dimension [batch_size, num_classes].
labels: A `Tensor` of dimension [batch_size] whose type is in `int32`,
`int64`.
k: The number of top elements to look at for computing recall.
weights: `Tensor` whose rank is either 0, or the same rank as `labels`, and
must be broadcastable to `labels` (i.e., all dimensions must be either
`1`, or the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that `recall_at_k`
should be added to.
updates_collections: An optional list of collections `update_op` should be
added to.
name: An optional variable_scope name.
Returns:
recall_at_k: A `Tensor` representing the recall@k, the fraction of labels
which fall into the top `k` predictions.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `recall_at_k`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
in_top_k = math_ops.to_float(nn.in_top_k(predictions, labels, k))
return streaming_mean(in_top_k, weights, metrics_collections,
updates_collections, name or _at_k_name('recall', k))
# TODO(ptucker): Validate range of values in labels?
def streaming_sparse_recall_at_k(predictions,
labels,
k,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes recall@k of the predictions with respect to sparse labels.
If `class_id` is not specified, we'll calculate recall as the ratio of true
positives (i.e., correct predictions, items in the top `k` highest
`predictions` that are found in the corresponding row in `labels`) to
actual positives (the full `labels` row).
If `class_id` is specified, we calculate recall by considering only the rows
in the batch for which `class_id` is in `labels`, and computing the
fraction of them for which `class_id` is in the top `k` highest
`predictions` for the corresponding row.
`streaming_sparse_recall_at_k` creates two local variables,
`true_positive_at_<k>` and `false_negative_at_<k>`, that are used to compute
the recall_at_k frequency. This frequency is ultimately returned as
`recall_at_<k>`: an idempotent operation that simply divides
`true_positive_at_<k>` by total (`true_positive_at_<k>` +
`false_negative_at_<k>`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`recall_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
indicating the top `k` `predictions`. Set operations applied to `top_k` and
`labels` calculate the true positives and false negatives weighted by
`weights`. Then `update_op` increments `true_positive_at_<k>` and
`false_negative_at_<k>` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
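A minimal usage sketch (the values below are illustrative and the
`tf.contrib.metrics` export path is assumed):
```python
import tensorflow as tf
# Logits for 2 examples over 4 classes; `labels` holds the true class ids.
predictions = tf.constant([[0.1, 0.3, 0.2, 0.4],
                           [0.5, 0.1, 0.3, 0.1]])
labels = tf.constant([[3], [1]], dtype=tf.int64)
recall, update_op = tf.contrib.metrics.streaming_sparse_recall_at_k(
    predictions, labels, k=2)
with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  sess.run(update_op)
  print(sess.run(recall))  # 0.5: row 0's label is in its top-2, row 1's is not.
```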
Args:
predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes].
The final dimension contains the logit values for each class. [D1, ... DN]
must match `labels`.
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions`.
Values should be in range [0, num_classes), where num_classes is the last
dimension of `predictions`. Values outside this range always count
towards `false_negative_at_<k>`.
k: Integer, k for @k metric.
class_id: Integer class ID for which we want binary metrics. This should be
in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If class_id is outside this range, the method returns NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
recall: Scalar `float64` `Tensor` with the value of `true_positives` divided
by the sum of `true_positives` and `false_negatives`.
update_op: `Operation` that increments `true_positives` and
`false_negatives` variables appropriately, and whose value matches
`recall`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
"""
return metrics.recall_at_k(
k=k,
class_id=class_id,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
# TODO(ptucker): Validate range of values in labels?
def streaming_sparse_precision_at_k(predictions,
labels,
k,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes precision@k of the predictions with respect to sparse labels.
If `class_id` is not specified, we calculate precision as the ratio of true
positives (i.e., correct predictions, items in the top `k` highest
`predictions` that are found in the corresponding row in `labels`) to
positives (all top `k` `predictions`).
If `class_id` is specified, we calculate precision by considering only the
rows in the batch for which `class_id` is in the top `k` highest
`predictions`, and computing the fraction of them for which `class_id` is
in the corresponding row in `labels`.
We expect precision to decrease as `k` increases.
`streaming_sparse_precision_at_k` creates two local variables,
`true_positive_at_<k>` and `false_positive_at_<k>`, that are used to compute
the precision@k frequency. This frequency is ultimately returned as
`precision_at_<k>`: an idempotent operation that simply divides
`true_positive_at_<k>` by total (`true_positive_at_<k>` +
`false_positive_at_<k>`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
indicating the top `k` `predictions`. Set operations applied to `top_k` and
`labels` calculate the true positives and false positives weighted by
`weights`. Then `update_op` increments `true_positive_at_<k>` and
`false_positive_at_<k>` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes].
The final dimension contains the logit values for each class. [D1, ... DN]
must match `labels`.
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions`. Values should be in range [0, num_classes), where
num_classes is the last dimension of `predictions`. Values outside this
range are ignored.
k: Integer, k for @k metric.
class_id: Integer class ID for which we want binary metrics. This should be
in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If `class_id` is outside this range, the method returns
NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
precision: Scalar `float64` `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately, and whose value matches
`precision`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
"""
return metrics.precision_at_k(
k=k,
class_id=class_id,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
# TODO(ptucker): Validate range of values in labels?
def streaming_sparse_precision_at_top_k(top_k_predictions,
labels,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes precision@k of top-k predictions with respect to sparse labels.
If `class_id` is not specified, we calculate precision as the ratio of
true positives (i.e., correct predictions, items in `top_k_predictions`
that are found in the corresponding row in `labels`) to positives (all
`top_k_predictions`).
If `class_id` is specified, we calculate precision by considering only the
rows in the batch for which `class_id` is in the top `k` highest
`predictions`, and computing the fraction of them for which `class_id` is
in the corresponding row in `labels`.
We expect precision to decrease as `k` increases.
`streaming_sparse_precision_at_top_k` creates two local variables,
`true_positive_at_k` and `false_positive_at_k`, that are used to compute
the precision@k frequency. This frequency is ultimately returned as
`precision_at_k`: an idempotent operation that simply divides
`true_positive_at_k` by total (`true_positive_at_k` + `false_positive_at_k`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision_at_k`. Internally, set operations applied to `top_k_predictions`
and `labels` calculate the true positives and false positives weighted by
`weights`. Then `update_op` increments `true_positive_at_k` and
`false_positive_at_k` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
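A minimal usage sketch (the values below are illustrative and the
`tf.contrib.metrics` export path is assumed):
```python
import tensorflow as tf
# Pre-computed top-2 class indices for 2 examples.
top_k_predictions = tf.constant([[3, 1], [0, 2]], dtype=tf.int64)
labels = tf.constant([[3], [1]], dtype=tf.int64)
precision, update_op = (
    tf.contrib.metrics.streaming_sparse_precision_at_top_k(
        top_k_predictions, labels))
with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  sess.run(update_op)
  print(sess.run(precision))  # ~0.25: 1 of the 4 predicted ids is a label.
```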
Args:
top_k_predictions: Integer `Tensor` with shape [D1, ... DN, k] where
N >= 1. Commonly, N=1 and top_k_predictions has shape [batch size, k].
The final dimension contains the indices of top-k labels. [D1, ... DN]
must match `labels`.
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`top_k_predictions`. Values should be in range [0, num_classes), where
num_classes is the last dimension of `predictions`. Values outside this
range are ignored.
class_id: Integer class ID for which we want binary metrics. This should be
in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If `class_id` is outside this range, the method returns
NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
precision: Scalar `float64` `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately, and whose value matches
`precision`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
ValueError: If `top_k_predictions` has rank < 2.
"""
default_name = _at_k_name('precision', class_id=class_id)
with ops.name_scope(name, default_name,
(top_k_predictions, labels, weights)) as name_scope:
return metrics_impl.precision_at_top_k(
labels=labels,
predictions_idx=top_k_predictions,
class_id=class_id,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name_scope)
def sparse_recall_at_top_k(labels,
top_k_predictions,
class_id=None,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes recall@k of top-k predictions with respect to sparse labels.
If `class_id` is specified, we calculate recall by considering only the
entries in the batch for which `class_id` is in the label, and computing
the fraction of them for which `class_id` is in the top-k `predictions`.
If `class_id` is not specified, we'll calculate recall as how often on
average a class among the labels of a batch entry is in the top-k
`predictions`.
`sparse_recall_at_top_k` creates two local variables, `true_positive_at_<k>`
and `false_negative_at_<k>`, that are used to compute the recall_at_k
frequency. This frequency is ultimately returned as `recall_at_<k>`: an
idempotent operation that simply divides `true_positive_at_<k>` by total
(`true_positive_at_<k>` + `false_negative_at_<k>`).
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`recall_at_<k>`. Set operations applied to `top_k` and `labels` calculate the
true positives and false negatives weighted by `weights`. Then `update_op`
increments `true_positive_at_<k>` and `false_negative_at_<k>` using these
values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`top_k_predictions`. Values should be in range [0, num_classes), where
num_classes is the last dimension of `predictions`. Values outside this
range always count towards `false_negative_at_<k>`.
top_k_predictions: Integer `Tensor` with shape [D1, ... DN, k] where
N >= 1. Commonly, N=1 and top_k_predictions has shape [batch size, k].
The final dimension contains the indices of top-k labels. [D1, ... DN]
must match `labels`.
class_id: Integer class ID for which we want binary metrics. This should be
in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If class_id is outside this range, the method returns NAN.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
recall: Scalar `float64` `Tensor` with the value of `true_positives` divided
by the sum of `true_positives` and `false_negatives`.
update_op: `Operation` that increments `true_positives` and
`false_negatives` variables appropriately, and whose value matches
`recall`.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match
`predictions`, or if either `metrics_collections` or `updates_collections`
are not a list or tuple.
"""
default_name = _at_k_name('recall', class_id=class_id)
with ops.name_scope(name, default_name,
(top_k_predictions, labels, weights)) as name_scope:
return metrics_impl.recall_at_top_k(
labels=labels,
predictions_idx=top_k_predictions,
class_id=class_id,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name_scope)
def _compute_recall_at_precision(tp, fp, fn, precision, name):
"""Helper function to compute recall at a given `precision`.
Args:
tp: The number of true positives.
fp: The number of false positives.
fn: The number of false negatives.
precision: The precision for which the recall will be calculated.
name: An optional variable_scope name.
Returns:
The recall at the given `precision`.
"""
precisions = math_ops.div(tp, tp + fp + _EPSILON)
tf_index = math_ops.argmin(
math_ops.abs(precisions - precision), 0, output_type=dtypes.int32)
# Now, we have the implicit threshold, so compute the recall:
return math_ops.div(tp[tf_index], tp[tf_index] + fn[tf_index] + _EPSILON,
name)
def recall_at_precision(labels,
predictions,
precision,
weights=None,
num_thresholds=200,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes `recall` at `precision`.
The `recall_at_precision` function creates three local variables,
`tp` (true positives), `fp` (false positives) and `fn` (false negatives)
that are used to compute the `recall` at the given `precision` value. The
threshold for the given `precision` value is computed and used to evaluate the
corresponding `recall`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`recall`. `update_op` increments the `tp`, `fp` and `fn` counts with the
weight of each case found in the `predictions` and `labels`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
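A minimal usage sketch (the values below are illustrative and the
`tf.contrib.metrics` export path is assumed):
```python
import tensorflow as tf
predictions = tf.constant([0.1, 0.4, 0.6, 0.9])
labels = tf.constant([False, True, True, True])
recall, update_op = tf.contrib.metrics.recall_at_precision(
    labels, predictions, precision=0.99)
with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  sess.run(update_op)
  print(sess.run(recall))  # ~1.0 for this toy batch.
```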
Args:
labels: The ground truth values, a `Tensor` whose dimensions must match
`predictions`. Will be cast to `bool`.
predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
precision: A scalar value in range `[0, 1]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `labels` dimension).
num_thresholds: The number of thresholds to use for matching the given
`precision`.
metrics_collections: An optional list of collections that `recall`
should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
recall: A scalar `Tensor` representing the recall at the given
`precision` value.
update_op: An operation that increments the `tp`, `fp` and `fn`
variables appropriately and whose value matches `recall`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, if
`weights` is not `None` and its shape doesn't match `predictions`, or if
`precision` is not between 0 and 1, or if either `metrics_collections`
or `updates_collections` are not a list or tuple.
"""
if not 0 <= precision <= 1:
raise ValueError('`precision` must be in the range [0, 1].')
with variable_scope.variable_scope(name, 'recall_at_precision',
(predictions, labels, weights)):
thresholds = [
i * 1.0 / (num_thresholds - 1) for i in range(1, num_thresholds - 1)
]
thresholds = [0.0 - _EPSILON] + thresholds + [1.0 + _EPSILON]
values, update_ops = _streaming_confusion_matrix_at_thresholds(
predictions, labels, thresholds, weights)
recall = _compute_recall_at_precision(values['tp'], values['fp'],
values['fn'], precision, 'value')
update_op = _compute_recall_at_precision(update_ops['tp'], update_ops['fp'],
update_ops['fn'], precision,
'update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, recall)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return recall, update_op
def streaming_sparse_average_precision_at_k(predictions,
labels,
k,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes average precision@k of predictions with respect to sparse labels.
See `sparse_average_precision_at_k` for details on the formula. `weights` are
applied to the result of `sparse_average_precision_at_k`.
`streaming_sparse_average_precision_at_k` creates two local variables,
`average_precision_at_<k>/total` and `average_precision_at_<k>/max`, that
are used to compute the frequency. This frequency is ultimately returned as
`average_precision_at_<k>`: an idempotent operation that simply divides
`average_precision_at_<k>/total` by `average_precision_at_<k>/max`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
indicating the top `k` `predictions`. Set operations applied to `top_k` and
`labels` calculate the true positives and false positives weighted by
`weights`. Then `update_op` increments `true_positive_at_<k>` and
`false_positive_at_<k>` using these values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and `predictions` has shape
[batch size, num_classes]. The final dimension contains the logit values
for each class. [D1, ... DN] must match `labels`.
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions`. Values should be in range [0, num_classes), where
num_classes is the last dimension of `predictions`. Values outside this
range are ignored.
k: Integer, k for @k metric. This will calculate an average precision for
range `[1,k]`, as documented above.
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
mean_average_precision: Scalar `float64` `Tensor` with the mean average
precision values.
update: `Operation` that increments variables appropriately, and whose
value matches `metric`.
"""
return metrics.average_precision_at_k(
k=k,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_sparse_average_precision_at_top_k(top_k_predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes average precision@k of predictions with respect to sparse labels.
`streaming_sparse_average_precision_at_top_k` creates two local variables,
`average_precision_at_<k>/total` and `average_precision_at_<k>/max`, that
are used to compute the frequency. This frequency is ultimately returned as
`average_precision_at_<k>`: an idempotent operation that simply divides
`average_precision_at_<k>/total` by `average_precision_at_<k>/max`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`precision_at_<k>`. Set operations applied to `top_k` and `labels` calculate
the true positives and false positives weighted by `weights`. Then `update_op`
increments `true_positive_at_<k>` and `false_positive_at_<k>` using these
values.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
top_k_predictions: Integer `Tensor` with shape [D1, ... DN, k] where N >= 1.
Commonly, N=1 and `top_k_predictions` has shape [batch size, k]. The final
dimension must be set and contains the top `k` predicted class indices.
[D1, ... DN] must match `labels`. Values should be in range
[0, num_classes).
labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels] or [D1, ... DN], where the latter implies
num_labels=1. N >= 1 and num_labels is the number of target classes for
the associated prediction. Commonly, N=1 and `labels` has shape
[batch_size, num_labels]. [D1, ... DN] must match `top_k_predictions`.
Values should be in range [0, num_classes).
weights: `Tensor` whose rank is either 0, or n-1, where n is the rank of
`labels`. If the latter, it must be broadcastable to `labels` (i.e., all
dimensions must be either `1`, or the same as the corresponding `labels`
dimension).
metrics_collections: An optional list of collections that values should
be added to.
updates_collections: An optional list of collections that updates should
be added to.
name: Name of new update operation, and namespace for other dependent ops.
Returns:
mean_average_precision: Scalar `float64` `Tensor` with the mean average
precision values.
update: `Operation` that increments variables appropriately, and whose
value matches `metric`.
Raises:
ValueError: if the last dimension of top_k_predictions is not set.
"""
return metrics_impl._streaming_sparse_average_precision_at_top_k( # pylint: disable=protected-access
predictions_idx=top_k_predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
@deprecated(None, 'Please switch to tf.metrics.mean.')
def streaming_mean_absolute_error(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the mean absolute error between the labels and predictions.
The `streaming_mean_absolute_error` function creates two local variables,
`total` and `count` that are used to compute the mean absolute error. This
average is weighted by `weights`, and it is ultimately returned as
`mean_absolute_error`: an idempotent operation that simply divides `total` by
`count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_absolute_error`. Internally, an `absolute_errors` operation computes the
absolute value of the differences between `predictions` and `labels`. Then
`update_op` increments `total` with the reduced sum of the product of
`weights` and `absolute_errors`, and it increments `count` with the reduced
sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
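A minimal usage sketch (the values below are illustrative and the
`tf.contrib.metrics` export path is assumed):
```python
import tensorflow as tf
predictions = tf.constant([1.0, 2.0, 4.0])
labels = tf.constant([1.0, 3.0, 2.0])
mae, update_op = tf.contrib.metrics.streaming_mean_absolute_error(
    predictions, labels)
with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  sess.run(update_op)
  print(sess.run(mae))  # 1.0: the mean of |0|, |1| and |2|.
```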
Args:
predictions: A `Tensor` of arbitrary shape.
labels: A `Tensor` of the same shape as `predictions`.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or
the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`mean_absolute_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
mean_absolute_error: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_absolute_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.mean_absolute_error(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_mean_relative_error(predictions,
labels,
normalizer,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the mean relative error by normalizing with the given values.
The `streaming_mean_relative_error` function creates two local variables,
`total` and `count` that are used to compute the mean relative absolute error.
This average is weighted by `weights`, and it is ultimately returned as
`mean_relative_error`: an idempotent operation that simply divides `total` by
`count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_relative_error`. Internally, a `relative_errors` operation divides the
absolute value of the differences between `predictions` and `labels` by the
`normalizer`. Then `update_op` increments `total` with the reduced sum of the
product of `weights` and `relative_errors`, and it increments `count` with the
reduced sum of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A `Tensor` of arbitrary shape.
labels: A `Tensor` of the same shape as `predictions`.
normalizer: A `Tensor` of the same shape as `predictions`.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or
the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`mean_relative_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
mean_relative_error: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_relative_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.mean_relative_error(
normalizer=normalizer,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_mean_squared_error(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the mean squared error between the labels and predictions.
The `streaming_mean_squared_error` function creates two local variables,
`total` and `count` that are used to compute the mean squared error.
This average is weighted by `weights`, and it is ultimately returned as
`mean_squared_error`: an idempotent operation that simply divides `total` by
`count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_squared_error`. Internally, a `squared_error` operation computes the
element-wise square of the difference between `predictions` and `labels`. Then
`update_op` increments `total` with the reduced sum of the product of
`weights` and `squared_error`, and it increments `count` with the reduced sum
of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A `Tensor` of arbitrary shape.
labels: A `Tensor` of the same shape as `predictions`.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or
the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`mean_squared_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
mean_squared_error: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `mean_squared_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.mean_squared_error(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_root_mean_squared_error(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the root mean squared error between the labels and predictions.
The `streaming_root_mean_squared_error` function creates two local variables,
`total` and `count` that are used to compute the root mean squared error.
This average is weighted by `weights`, and it is ultimately returned as
`root_mean_squared_error`: an idempotent operation that takes the square root
of the division of `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`root_mean_squared_error`. Internally, a `squared_error` operation computes
the element-wise square of the difference between `predictions` and `labels`.
Then `update_op` increments `total` with the reduced sum of the product of
`weights` and `squared_error`, and it increments `count` with the reduced sum
of `weights`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A `Tensor` of arbitrary shape.
labels: A `Tensor` of the same shape as `predictions`.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or
the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that
`root_mean_squared_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
be added to.
name: An optional variable_scope name.
Returns:
root_mean_squared_error: A `Tensor` representing the current mean, the value
of `total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately and whose value matches `root_mean_squared_error`.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.root_mean_squared_error(
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_covariance(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the unbiased sample covariance between `predictions` and `labels`.
The `streaming_covariance` function creates four local variables,
`comoment`, `mean_prediction`, `mean_label`, and `count`, which are used to
compute the sample covariance between predictions and labels across multiple
batches of data. The covariance is ultimately returned as an idempotent
operation that simply divides `comoment` by `count` - 1. We use `count` - 1
in order to get an unbiased estimate.
The algorithm used for this online computation is described in
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance.
Specifically, the formula used to combine two sample comoments is
`C_AB = C_A + C_B + (E[x_A] - E[x_B]) * (E[y_A] - E[y_B]) * n_A * n_B / n_AB`
The comoment for a single batch of data is simply
`sum((x - E[x]) * (y - E[y]))`, optionally weighted.
If `weights` is not None, then it is used to compute weighted comoments,
means, and count. NOTE: these weights are treated as "frequency weights", as
opposed to "reliability weights". See discussion of the difference on
https://wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_variance
To facilitate the computation of covariance across multiple batches of data,
the function creates an `update_op` operation, which updates underlying
variables and returns the updated covariance.
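A minimal usage sketch (the values below are illustrative and the
`tf.contrib.metrics` export path is assumed):
```python
import tensorflow as tf
x = tf.placeholder(tf.float32, shape=[None])
y = tf.placeholder(tf.float32, shape=[None])
cov, update_op = tf.contrib.metrics.streaming_covariance(x, y)
with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  # Fold in the stream one batch at a time.
  sess.run(update_op, feed_dict={x: [1., 2.], y: [1., 2.]})
  sess.run(update_op, feed_dict={x: [3., 4.], y: [3., 4.]})
  print(sess.run(cov))  # ~1.67: unbiased covariance of [1, 2, 3, 4] with itself.
```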
Args:
predictions: A `Tensor` of arbitrary size.
labels: A `Tensor` of the same size as `predictions`.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or
the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
covariance: A `Tensor` representing the current unbiased sample covariance,
`comoment` / (`count` - 1).
update_op: An operation that updates the local variables appropriately.
Raises:
ValueError: If labels and predictions are of different sizes or if either
`metrics_collections` or `updates_collections` are not a list or tuple.
"""
with variable_scope.variable_scope(name, 'covariance',
(predictions, labels, weights)):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions, labels, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
count_ = metrics_impl.metric_variable([], dtypes.float32, name='count')
mean_prediction = metrics_impl.metric_variable(
[], dtypes.float32, name='mean_prediction')
mean_label = metrics_impl.metric_variable(
[], dtypes.float32, name='mean_label')
comoment = metrics_impl.metric_variable( # C_A in update equation
[], dtypes.float32, name='comoment')
if weights is None:
batch_count = math_ops.to_float(array_ops.size(labels)) # n_B in eqn
weighted_predictions = predictions
weighted_labels = labels
else:
weights = weights_broadcast_ops.broadcast_weights(weights, labels)
batch_count = math_ops.reduce_sum(weights) # n_B in eqn
weighted_predictions = math_ops.multiply(predictions, weights)
weighted_labels = math_ops.multiply(labels, weights)
update_count = state_ops.assign_add(count_, batch_count) # n_AB in eqn
prev_count = update_count - batch_count # n_A in update equation
# We update the means by Delta=Error*BatchCount/(BatchCount+PrevCount)
# batch_mean_prediction is E[x_B] in the update equation
batch_mean_prediction = _safe_div(
math_ops.reduce_sum(weighted_predictions), batch_count,
'batch_mean_prediction')
delta_mean_prediction = _safe_div(
(batch_mean_prediction - mean_prediction) * batch_count, update_count,
'delta_mean_prediction')
update_mean_prediction = state_ops.assign_add(mean_prediction,
delta_mean_prediction)
# prev_mean_prediction is E[x_A] in the update equation
prev_mean_prediction = update_mean_prediction - delta_mean_prediction
# batch_mean_label is E[y_B] in the update equation
batch_mean_label = _safe_div(
math_ops.reduce_sum(weighted_labels), batch_count, 'batch_mean_label')
delta_mean_label = _safe_div((batch_mean_label - mean_label) * batch_count,
update_count, 'delta_mean_label')
update_mean_label = state_ops.assign_add(mean_label, delta_mean_label)
# prev_mean_label is E[y_A] in the update equation
prev_mean_label = update_mean_label - delta_mean_label
unweighted_batch_coresiduals = ((predictions - batch_mean_prediction) *
(labels - batch_mean_label))
# batch_comoment is C_B in the update equation
if weights is None:
batch_comoment = math_ops.reduce_sum(unweighted_batch_coresiduals)
else:
batch_comoment = math_ops.reduce_sum(
unweighted_batch_coresiduals * weights)
# View delta_comoment as = C_AB - C_A in the update equation above.
# Since C_A is stored in a var, by how much do we need to increment that var
# to make the var = C_AB?
delta_comoment = (
batch_comoment + (prev_mean_prediction - batch_mean_prediction) *
(prev_mean_label - batch_mean_label) *
(prev_count * batch_count / update_count))
update_comoment = state_ops.assign_add(comoment, delta_comoment)
covariance = array_ops.where(
math_ops.less_equal(count_, 1.),
float('nan'),
math_ops.truediv(comoment, count_ - 1),
name='covariance')
with ops.control_dependencies([update_comoment]):
update_op = array_ops.where(
math_ops.less_equal(count_, 1.),
float('nan'),
math_ops.truediv(comoment, count_ - 1),
name='update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, covariance)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return covariance, update_op
def streaming_pearson_correlation(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes Pearson correlation coefficient between `predictions`, `labels`.
The `streaming_pearson_correlation` function delegates to
`streaming_covariance` the tracking of three [co]variances:
- `streaming_covariance(predictions, labels)`, i.e. covariance
- `streaming_covariance(predictions, predictions)`, i.e. variance
- `streaming_covariance(labels, labels)`, i.e. variance
The product-moment correlation ultimately returned is an idempotent operation
`cov(predictions, labels) / sqrt(var(predictions) * var(labels))`. To
facilitate correlation computation across multiple batches, the function
groups the `update_op`s of the underlying streaming_covariance and returns an
`update_op`.
If `weights` is not None, then it is used to compute a weighted correlation.
NOTE: these weights are treated as "frequency weights", as opposed to
"reliability weights". See discussion of the difference on
https://wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_variance
Args:
predictions: A `Tensor` of arbitrary size.
labels: A `Tensor` of the same size as predictions.
weights: Optional `Tensor` indicating the frequency with which an example is
sampled. Rank must be 0, or the same rank as `labels`, and must be
broadcastable to `labels` (i.e., all dimensions must be either `1`, or
the same as the corresponding `labels` dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
pearson_r: A `Tensor` representing the current Pearson product-moment
correlation coefficient, the value of
`cov(predictions, labels) / sqrt(var(predictions) * var(labels))`.
update_op: An operation that updates the underlying variables appropriately.
Raises:
ValueError: If `labels` and `predictions` are of different sizes, or if
`weights` is the wrong size, or if either `metrics_collections` or
`updates_collections` are not a `list` or `tuple`.
"""
with variable_scope.variable_scope(name, 'pearson_r',
(predictions, labels, weights)):
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions, labels, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
# Broadcast weights here to avoid duplicate broadcasting in each call to
# `streaming_covariance`.
if weights is not None:
weights = weights_broadcast_ops.broadcast_weights(weights, labels)
cov, update_cov = streaming_covariance(
predictions, labels, weights=weights, name='covariance')
var_predictions, update_var_predictions = streaming_covariance(
predictions, predictions, weights=weights, name='variance_predictions')
var_labels, update_var_labels = streaming_covariance(
labels, labels, weights=weights, name='variance_labels')
pearson_r = math_ops.truediv(
cov,
math_ops.multiply(
math_ops.sqrt(var_predictions), math_ops.sqrt(var_labels)),
name='pearson_r')
update_op = math_ops.truediv(
update_cov,
math_ops.multiply(
math_ops.sqrt(update_var_predictions),
math_ops.sqrt(update_var_labels)),
name='update_op')
if metrics_collections:
ops.add_to_collections(metrics_collections, pearson_r)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return pearson_r, update_op
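# Illustrative usage sketch for the metric above (the session loop and tensor
# names are hypothetical, not part of this module):
#
#   pearson_r, update_op = streaming_pearson_correlation(predictions, labels)
#   with tf.Session() as sess:
#     sess.run(tf.local_variables_initializer())
#     for _ in range(num_batches):
#       sess.run(update_op)
#     print(sess.run(pearson_r))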
# TODO(nsilberman): add a 'normalized' flag so that the user can request
# normalization if the inputs are not normalized.
def streaming_mean_cosine_distance(predictions,
labels,
dim,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the cosine distance between the labels and predictions.
The `streaming_mean_cosine_distance` function creates two local variables,
`total` and `count` that are used to compute the average cosine distance
between `predictions` and `labels`. This average is weighted by `weights`,
and it is ultimately returned as `mean_distance`, which is an idempotent
operation that simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`mean_distance`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A `Tensor` of the same shape as `labels`.
labels: A `Tensor` of arbitrary shape.
dim: The dimension along which the cosine distance is computed.
weights: An optional `Tensor` whose shape is broadcastable to `predictions`,
and whose dimension `dim` is 1.
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
mean_distance: A `Tensor` representing the current mean, the value of
`total` divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
predictions, labels, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions, labels, weights)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
radial_diffs = math_ops.multiply(predictions, labels)
radial_diffs = math_ops.reduce_sum(
radial_diffs, reduction_indices=[
dim,
], keep_dims=True)
mean_distance, update_op = streaming_mean(radial_diffs, weights, None, None,
name or 'mean_cosine_distance')
mean_distance = math_ops.subtract(1.0, mean_distance)
update_op = math_ops.subtract(1.0, update_op)
if metrics_collections:
ops.add_to_collections(metrics_collections, mean_distance)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return mean_distance, update_op
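# Note that the streaming mean above is taken over sum(predictions * labels,
# dim), which equals the cosine similarity only when both inputs are already
# unit-normalized along `dim` (hence the TODO above about a 'normalized'
# flag).  An illustrative pre-normalization sketch, assuming TF 1.x style
# l2_normalize:
#
#   predictions = tf.nn.l2_normalize(predictions, dim)
#   labels = tf.nn.l2_normalize(labels, dim)
#   distance, update_op = streaming_mean_cosine_distance(predictions, labels, dim)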
def streaming_percentage_less(values,
threshold,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the percentage of values less than the given threshold.
The `streaming_percentage_less` function creates two local variables,
`total` and `count` that are used to compute the percentage of `values` that
fall below `threshold`. This rate is weighted by `weights`, and it is
ultimately returned as `percentage` which is an idempotent operation that
simply divides `total` by `count`.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
`percentage`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A numeric `Tensor` of arbitrary size.
threshold: A scalar threshold.
weights: An optional `Tensor` whose shape is broadcastable to `values`.
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
percentage: A `Tensor` representing the current mean, the value of `total`
divided by `count`.
update_op: An operation that increments the `total` and `count` variables
appropriately.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
return metrics.percentage_below(
values=values,
threshold=threshold,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def streaming_mean_iou(predictions,
labels,
num_classes,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Calculate per-step mean Intersection-Over-Union (mIOU).
Mean Intersection-Over-Union is a common evaluation metric for
semantic image segmentation, which first computes the IOU for each
semantic class and then computes the average over classes.
IOU is defined as follows:
IOU = true_positive / (true_positive + false_positive + false_negative).
The predictions are accumulated in a confusion matrix, weighted by `weights`,
and mIOU is then calculated from it.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `mean_iou`.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
predictions: A `Tensor` of prediction results for semantic labels, whose
shape is [batch size] and type `int32` or `int64`. The tensor will be
flattened, if its rank > 1.
labels: A `Tensor` of ground truth labels with shape [batch size] and of
type `int32` or `int64`. The tensor will be flattened, if its rank > 1.
num_classes: The possible number of labels the prediction task can
have. This value must be provided, since a confusion matrix of
dimension = [num_classes, num_classes] will be allocated.
weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
metrics_collections: An optional list of collections that `mean_iou`
should be added to.
updates_collections: An optional list of collections `update_op` should be
added to.
name: An optional variable_scope name.
Returns:
mean_iou: A `Tensor` representing the mean intersection-over-union.
update_op: An operation that increments the confusion matrix.
Raises:
ValueError: If `predictions` and `labels` have mismatched shapes, or if
`weights` is not `None` and its shape doesn't match `predictions`, or if
either `metrics_collections` or `updates_collections` are not a list or
tuple.
"""
return metrics.mean_iou(
num_classes=num_classes,
predictions=predictions,
labels=labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
def _next_array_size(required_size, growth_factor=1.5):
"""Calculate the next size for reallocating a dynamic array.
Args:
required_size: number or tf.Tensor specifying required array capacity.
growth_factor: optional number or tf.Tensor specifying the growth factor
between subsequent allocations.
Returns:
tf.Tensor with dtype=int32 giving the next array size.
"""
exponent = math_ops.ceil(
math_ops.log(math_ops.cast(required_size, dtypes.float32)) / math_ops.log(
math_ops.cast(growth_factor, dtypes.float32)))
return math_ops.cast(math_ops.ceil(growth_factor**exponent), dtypes.int32)
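# Worked example (illustrative): with growth_factor=1.5 and required_size=5,
# exponent = ceil(log(5) / log(1.5)) = ceil(3.97...) = 4, so the next size is
# ceil(1.5 ** 4) = ceil(5.0625) = 6.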
def streaming_concat(values,
axis=0,
max_size=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Concatenate values along an axis across batches.
The function `streaming_concat` creates two local variables, `array` and
`size`, that are used to store concatenated values. Internally, `array` is
used as storage for a dynamic array (if `max_size` is `None`), which ensures
that updates can be run in amortized constant time.
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that appends the values of a tensor and returns the
length of the concatenated axis.
This op allows for evaluating metrics that cannot be updated incrementally
using the same framework as other streaming metrics.
Args:
values: `Tensor` to concatenate. Rank and the shape along all axes other
than the axis to concatenate along must be statically known.
axis: optional integer axis to concatenate along.
max_size: optional integer maximum size of `value` along the given axis.
Once the maximum size is reached, further updates are no-ops. By default,
there is no maximum size: the array is resized as necessary.
metrics_collections: An optional list of collections that `value`
should be added to.
updates_collections: An optional list of collections `update_op` should be
added to.
name: An optional variable_scope name.
Returns:
value: A `Tensor` representing the concatenated values.
update_op: An operation that concatenates the next values.
Raises:
ValueError: if `values` does not have a statically known rank, `axis` is
not in the valid range or the size of `values` is not statically known
along any axis other than `axis`.
"""
with variable_scope.variable_scope(name, 'streaming_concat', (values,)):
# pylint: disable=invalid-slice-index
values_shape = values.get_shape()
if values_shape.dims is None:
raise ValueError('`values` must have statically known rank')
ndim = len(values_shape)
if axis < 0:
axis += ndim
if not 0 <= axis < ndim:
raise ValueError('axis = %r not in [0, %r)' % (axis, ndim))
fixed_shape = [dim.value for n, dim in enumerate(values_shape) if n != axis]
if any(value is None for value in fixed_shape):
raise ValueError('all dimensions of `values` other than the dimension to '
'concatenate along must have statically known size')
# We move `axis` to the front of the internal array so assign ops can be
# applied to contiguous slices
init_size = 0 if max_size is None else max_size
init_shape = [init_size] + fixed_shape
array = metrics_impl.metric_variable(
init_shape, values.dtype, validate_shape=False, name='array')
size = metrics_impl.metric_variable([], dtypes.int32, name='size')
perm = [0 if n == axis else n + 1 if n < axis else n for n in range(ndim)]
valid_array = array[:size]
valid_array.set_shape([None] + fixed_shape)
value = array_ops.transpose(valid_array, perm, name='concat')
values_size = array_ops.shape(values)[axis]
if max_size is None:
batch_size = values_size
else:
batch_size = math_ops.minimum(values_size, max_size - size)
perm = [axis] + [n for n in range(ndim) if n != axis]
batch_values = array_ops.transpose(values, perm)[:batch_size]
def reallocate():
next_size = _next_array_size(new_size)
next_shape = array_ops.stack([next_size] + fixed_shape)
new_value = array_ops.zeros(next_shape, dtype=values.dtype)
old_value = array.value()
assign_op = state_ops.assign(array, new_value, validate_shape=False)
with ops.control_dependencies([assign_op]):
copy_op = array[:size].assign(old_value[:size])
# return value needs to be the same dtype as no_op() for cond
with ops.control_dependencies([copy_op]):
return control_flow_ops.no_op()
new_size = size + batch_size
array_size = array_ops.shape_internal(array, optimize=False)[0]
maybe_reallocate_op = control_flow_ops.cond(
new_size > array_size, reallocate, control_flow_ops.no_op)
with ops.control_dependencies([maybe_reallocate_op]):
append_values_op = array[size:new_size].assign(batch_values)
with ops.control_dependencies([append_values_op]):
update_op = size.assign(new_size)
if metrics_collections:
ops.add_to_collections(metrics_collections, value)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return value, update_op
# pylint: enable=invalid-slice-index
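# Illustrative usage sketch for streaming_concat (tensors and loop are
# hypothetical, not part of this module):
#
#   batch_logits = ...  # [batch_size, num_classes], with num_classes static
#   all_logits, append_op = streaming_concat(batch_logits, axis=0)
#   with tf.Session() as sess:
#     sess.run(tf.local_variables_initializer())
#     for _ in range(num_batches):
#       sess.run(append_op)
#     collected = sess.run(all_logits)  # [total_examples, num_classes]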
def aggregate_metrics(*value_update_tuples):
"""Aggregates the metric value tensors and update ops into two lists.
Args:
*value_update_tuples: a variable number of tuples, each of which contains the
pair of (value_tensor, update_op) from a streaming metric.
Returns:
A list of value `Tensor` objects and a list of update ops.
Raises:
ValueError: if `value_update_tuples` is empty.
"""
if not value_update_tuples:
raise ValueError('Expected at least one value_tensor/update_op pair')
value_ops, update_ops = zip(*value_update_tuples)
return list(value_ops), list(update_ops)
def aggregate_metric_map(names_to_tuples):
"""Aggregates the metric names to tuple dictionary.
This function is useful for pairing metric names with their associated value
and update ops when the list of metrics is long. For example:
```python
metrics_to_values, metrics_to_updates = slim.metrics.aggregate_metric_map({
'Mean Absolute Error': slim.metrics.streaming_mean_absolute_error(
predictions, labels, weights),
'Mean Relative Error': slim.metrics.streaming_mean_relative_error(
predictions, labels, labels, weights),
'RMSE Linear': slim.metrics.streaming_root_mean_squared_error(
predictions, labels, weights),
'RMSE Log': slim.metrics.streaming_root_mean_squared_error(
predictions, labels, weights),
})
```
Args:
names_to_tuples: a map of metric names to tuples, each of which contains the
pair of (value_tensor, update_op) from a streaming metric.
Returns:
A dictionary from metric names to value ops and a dictionary from metric
names to update ops.
"""
metric_names = names_to_tuples.keys()
value_ops, update_ops = zip(*names_to_tuples.values())
return dict(zip(metric_names, value_ops)), dict(zip(metric_names, update_ops))
def count(values,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
"""Computes the number of examples, or sum of `weights`.
When evaluating some metric (e.g. mean) on one or more subsets of the data,
this auxiliary metric is useful for keeping track of how many examples there
are in each subset.
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
values: A `Tensor` of arbitrary dimensions. Only its shape is used.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`values`, and must be broadcastable to `values` (i.e., all dimensions
must be either `1`, or the same as the corresponding `values`
dimension).
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
ops should be added to.
name: An optional variable_scope name.
Returns:
count: A `Tensor` representing the current value of the metric.
update_op: An operation that accumulates the metric from a batch of data.
Raises:
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
"""
with variable_scope.variable_scope(name, 'count', (values, weights)):
count_ = metrics_impl.metric_variable([], dtypes.float32, name='count')
if weights is None:
num_values = math_ops.to_float(array_ops.size(values))
else:
_, _, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=values,
labels=None,
weights=weights)
weights = weights_broadcast_ops.broadcast_weights(
math_ops.to_float(weights), values)
num_values = math_ops.reduce_sum(weights)
with ops.control_dependencies([values]):
update_op = state_ops.assign_add(count_, num_values)
if metrics_collections:
ops.add_to_collections(metrics_collections, count_)
if updates_collections:
ops.add_to_collections(updates_collections, update_op)
return count_, update_op
__all__ = [
'aggregate_metric_map',
'aggregate_metrics',
'count',
'precision_recall_at_equal_thresholds',
'recall_at_precision',
'sparse_recall_at_top_k',
'streaming_accuracy',
'streaming_auc',
'streaming_curve_points',
'streaming_dynamic_auc',
'streaming_false_negative_rate',
'streaming_false_negative_rate_at_thresholds',
'streaming_false_negatives',
'streaming_false_negatives_at_thresholds',
'streaming_false_positive_rate',
'streaming_false_positive_rate_at_thresholds',
'streaming_false_positives',
'streaming_false_positives_at_thresholds',
'streaming_mean',
'streaming_mean_absolute_error',
'streaming_mean_cosine_distance',
'streaming_mean_iou',
'streaming_mean_relative_error',
'streaming_mean_squared_error',
'streaming_mean_tensor',
'streaming_percentage_less',
'streaming_precision',
'streaming_precision_at_thresholds',
'streaming_recall',
'streaming_recall_at_k',
'streaming_recall_at_thresholds',
'streaming_root_mean_squared_error',
'streaming_sensitivity_at_specificity',
'streaming_sparse_average_precision_at_k',
'streaming_sparse_average_precision_at_top_k',
'streaming_sparse_precision_at_k',
'streaming_sparse_precision_at_top_k',
'streaming_sparse_recall_at_k',
'streaming_specificity_at_sensitivity',
'streaming_true_negatives',
'streaming_true_negatives_at_thresholds',
'streaming_true_positives',
'streaming_true_positives_at_thresholds',
]
|
ppiotr/Invenio
|
refs/heads/docextract
|
modules/miscutil/lib/xapianutils_config.py
|
29
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Xapian utilities.
"""
from invenio.config import CFG_CACHEDIR
INDEXES = ("abstract", "author", "fulltext", "keyword", "title")
DATABASES = dict()
XAPIAN_DIR_NAME = "xapian_indexes"
XAPIAN_DIR = CFG_CACHEDIR + "/" + XAPIAN_DIR_NAME
|
Caio99BR/FalconSSKernel_20.0
|
refs/heads/master
|
scripts/tracing/draw_functrace.py
|
14679
|
#!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <[email protected]>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more human-readable
view of the call stack by drawing a textual but hierarchical tree of
calls. Only the functions' names and the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait for some time, but not too long; the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
found among the parents, then create it as a new child of root.
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
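# Example (hypothetical trace_pipe line):
#   parseLine("bash-4251  [001]  1234.567890: do_IRQ <-ret_from_intr")
# returns ("1234.567890", "do_IRQ", "ret_from_intr").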
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
|
Pathoschild/stewbot
|
refs/heads/master
|
stewbot/components/modules/mechanize/_form.py
|
1
|
"""HTML form handling for web clients.
HTML form handling for web clients: useful for parsing HTML forms, filling them
in and returning the completed forms to the server. This code developed from a
port of Gisle Aas' Perl module HTML::Form, from the libwww-perl library, but
the interface is not the same.
The most useful docstring is the one for HTMLForm.
RFC 1866: HTML 2.0
RFC 1867: Form-based File Upload in HTML
RFC 2388: Returning Values from Forms: multipart/form-data
HTML 3.2 Specification, W3C Recommendation 14 January 1997 (for ISINDEX)
HTML 4.01 Specification, W3C Recommendation 24 December 1999
Copyright 2002-2007 John J. Lee <[email protected]>
Copyright 2005 Gary Poster
Copyright 2005 Zope Corporation
Copyright 1998-2000 Gisle Aas.
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
# TODO:
# Clean up post the merge into mechanize
# * Remove code that was duplicated in ClientForm and mechanize
# * Remove weird import stuff
# * Remove pre-Python 2.4 compatibility cruft
# * Clean up tests
# * Later release: Remove the ClientForm 0.1 backwards-compatibility switch
# Remove parser testing hack
# Clean action URI
# Switch to unicode throughout
# See Wichert Akkerman's 2004-01-22 message to c.l.py.
# Apply recommendations from google code project CURLIES
# Apply recommendations from HTML 5 spec
# Add charset parameter to Content-type headers? How to find value??
# Functional tests to add:
# Single and multiple file upload
# File upload with missing name (check standards)
# mailto: submission & enctype text/plain??
# Replace by_label etc. with moniker / selector concept. Allows, e.g., a
# choice between selection by value / id / label / element contents. Or
# choice between matching labels exactly or by substring. etc.
__all__ = ['AmbiguityError', 'CheckboxControl', 'Control',
'ControlNotFoundError', 'FileControl', 'FormParser', 'HTMLForm',
'HiddenControl', 'IgnoreControl', 'ImageControl', 'IsindexControl',
'Item', 'ItemCountError', 'ItemNotFoundError', 'Label',
'ListControl', 'LocateError', 'Missing', 'ParseError', 'ParseFile',
'ParseFileEx', 'ParseResponse', 'ParseResponseEx','PasswordControl',
'RadioControl', 'ScalarControl', 'SelectControl',
'SubmitButtonControl', 'SubmitControl', 'TextControl',
'TextareaControl', 'XHTMLCompatibleFormParser']
import HTMLParser
from cStringIO import StringIO
import inspect
import logging
import random
import re
import sys
import urllib
import urlparse
import warnings
import _beautifulsoup
import _request
# from Python itself, for backwards compatibility of raised exceptions
import sgmllib
# bundled copy of sgmllib
import _sgmllib_copy
VERSION = "0.2.11"
CHUNK = 1024 # size of chunks fed to parser, in bytes
DEFAULT_ENCODING = "latin-1"
_logger = logging.getLogger("mechanize.forms")
OPTIMIZATION_HACK = True
def debug(msg, *args, **kwds):
if OPTIMIZATION_HACK:
return
caller_name = inspect.stack()[1][3]
extended_msg = '%%s %s' % msg
extended_args = (caller_name,)+args
_logger.debug(extended_msg, *extended_args, **kwds)
def _show_debug_messages():
global OPTIMIZATION_HACK
OPTIMIZATION_HACK = False
_logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
_logger.addHandler(handler)
def deprecation(message, stack_offset=0):
warnings.warn(message, DeprecationWarning, stacklevel=3+stack_offset)
class Missing: pass
_compress_re = re.compile(r"\s+")
def compress_text(text): return _compress_re.sub(" ", text.strip())
def normalize_line_endings(text):
return re.sub(r"(?:(?<!\r)\n)|(?:\r(?!\n))", "\r\n", text)
def unescape(data, entities, encoding=DEFAULT_ENCODING):
if data is None or "&" not in data:
return data
def replace_entities(match, entities=entities, encoding=encoding):
ent = match.group()
if ent[1] == "#":
return unescape_charref(ent[2:-1], encoding)
repl = entities.get(ent)
if repl is not None:
if type(repl) != type(""):
try:
repl = repl.encode(encoding)
except UnicodeError:
repl = ent
else:
repl = ent
return repl
return re.sub(r"&#?[A-Za-z0-9]+?;", replace_entities, data)
def unescape_charref(data, encoding):
name, base = data, 10
if name.startswith("x"):
name, base= name[1:], 16
uc = unichr(int(name, base))
if encoding is None:
return uc
else:
try:
repl = uc.encode(encoding)
except UnicodeError:
repl = "&#%s;" % data
return repl
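# Examples (illustrative): unescape_charref("65", None) -> u"A" (decimal
# reference) and unescape_charref("x41", None) -> u"A" (hex reference); if the
# character cannot be represented in the requested encoding, the original
# "&#...;" reference is returned unchanged.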
def get_entitydefs():
import htmlentitydefs
from codecs import latin_1_decode
entitydefs = {}
try:
htmlentitydefs.name2codepoint
except AttributeError:
entitydefs = {}
for name, char in htmlentitydefs.entitydefs.items():
uc = latin_1_decode(char)[0]
if uc.startswith("&#") and uc.endswith(";"):
uc = unescape_charref(uc[2:-1], None)
entitydefs["&%s;" % name] = uc
else:
for name, codepoint in htmlentitydefs.name2codepoint.items():
entitydefs["&%s;" % name] = unichr(codepoint)
return entitydefs
def issequence(x):
try:
x[0]
except (TypeError, KeyError):
return False
except IndexError:
pass
return True
def isstringlike(x):
try: x+""
except: return False
else: return True
def choose_boundary():
"""Return a string usable as a multipart boundary."""
# follow IE and firefox
nonce = "".join([str(random.randint(0, sys.maxint-1)) for i in 0,1,2])
return "-"*27 + nonce
# This cut-n-pasted MimeWriter from standard library is here so can add
# to HTTP headers rather than message body when appropriate. It also uses
# \r\n in place of \n. This is a bit nasty.
class MimeWriter:
"""Generic MIME writer.
Methods:
__init__()
addheader()
flushheaders()
startbody()
startmultipartbody()
nextpart()
lastpart()
A MIME writer is much more primitive than a MIME parser. It
doesn't seek around on the output file, and it doesn't use large
amounts of buffer space, so you have to write the parts in the
order they should occur on the output file. It does buffer the
headers you add, allowing you to rearrange their order.
General usage is:
f = <open the output file>
w = MimeWriter(f)
...call w.addheader(key, value) 0 or more times...
followed by either:
f = w.startbody(content_type)
...call f.write(data) for body data...
or:
w.startmultipartbody(subtype)
for each part:
subwriter = w.nextpart()
...use the subwriter's methods to create the subpart...
w.lastpart()
The subwriter is another MimeWriter instance, and should be
treated in the same way as the toplevel MimeWriter. This way,
writing recursive body parts is easy.
Warning: don't forget to call lastpart()!
XXX There should be more state so calls made in the wrong order
are detected.
Some special cases:
- startbody() just returns the file passed to the constructor;
but don't use this knowledge, as it may be changed.
- startmultipartbody() actually returns a file as well;
this can be used to write the initial 'if you can read this your
mailer is not MIME-aware' message.
- If you call flushheaders(), the headers accumulated so far are
written out (and forgotten); this is useful if you don't need a
body part at all, e.g. for a subpart of type message/rfc822
that's (mis)used to store some header-like information.
- Passing a keyword argument 'prefix=<flag>' to addheader(),
start*body() affects where the header is inserted; 0 means
append at the end, 1 means insert at the start; default is
append for addheader(), but insert for start*body(), which use
it to determine where the Content-type header goes.
"""
def __init__(self, fp, http_hdrs=None):
self._http_hdrs = http_hdrs
self._fp = fp
self._headers = []
self._boundary = []
self._first_part = True
def addheader(self, key, value, prefix=0,
add_to_http_hdrs=0):
"""
prefix is ignored if add_to_http_hdrs is true.
"""
lines = value.split("\r\n")
while lines and not lines[-1]: del lines[-1]
while lines and not lines[0]: del lines[0]
if add_to_http_hdrs:
value = "".join(lines)
# 2.2 urllib2 doesn't normalize header case
self._http_hdrs.append((key.capitalize(), value))
else:
for i in range(1, len(lines)):
lines[i] = " " + lines[i].strip()
value = "\r\n".join(lines) + "\r\n"
line = key.title() + ": " + value
if prefix:
self._headers.insert(0, line)
else:
self._headers.append(line)
def flushheaders(self):
self._fp.writelines(self._headers)
self._headers = []
def startbody(self, ctype=None, plist=[], prefix=1,
add_to_http_hdrs=0, content_type=1):
"""
prefix is ignored if add_to_http_hdrs is true.
"""
if content_type and ctype:
for name, value in plist:
ctype = ctype + ';\r\n %s=%s' % (name, value)
self.addheader("Content-Type", ctype, prefix=prefix,
add_to_http_hdrs=add_to_http_hdrs)
self.flushheaders()
if not add_to_http_hdrs: self._fp.write("\r\n")
self._first_part = True
return self._fp
def startmultipartbody(self, subtype, boundary=None, plist=[], prefix=1,
add_to_http_hdrs=0, content_type=1):
boundary = boundary or choose_boundary()
self._boundary.append(boundary)
return self.startbody("multipart/" + subtype,
[("boundary", boundary)] + plist,
prefix=prefix,
add_to_http_hdrs=add_to_http_hdrs,
content_type=content_type)
def nextpart(self):
boundary = self._boundary[-1]
if self._first_part:
self._first_part = False
else:
self._fp.write("\r\n")
self._fp.write("--" + boundary + "\r\n")
return self.__class__(self._fp)
def lastpart(self):
if self._first_part:
self.nextpart()
boundary = self._boundary.pop()
self._fp.write("\r\n--" + boundary + "--\r\n")
class LocateError(ValueError): pass
class AmbiguityError(LocateError): pass
class ControlNotFoundError(LocateError): pass
class ItemNotFoundError(LocateError): pass
class ItemCountError(ValueError): pass
# for backwards compatibility, ParseError derives from exceptions that were
# raised by versions of ClientForm <= 0.2.5
# TODO: move to _html
class ParseError(sgmllib.SGMLParseError,
HTMLParser.HTMLParseError):
pass
class _AbstractFormParser:
"""forms attribute contains HTMLForm instances on completion."""
# thanks to Moshe Zadka for an example of sgmllib/htmllib usage
def __init__(self, entitydefs=None, encoding=DEFAULT_ENCODING):
if entitydefs is None:
entitydefs = get_entitydefs()
self._entitydefs = entitydefs
self._encoding = encoding
self.base = None
self.forms = []
self.labels = []
self._current_label = None
self._current_form = None
self._select = None
self._optgroup = None
self._option = None
self._textarea = None
# forms[0] will contain all controls that are outside of any form
# self._global_form is an alias for self.forms[0]
self._global_form = None
self.start_form([])
self.end_form()
self._current_form = self._global_form = self.forms[0]
def do_base(self, attrs):
debug("%s", attrs)
for key, value in attrs:
if key == "href":
self.base = self.unescape_attr_if_required(value)
def end_body(self):
debug("")
if self._current_label is not None:
self.end_label()
if self._current_form is not self._global_form:
self.end_form()
def start_form(self, attrs):
debug("%s", attrs)
if self._current_form is not self._global_form:
raise ParseError("nested FORMs")
name = None
action = None
enctype = "application/x-www-form-urlencoded"
method = "GET"
d = {}
for key, value in attrs:
if key == "name":
name = self.unescape_attr_if_required(value)
elif key == "action":
action = self.unescape_attr_if_required(value)
elif key == "method":
method = self.unescape_attr_if_required(value.upper())
elif key == "enctype":
enctype = self.unescape_attr_if_required(value.lower())
d[key] = self.unescape_attr_if_required(value)
controls = []
self._current_form = (name, action, method, enctype), d, controls
def end_form(self):
debug("")
if self._current_label is not None:
self.end_label()
if self._current_form is self._global_form:
raise ParseError("end of FORM before start")
self.forms.append(self._current_form)
self._current_form = self._global_form
def start_select(self, attrs):
debug("%s", attrs)
if self._select is not None:
raise ParseError("nested SELECTs")
if self._textarea is not None:
raise ParseError("SELECT inside TEXTAREA")
d = {}
for key, val in attrs:
d[key] = self.unescape_attr_if_required(val)
self._select = d
self._add_label(d)
self._append_select_control({"__select": d})
def end_select(self):
debug("")
if self._select is None:
raise ParseError("end of SELECT before start")
if self._option is not None:
self._end_option()
self._select = None
def start_optgroup(self, attrs):
debug("%s", attrs)
if self._select is None:
raise ParseError("OPTGROUP outside of SELECT")
d = {}
for key, val in attrs:
d[key] = self.unescape_attr_if_required(val)
self._optgroup = d
def end_optgroup(self):
debug("")
if self._optgroup is None:
raise ParseError("end of OPTGROUP before start")
self._optgroup = None
def _start_option(self, attrs):
debug("%s", attrs)
if self._select is None:
raise ParseError("OPTION outside of SELECT")
if self._option is not None:
self._end_option()
d = {}
for key, val in attrs:
d[key] = self.unescape_attr_if_required(val)
self._option = {}
self._option.update(d)
if (self._optgroup and self._optgroup.has_key("disabled") and
not self._option.has_key("disabled")):
self._option["disabled"] = None
def _end_option(self):
debug("")
if self._option is None:
raise ParseError("end of OPTION before start")
contents = self._option.get("contents", "").strip()
self._option["contents"] = contents
if not self._option.has_key("value"):
self._option["value"] = contents
if not self._option.has_key("label"):
self._option["label"] = contents
# stuff dict of SELECT HTML attrs into a special private key
# (gets deleted again later)
self._option["__select"] = self._select
self._append_select_control(self._option)
self._option = None
def _append_select_control(self, attrs):
debug("%s", attrs)
controls = self._current_form[2]
name = self._select.get("name")
controls.append(("select", name, attrs))
def start_textarea(self, attrs):
debug("%s", attrs)
if self._textarea is not None:
raise ParseError("nested TEXTAREAs")
if self._select is not None:
raise ParseError("TEXTAREA inside SELECT")
d = {}
for key, val in attrs:
d[key] = self.unescape_attr_if_required(val)
self._add_label(d)
self._textarea = d
def end_textarea(self):
debug("")
if self._textarea is None:
raise ParseError("end of TEXTAREA before start")
controls = self._current_form[2]
name = self._textarea.get("name")
controls.append(("textarea", name, self._textarea))
self._textarea = None
def start_label(self, attrs):
debug("%s", attrs)
if self._current_label:
self.end_label()
d = {}
for key, val in attrs:
d[key] = self.unescape_attr_if_required(val)
taken = bool(d.get("for")) # empty id is invalid
d["__text"] = ""
d["__taken"] = taken
if taken:
self.labels.append(d)
self._current_label = d
def end_label(self):
debug("")
label = self._current_label
if label is None:
# something is ugly in the HTML, but we're ignoring it
return
self._current_label = None
# if it is staying around, it is True in all cases
del label["__taken"]
def _add_label(self, d):
#debug("%s", d)
if self._current_label is not None:
if not self._current_label["__taken"]:
self._current_label["__taken"] = True
d["__label"] = self._current_label
def handle_data(self, data):
debug("%s", data)
if self._option is not None:
# self._option is a dictionary of the OPTION element's HTML
# attributes, but it has two special keys, one of which is the
special "contents" key, which contains the text between OPTION tags (the
# other is the "__select" key: see the end_option method)
map = self._option
key = "contents"
elif self._textarea is not None:
map = self._textarea
key = "value"
data = normalize_line_endings(data)
# not if within option or textarea
elif self._current_label is not None:
map = self._current_label
key = "__text"
else:
return
if data and not map.has_key(key):
# according to
# http://www.w3.org/TR/html4/appendix/notes.html#h-B.3.1 line break
# immediately after start tags or immediately before end tags must
# be ignored, but real browsers only ignore a line break after a
# start tag, so we'll do that.
if data[0:2] == "\r\n":
data = data[2:]
elif data[0:1] in ["\n", "\r"]:
data = data[1:]
map[key] = data
else:
map[key] = map[key] + data
def do_button(self, attrs):
debug("%s", attrs)
d = {}
d["type"] = "submit" # default
for key, val in attrs:
d[key] = self.unescape_attr_if_required(val)
controls = self._current_form[2]
type = d["type"]
name = d.get("name")
# we don't want to lose information, so use a type string that
# doesn't clash with INPUT TYPE={SUBMIT,RESET,BUTTON}
# e.g. type for BUTTON/RESET is "resetbutton"
# (type for INPUT/RESET is "reset")
type = type+"button"
self._add_label(d)
controls.append((type, name, d))
def do_input(self, attrs):
debug("%s", attrs)
d = {}
d["type"] = "text" # default
for key, val in attrs:
d[key] = self.unescape_attr_if_required(val)
controls = self._current_form[2]
type = d["type"]
name = d.get("name")
self._add_label(d)
controls.append((type, name, d))
def do_isindex(self, attrs):
debug("%s", attrs)
d = {}
for key, val in attrs:
d[key] = self.unescape_attr_if_required(val)
controls = self._current_form[2]
self._add_label(d)
# isindex doesn't have type or name HTML attributes
controls.append(("isindex", None, d))
def handle_entityref(self, name):
#debug("%s", name)
self.handle_data(unescape(
'&%s;' % name, self._entitydefs, self._encoding))
def handle_charref(self, name):
#debug("%s", name)
self.handle_data(unescape_charref(name, self._encoding))
def unescape_attr(self, name):
#debug("%s", name)
return unescape(name, self._entitydefs, self._encoding)
def unescape_attrs(self, attrs):
#debug("%s", attrs)
escaped_attrs = {}
for key, val in attrs.items():
try:
val.items
except AttributeError:
escaped_attrs[key] = self.unescape_attr(val)
else:
# e.g. "__select" -- yuck!
escaped_attrs[key] = self.unescape_attrs(val)
return escaped_attrs
def unknown_entityref(self, ref): self.handle_data("&%s;" % ref)
def unknown_charref(self, ref): self.handle_data("&#%s;" % ref)
class XHTMLCompatibleFormParser(_AbstractFormParser, HTMLParser.HTMLParser):
"""Good for XHTML, bad for tolerance of incorrect HTML."""
# thanks to Michael Howitz for this!
def __init__(self, entitydefs=None, encoding=DEFAULT_ENCODING):
HTMLParser.HTMLParser.__init__(self)
_AbstractFormParser.__init__(self, entitydefs, encoding)
def feed(self, data):
try:
HTMLParser.HTMLParser.feed(self, data)
except HTMLParser.HTMLParseError, exc:
raise ParseError(exc)
def start_option(self, attrs):
_AbstractFormParser._start_option(self, attrs)
def end_option(self):
_AbstractFormParser._end_option(self)
def handle_starttag(self, tag, attrs):
try:
method = getattr(self, "start_" + tag)
except AttributeError:
try:
method = getattr(self, "do_" + tag)
except AttributeError:
pass # unknown tag
else:
method(attrs)
else:
method(attrs)
def handle_endtag(self, tag):
try:
method = getattr(self, "end_" + tag)
except AttributeError:
pass # unknown tag
else:
method()
def unescape(self, name):
# Use the entitydefs passed into constructor, not
# HTMLParser.HTMLParser's entitydefs.
return self.unescape_attr(name)
def unescape_attr_if_required(self, name):
return name # HTMLParser.HTMLParser already did it
def unescape_attrs_if_required(self, attrs):
return attrs # ditto
def close(self):
HTMLParser.HTMLParser.close(self)
self.end_body()
class _AbstractSgmllibParser(_AbstractFormParser):
def do_option(self, attrs):
_AbstractFormParser._start_option(self, attrs)
# we override this attr to decode hex charrefs
entity_or_charref = re.compile(
'&(?:([a-zA-Z][-.a-zA-Z0-9]*)|#(x?[0-9a-fA-F]+))(;?)')
def convert_entityref(self, name):
return unescape("&%s;" % name, self._entitydefs, self._encoding)
def convert_charref(self, name):
return unescape_charref("%s" % name, self._encoding)
def unescape_attr_if_required(self, name):
return name # sgmllib already did it
def unescape_attrs_if_required(self, attrs):
return attrs # ditto
class FormParser(_AbstractSgmllibParser, _sgmllib_copy.SGMLParser):
"""Good for tolerance of incorrect HTML, bad for XHTML."""
def __init__(self, entitydefs=None, encoding=DEFAULT_ENCODING):
_sgmllib_copy.SGMLParser.__init__(self)
_AbstractFormParser.__init__(self, entitydefs, encoding)
def feed(self, data):
try:
_sgmllib_copy.SGMLParser.feed(self, data)
except _sgmllib_copy.SGMLParseError, exc:
raise ParseError(exc)
def close(self):
_sgmllib_copy.SGMLParser.close(self)
self.end_body()
class _AbstractBSFormParser(_AbstractSgmllibParser):
bs_base_class = None
def __init__(self, entitydefs=None, encoding=DEFAULT_ENCODING):
_AbstractFormParser.__init__(self, entitydefs, encoding)
self.bs_base_class.__init__(self)
def handle_data(self, data):
_AbstractFormParser.handle_data(self, data)
self.bs_base_class.handle_data(self, data)
def feed(self, data):
try:
self.bs_base_class.feed(self, data)
except _sgmllib_copy.SGMLParseError, exc:
raise ParseError(exc)
def close(self):
self.bs_base_class.close(self)
self.end_body()
class RobustFormParser(_AbstractBSFormParser, _beautifulsoup.BeautifulSoup):
"""Tries to be highly tolerant of incorrect HTML."""
bs_base_class = _beautifulsoup.BeautifulSoup
class NestingRobustFormParser(_AbstractBSFormParser,
_beautifulsoup.ICantBelieveItsBeautifulSoup):
"""Tries to be highly tolerant of incorrect HTML.
Different from RobustFormParser in that it more often guesses nesting
above missing end tags (see BeautifulSoup docs).
"""
bs_base_class = _beautifulsoup.ICantBelieveItsBeautifulSoup
#FormParser = XHTMLCompatibleFormParser # testing hack
#FormParser = RobustFormParser # testing hack
def ParseResponseEx(response,
select_default=False,
form_parser_class=FormParser,
request_class=_request.Request,
entitydefs=None,
encoding=DEFAULT_ENCODING,
# private
_urljoin=urlparse.urljoin,
_urlparse=urlparse.urlparse,
_urlunparse=urlparse.urlunparse,
):
"""Identical to ParseResponse, except that:
1. The returned list contains an extra item. The first form in the list
contains all controls not contained in any FORM element.
2. The arguments ignore_errors and backwards_compat have been removed.
3. Backwards-compatibility mode (backwards_compat=True) is not available.
"""
return _ParseFileEx(response, response.geturl(),
select_default,
False,
form_parser_class,
request_class,
entitydefs,
False,
encoding,
_urljoin=_urljoin,
_urlparse=_urlparse,
_urlunparse=_urlunparse,
)
def ParseFileEx(file, base_uri,
select_default=False,
form_parser_class=FormParser,
request_class=_request.Request,
entitydefs=None,
encoding=DEFAULT_ENCODING,
# private
_urljoin=urlparse.urljoin,
_urlparse=urlparse.urlparse,
_urlunparse=urlparse.urlunparse,
):
"""Identical to ParseFile, except that:
1. The returned list contains an extra item. The first form in the list
contains all controls not contained in any FORM element.
2. The arguments ignore_errors and backwards_compat have been removed.
3. Backwards-compatibility mode (backwards_compat=True) is not available.
"""
return _ParseFileEx(file, base_uri,
select_default,
False,
form_parser_class,
request_class,
entitydefs,
False,
encoding,
_urljoin=_urljoin,
_urlparse=_urlparse,
_urlunparse=_urlunparse,
)
def ParseString(text, base_uri, *args, **kwds):
fh = StringIO(text)
return ParseFileEx(fh, base_uri, *args, **kwds)
def ParseResponse(response, *args, **kwds):
"""Parse HTTP response and return a list of HTMLForm instances.
The return value of mechanize.urlopen can be conveniently passed to this
function as the response parameter.
mechanize.ParseError is raised on parse errors.
response: file-like object (supporting read() method) with a method
geturl(), returning the URI of the HTTP response
select_default: for multiple-selection SELECT controls and RADIO controls,
pick the first item as the default if none are selected in the HTML
form_parser_class: class to instantiate and use to parse the HTML
request_class: class to return from .click() method (default is
mechanize.Request)
entitydefs: mapping like {"&amp;": "&", ...} containing HTML entity
definitions (a sensible default is used)
encoding: character encoding used for encoding numeric character references
when matching link text. This function does not attempt to find the encoding
in a META HTTP-EQUIV attribute in the document itself; higher-level code
(mechanize.Browser, for example) does do that and will pass the correct value
using this parameter.
backwards_compat: boolean that determines whether the returned HTMLForm
objects are backwards-compatible with old code. If backwards_compat is
true:
- ClientForm 0.1 code will continue to work as before.
- Label searches that do not specify a nr (number or count) will always
get the first match, even if other controls match. If
backwards_compat is False, label searches that have ambiguous results
will raise an AmbiguityError.
- Item label matching is done by strict string comparison rather than
substring matching.
- De-selecting individual list items is allowed even if the Item is
disabled.
The backwards_compat argument will be removed in a future release.
Pass a true value for select_default if you want the behaviour specified by
RFC 1866 (the HTML 2.0 standard), which is to select the first item in a
RADIO or multiple-selection SELECT control if none were selected in the
HTML. Most browsers (including Microsoft Internet Explorer (IE) and
Netscape Navigator) instead leave all items unselected in these cases. The
W3C HTML 4.0 standard leaves this behaviour undefined in the case of
multiple-selection SELECT controls, but insists that at least one RADIO
button should be checked at all times, in contradiction to browser
behaviour.
There is a choice of parsers. mechanize.XHTMLCompatibleFormParser (uses
HTMLParser.HTMLParser) works best for XHTML, mechanize.FormParser (uses
bundled copy of sgmllib.SGMLParser) (the default) works better for ordinary
grubby HTML. Note that HTMLParser is only available in Python 2.2 and
later. You can pass your own class in here as a hack to work around bad
HTML, but at your own risk: there is no well-defined interface.
"""
return _ParseFileEx(response, response.geturl(), *args, **kwds)[1:]
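# Illustrative usage sketch (URL and control name are hypothetical):
#
#   response = mechanize.urlopen("http://example.com/form.html")
#   forms = ParseResponse(response, backwards_compat=False)
#   form = forms[0]
#   form["user"] = "alice"
#   request = form.click()  # mechanize.Request for the filled-in form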
def ParseFile(file, base_uri, *args, **kwds):
"""Parse HTML and return a list of HTMLForm instances.
mechanize.ParseError is raised on parse errors.
file: file-like object (supporting read() method) containing HTML with zero
or more forms to be parsed
base_uri: the URI of the document (note that the base URI used to submit
the form will be that given in the BASE element if present, not that of
the document)
For the other arguments and further details, see ParseResponse.__doc__.
"""
return _ParseFileEx(file, base_uri, *args, **kwds)[1:]
def _ParseFileEx(file, base_uri,
select_default=False,
ignore_errors=False,
form_parser_class=FormParser,
request_class=_request.Request,
entitydefs=None,
backwards_compat=True,
encoding=DEFAULT_ENCODING,
_urljoin=urlparse.urljoin,
_urlparse=urlparse.urlparse,
_urlunparse=urlparse.urlunparse,
):
if backwards_compat:
deprecation("operating in backwards-compatibility mode", 1)
fp = form_parser_class(entitydefs, encoding)
while 1:
data = file.read(CHUNK)
try:
fp.feed(data)
except ParseError, e:
e.base_uri = base_uri
raise
if len(data) != CHUNK: break
fp.close()
if fp.base is not None:
# HTML BASE element takes precedence over document URI
base_uri = fp.base
labels = [] # Label(label) for label in fp.labels]
id_to_labels = {}
for l in fp.labels:
label = Label(l)
labels.append(label)
for_id = l["for"]
coll = id_to_labels.get(for_id)
if coll is None:
id_to_labels[for_id] = [label]
else:
coll.append(label)
forms = []
for (name, action, method, enctype), attrs, controls in fp.forms:
if action is None:
action = base_uri
else:
action = _urljoin(base_uri, action)
# would be nice to make HTMLForm class (form builder) pluggable
form = HTMLForm(
action, method, enctype, name, attrs, request_class,
forms, labels, id_to_labels, backwards_compat)
form._urlparse = _urlparse
form._urlunparse = _urlunparse
for ii in range(len(controls)):
type, name, attrs = controls[ii]
# index=ii*10 allows ImageControl to return multiple ordered pairs
form.new_control(
type, name, attrs, select_default=select_default, index=ii*10)
forms.append(form)
for form in forms:
form.fixup()
return forms
class Label:
def __init__(self, attrs):
self.id = attrs.get("for")
self._text = attrs.get("__text").strip()
self._ctext = compress_text(self._text)
self.attrs = attrs
self._backwards_compat = False # maintained by HTMLForm
def __getattr__(self, name):
if name == "text":
if self._backwards_compat:
return self._text
else:
return self._ctext
return getattr(Label, name)
def __setattr__(self, name, value):
if name == "text":
# don't see any need for this, so make it read-only
raise AttributeError("text attribute is read-only")
self.__dict__[name] = value
def __str__(self):
return "<Label(id=%r, text=%r)>" % (self.id, self.text)
def _get_label(attrs):
text = attrs.get("__label")
if text is not None:
return Label(text)
else:
return None
class Control:
"""An HTML form control.
An HTMLForm contains a sequence of Controls. The Controls in an HTMLForm
are accessed using the HTMLForm.find_control method or the
HTMLForm.controls attribute.
Control instances are usually constructed using the ParseFile /
ParseResponse functions. If you use those functions, you can ignore the
rest of this paragraph. A Control is only properly initialised after the
fixup method has been called. In fact, this is only strictly necessary for
ListControl instances. This is necessary because ListControls are built up
from ListControls each containing only a single item, and their initial
value(s) can only be known after the sequence is complete.
The types and values that are acceptable for assignment to the value
attribute are defined by subclasses.
If the disabled attribute is true, this represents the state typically
represented by browsers by 'greying out' a control. If the disabled
attribute is true, the Control will raise AttributeError if an attempt is
made to change its value. In addition, the control will not be considered
'successful' as defined by the W3C HTML 4 standard -- ie. it will
contribute no data to the return value of the HTMLForm.click* methods. To
enable a control, set the disabled attribute to a false value.
If the readonly attribute is true, the Control will raise AttributeError if
an attempt is made to change its value. To make a control writable, set
the readonly attribute to a false value.
All controls have the disabled and readonly attributes, not only those that
may have the HTML attributes of the same names.
On assignment to the value attribute, the following exceptions are raised:
TypeError, AttributeError (if the value attribute should not be assigned
to, because the control is disabled, for example) and ValueError.
If the name or value attributes are None, or the value is an empty list, or
if the control is disabled, the control is not successful.
Public attributes:
type: string describing type of control (see the keys of the
HTMLForm.type2class dictionary for the allowable values) (readonly)
name: name of control (readonly)
value: current value of control (subclasses may allow a single value, a
sequence of values, or either)
disabled: disabled state
readonly: readonly state
id: value of id HTML attribute
"""
def __init__(self, type, name, attrs, index=None):
"""
type: string describing type of control (see the keys of the
HTMLForm.type2class dictionary for the allowable values)
name: control name
attrs: HTML attributes of control's HTML element
"""
raise NotImplementedError()
def add_to_form(self, form):
self._form = form
form.controls.append(self)
def fixup(self):
pass
def is_of_kind(self, kind):
raise NotImplementedError()
def clear(self):
raise NotImplementedError()
def __getattr__(self, name): raise NotImplementedError()
def __setattr__(self, name, value): raise NotImplementedError()
def pairs(self):
"""Return list of (key, value) pairs suitable for passing to urlencode.
"""
return [(k, v) for (i, k, v) in self._totally_ordered_pairs()]
def _totally_ordered_pairs(self):
"""Return list of (key, value, index) tuples.
Like pairs, but allows preserving correct ordering even where several
controls are involved.
"""
raise NotImplementedError()
def _write_mime_data(self, mw, name, value):
"""Write data for a subitem of this control to a MimeWriter."""
# called by HTMLForm
mw2 = mw.nextpart()
mw2.addheader("Content-Disposition",
'form-data; name="%s"' % name, 1)
f = mw2.startbody(prefix=0)
f.write(value)
def __str__(self):
raise NotImplementedError()
def get_labels(self):
"""Return all labels (Label instances) for this control.
If the control was surrounded by a <label> tag, that will be the first
label; all other labels, connected by 'for' and 'id', are in the order
in which they appear in the HTML.
"""
res = []
if self._label:
res.append(self._label)
if self.id:
res.extend(self._form._id_to_labels.get(self.id, ()))
return res
#---------------------------------------------------
class ScalarControl(Control):
"""Control whose value is not restricted to one of a prescribed set.
Some ScalarControls don't accept any value attribute. Otherwise, takes a
single value, which must be string-like.
Additional read-only public attribute:
attrs: dictionary mapping the names of original HTML attributes of the
control to their values
"""
def __init__(self, type, name, attrs, index=None):
self._index = index
self._label = _get_label(attrs)
self.__dict__["type"] = type.lower()
self.__dict__["name"] = name
self._value = attrs.get("value")
self.disabled = attrs.has_key("disabled")
self.readonly = attrs.has_key("readonly")
self.id = attrs.get("id")
self.attrs = attrs.copy()
self._clicked = False
self._urlparse = urlparse.urlparse
self._urlunparse = urlparse.urlunparse
def __getattr__(self, name):
if name == "value":
return self.__dict__["_value"]
else:
raise AttributeError("%s instance has no attribute '%s'" %
(self.__class__.__name__, name))
def __setattr__(self, name, value):
if name == "value":
if not isstringlike(value):
raise TypeError("must assign a string")
elif self.readonly:
raise AttributeError("control '%s' is readonly" % self.name)
elif self.disabled:
raise AttributeError("control '%s' is disabled" % self.name)
self.__dict__["_value"] = value
elif name in ("name", "type"):
raise AttributeError("%s attribute is readonly" % name)
else:
self.__dict__[name] = value
def _totally_ordered_pairs(self):
name = self.name
value = self.value
if name is None or value is None or self.disabled:
return []
return [(self._index, name, value)]
def clear(self):
if self.readonly:
raise AttributeError("control '%s' is readonly" % self.name)
self.__dict__["_value"] = None
def __str__(self):
name = self.name
value = self.value
if name is None: name = "<None>"
if value is None: value = "<None>"
infos = []
if self.disabled: infos.append("disabled")
if self.readonly: infos.append("readonly")
info = ", ".join(infos)
if info: info = " (%s)" % info
return "<%s(%s=%s)%s>" % (self.__class__.__name__, name, value, info)
#---------------------------------------------------
class TextControl(ScalarControl):
"""Textual input control.
Covers:
INPUT/TEXT
INPUT/PASSWORD
INPUT/HIDDEN
TEXTAREA
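Illustrative usage (assumes a form obtained from ParseResponse that contains
a text control named "q"):
form["q"] = "some text"
assert form["q"] == "some text"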
"""
def __init__(self, type, name, attrs, index=None):
ScalarControl.__init__(self, type, name, attrs, index)
if self.type == "hidden": self.readonly = True
if self._value is None:
self._value = ""
def is_of_kind(self, kind): return kind == "text"
#---------------------------------------------------
class FileControl(ScalarControl):
"""File upload with INPUT TYPE=FILE.
The value attribute of a FileControl is always None. Use add_file instead.
Additional public method: add_file
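Illustrative usage (the control name "upload" is hypothetical):
from StringIO import StringIO
form.find_control("upload").add_file(StringIO("data"),
content_type="text/plain", filename="data.txt")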
"""
def __init__(self, type, name, attrs, index=None):
ScalarControl.__init__(self, type, name, attrs, index)
self._value = None
self._upload_data = []
def is_of_kind(self, kind): return kind == "file"
def clear(self):
if self.readonly:
raise AttributeError("control '%s' is readonly" % self.name)
self._upload_data = []
def __setattr__(self, name, value):
if name in ("value", "name", "type"):
raise AttributeError("%s attribute is readonly" % name)
else:
self.__dict__[name] = value
def add_file(self, file_object, content_type=None, filename=None):
if not hasattr(file_object, "read"):
raise TypeError("file-like object must have read method")
if content_type is not None and not isstringlike(content_type):
raise TypeError("content type must be None or string-like")
if filename is not None and not isstringlike(filename):
raise TypeError("filename must be None or string-like")
if content_type is None:
content_type = "application/octet-stream"
self._upload_data.append((file_object, content_type, filename))
def _totally_ordered_pairs(self):
# XXX should it be successful even if unnamed?
if self.name is None or self.disabled:
return []
return [(self._index, self.name, "")]
# If enctype is application/x-www-form-urlencoded and there's a FILE
# control present, what should be sent? Strictly, it should be 'name=data'
# (see HTML 4.01 spec., section 17.13.2), but code sends "name=" ATM. What
# about multiple file upload?
def _write_mime_data(self, mw, _name, _value):
# called by HTMLForm
# assert _name == self.name and _value == ''
if len(self._upload_data) < 2:
if len(self._upload_data) == 0:
file_object = StringIO()
content_type = "application/octet-stream"
filename = ""
else:
file_object, content_type, filename = self._upload_data[0]
if filename is None:
filename = ""
mw2 = mw.nextpart()
fn_part = '; filename="%s"' % filename
disp = 'form-data; name="%s"%s' % (self.name, fn_part)
mw2.addheader("Content-Disposition", disp, prefix=1)
fh = mw2.startbody(content_type, prefix=0)
fh.write(file_object.read())
else:
# multiple files
mw2 = mw.nextpart()
disp = 'form-data; name="%s"' % self.name
mw2.addheader("Content-Disposition", disp, prefix=1)
fh = mw2.startmultipartbody("mixed", prefix=0)
for file_object, content_type, filename in self._upload_data:
mw3 = mw2.nextpart()
if filename is None:
filename = ""
fn_part = '; filename="%s"' % filename
disp = "file%s" % fn_part
mw3.addheader("Content-Disposition", disp, prefix=1)
fh2 = mw3.startbody(content_type, prefix=0)
fh2.write(file_object.read())
mw2.lastpart()
def __str__(self):
name = self.name
if name is None: name = "<None>"
if not self._upload_data:
value = "<No files added>"
else:
value = []
for file, ctype, filename in self._upload_data:
if filename is None:
value.append("<Unnamed file>")
else:
value.append(filename)
value = ", ".join(value)
info = []
if self.disabled: info.append("disabled")
if self.readonly: info.append("readonly")
info = ", ".join(info)
if info: info = " (%s)" % info
return "<%s(%s=%s)%s>" % (self.__class__.__name__, name, value, info)
#---------------------------------------------------
class IsindexControl(ScalarControl):
"""ISINDEX control.
ISINDEX is the odd-one-out of HTML form controls. In fact, it isn't really
part of regular HTML forms at all, and predates them.  You're only allowed
one ISINDEX per HTML document. ISINDEX and regular form submission are
mutually exclusive -- either submit a form, or the ISINDEX.
Having said this, since ISINDEX controls may appear in forms (which is
probably bad HTML), ParseFile / ParseResponse will include them in the
HTMLForm instances it returns. You can set the ISINDEX's value, as with
any other control (but note that ISINDEX controls have no name, so you'll
need to use the type argument of set_value!). When you submit the form,
the ISINDEX will not be successful (ie. no data will get returned to the
server as a result of its presence), unless you click on the ISINDEX
control, in which case the ISINDEX gets submitted instead of the form:
form.set_value("my isindex value", type="isindex")
mechanize.urlopen(form.click(type="isindex"))
ISINDEX elements outside of FORMs are ignored. If you want to submit one
by hand, do it like so:
url = urlparse.urljoin(page_uri, "?"+urllib.quote_plus("my isindex value"))
result = mechanize.urlopen(url)
"""
def __init__(self, type, name, attrs, index=None):
ScalarControl.__init__(self, type, name, attrs, index)
if self._value is None:
self._value = ""
def is_of_kind(self, kind): return kind in ["text", "clickable"]
def _totally_ordered_pairs(self):
return []
def _click(self, form, coord, return_type, request_class=_request.Request):
# Relative URL for ISINDEX submission: instead of "foo=bar+baz",
# want "bar+baz".
# This doesn't seem to be specified in HTML 4.01 spec. (ISINDEX is
# deprecated in 4.01, but it should still say how to submit it).
# Submission of ISINDEX is explained in the HTML 3.2 spec, though.
parts = self._urlparse(form.action)
rest, (query, frag) = parts[:-2], parts[-2:]
parts = rest + (urllib.quote_plus(self.value), None)
url = self._urlunparse(parts)
req_data = url, None, []
if return_type == "pairs":
return []
elif return_type == "request_data":
return req_data
else:
return request_class(url)
def __str__(self):
value = self.value
if value is None: value = "<None>"
infos = []
if self.disabled: infos.append("disabled")
if self.readonly: infos.append("readonly")
info = ", ".join(infos)
if info: info = " (%s)" % info
return "<%s(%s)%s>" % (self.__class__.__name__, value, info)
#---------------------------------------------------
class IgnoreControl(ScalarControl):
"""Control that we're not interested in.
Covers:
INPUT/RESET
BUTTON/RESET
INPUT/BUTTON
BUTTON/BUTTON
These controls are always unsuccessful, in the terminology of HTML 4 (ie.
they never require any information to be returned to the server).
BUTTON/BUTTON is used to generate events for script embedded in HTML.
The value attribute of IgnoreControl is always None.
"""
def __init__(self, type, name, attrs, index=None):
ScalarControl.__init__(self, type, name, attrs, index)
self._value = None
def is_of_kind(self, kind): return False
def __setattr__(self, name, value):
if name == "value":
raise AttributeError(
"control '%s' is ignored, hence read-only" % self.name)
elif name in ("name", "type"):
raise AttributeError("%s attribute is readonly" % name)
else:
self.__dict__[name] = value
#---------------------------------------------------
# ListControls
# helpers and subsidiary classes
class Item:
def __init__(self, control, attrs, index=None):
label = _get_label(attrs)
self.__dict__.update({
"name": attrs["value"],
"_labels": label and [label] or [],
"attrs": attrs,
"_control": control,
"disabled": attrs.has_key("disabled"),
"_selected": False,
"id": attrs.get("id"),
"_index": index,
})
control.items.append(self)
def get_labels(self):
"""Return all labels (Label instances) for this item.
For items that represent radio buttons or checkboxes, if the item was
surrounded by a <label> tag, that will be the first label; all other
labels, connected by 'for' and 'id', are in the order that they appear in
the HTML.
For items that represent select options, if the option had a label
attribute, that will be the first label. If the option has contents
(text within the option tags) and it is not the same as the label
attribute (if any), that will be a label. There is nothing in the
spec to my knowledge that makes an option with an id unable to be the
target of a label's for attribute, so those are included, if any, for
the sake of consistency and completeness.
"""
res = []
res.extend(self._labels)
if self.id:
res.extend(self._control._form._id_to_labels.get(self.id, ()))
return res
def __getattr__(self, name):
if name=="selected":
return self._selected
raise AttributeError(name)
def __setattr__(self, name, value):
if name == "selected":
self._control._set_selected_state(self, value)
elif name == "disabled":
self.__dict__["disabled"] = bool(value)
else:
raise AttributeError(name)
def __str__(self):
res = self.name
if self.selected:
res = "*" + res
if self.disabled:
res = "(%s)" % res
return res
def __repr__(self):
# XXX appending the attrs without distinguishing them from name and id
# is silly
attrs = [("name", self.name), ("id", self.id)]+self.attrs.items()
return "<%s %s>" % (
self.__class__.__name__,
" ".join(["%s=%r" % (k, v) for k, v in attrs])
)
def disambiguate(items, nr, **kwds):
msgs = []
for key, value in kwds.items():
msgs.append("%s=%r" % (key, value))
msg = " ".join(msgs)
if not items:
raise ItemNotFoundError(msg)
if nr is None:
if len(items) > 1:
raise AmbiguityError(msg)
nr = 0
if len(items) <= nr:
raise ItemNotFoundError(msg)
return items[nr]
class ListControl(Control):
"""Control representing a sequence of items.
The value attribute of a ListControl represents the successful list items
in the control. The successful list items are those that are selected and
not disabled.
ListControl implements both list controls that take a length-1 value
(single-selection) and those that take length >1 values
(multiple-selection).
ListControls accept sequence values only. Some controls only accept
sequences of length 0 or 1 (RADIO, and single-selection SELECT).
In those cases, ItemCountError is raised if len(sequence) > 1. CHECKBOXes
and multiple-selection SELECTs (those having the "multiple" HTML attribute)
accept sequences of any length.
Note the following mistake:
control.value = some_value
assert control.value == some_value # not necessarily true
The reason for this is that the value attribute always gives the list items
in the order they were listed in the HTML.
ListControl items can also be referred to by their labels instead of names.
Use the label argument to .get(), and the .set_value_by_label(),
.get_value_by_label() methods.
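For example (illustrative; the control name "cheeses" and the item names and
labels are hypothetical):
control = form.find_control("cheeses")
control.value = ["cheddar"]                         # select by item name
control.get(label="Mild Cheddar").selected = True   # select by item label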
Note that, rather confusingly, though SELECT controls are represented in
HTML by SELECT elements (which contain OPTION elements, representing
individual list items), CHECKBOXes and RADIOs are not represented by *any*
element. Instead, those controls are represented by a collection of INPUT
elements. For example, this is a SELECT control, named "control1":
<select name="control1">
<option>foo</option>
<option value="1">bar</option>
</select>
and this is a CHECKBOX control, named "control2":
<input type="checkbox" name="control2" value="foo" id="cbe1">
<input type="checkbox" name="control2" value="bar" id="cbe2">
The id attribute of a CHECKBOX or RADIO ListControl is always that of its
first element (for example, "cbe1" above).
Additional read-only public attribute: multiple.
"""
# ListControls are built up by the parser from their component items by
# creating one ListControl per item, consolidating them into a single
# master ListControl held by the HTMLForm:
# -User calls form.new_control(...)
# -Form creates Control, and calls control.add_to_form(self).
# -Control looks for a Control with the same name and type in the form,
# and if it finds one, merges itself with that control by calling
# control.merge_control(self). The first Control added to the form, of
# a particular name and type, is the only one that survives in the
# form.
# -Form calls control.fixup for all its controls. ListControls in the
# form know they can now safely pick their default values.
# To create a ListControl without an HTMLForm, use:
# control.merge_control(new_control)
# (actually, it's much easier just to use ParseFile)
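# Illustrative sketch of the sequence described above, with hypothetical
# control and item names:
#   form.new_control("checkbox", "spam", {"value": "on"})
#   form.new_control("checkbox", "spam", {"value": "extra"})
#   form.fixup()
#   control = form.find_control("spam")  # single control with two items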
_label = None
def __init__(self, type, name, attrs={}, select_default=False,
called_as_base_class=False, index=None):
"""
select_default: for RADIO and multiple-selection SELECT controls, pick
the first item as the default if no 'selected' HTML attribute is
present
"""
if not called_as_base_class:
raise NotImplementedError()
self.__dict__["type"] = type.lower()
self.__dict__["name"] = name
self._value = attrs.get("value")
self.disabled = False
self.readonly = False
self.id = attrs.get("id")
self._closed = False
# As Controls are merged in with .merge_control(), self.attrs will
# refer to each Control in turn -- always the most recently merged
# control. Each merged-in Control instance corresponds to a single
# list item: see ListControl.__doc__.
self.items = []
self._form = None
self._select_default = select_default
self._clicked = False
def clear(self):
self.value = []
def is_of_kind(self, kind):
if kind == "list":
return True
elif kind == "multilist":
return bool(self.multiple)
elif kind == "singlelist":
return not self.multiple
else:
return False
def get_items(self, name=None, label=None, id=None,
exclude_disabled=False):
"""Return matching items by name or label.
For argument docs, see the docstring for .get()
"""
if name is not None and not isstringlike(name):
raise TypeError("item name must be string-like")
if label is not None and not isstringlike(label):
raise TypeError("item label must be string-like")
if id is not None and not isstringlike(id):
raise TypeError("item id must be string-like")
items = [] # order is important
compat = self._form.backwards_compat
for o in self.items:
if exclude_disabled and o.disabled:
continue
if name is not None and o.name != name:
continue
if label is not None:
for l in o.get_labels():
if ((compat and l.text == label) or
(not compat and l.text.find(label) > -1)):
break
else:
continue
if id is not None and o.id != id:
continue
items.append(o)
return items
def get(self, name=None, label=None, id=None, nr=None,
exclude_disabled=False):
"""Return item by name or label, disambiguating if necessary with nr.
All arguments must be passed by name, with the exception of 'name',
which may be used as a positional argument.
If name is specified, then the item must have the indicated name.
If label is specified, then the item must have a label whose
whitespace-compressed, stripped, text substring-matches the indicated
label string (e.g. label="please choose" will match
" Do please choose an item ").
If id is specified, then the item must have the indicated id.
nr is an optional 0-based index of the items matching the query.
If nr is the default None value and more than one item is found, raises
AmbiguityError (unless the HTMLForm instance's backwards_compat
attribute is true).
If no item is found, or if items are found but nr is specified and not
found, raises ItemNotFoundError.
Optionally excludes disabled items.
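Illustrative examples (item names, labels and ids are hypothetical):
item = control.get("cheddar")
item = control.get(label="Mild Cheddar", nr=0)
item = control.get(id="cheese_1", exclude_disabled=True)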
"""
if nr is None and self._form.backwards_compat:
nr = 0 # :-/
items = self.get_items(name, label, id, exclude_disabled)
return disambiguate(items, nr, name=name, label=label, id=id)
def _get(self, name, by_label=False, nr=None, exclude_disabled=False):
# strictly for use by deprecated methods
if by_label:
name, label = None, name
else:
name, label = name, None
return self.get(name, label, nr=nr, exclude_disabled=exclude_disabled)
def toggle(self, name, by_label=False, nr=None):
"""Deprecated: given a name or label and optional disambiguating index
nr, toggle the matching item's selection.
Selecting items follows the behavior described in the docstring of the
'get' method.
If the item is disabled, or this control is disabled or readonly,
AttributeError is raised.
"""
deprecation(
"item = control.get(...); item.selected = not item.selected")
o = self._get(name, by_label, nr)
self._set_selected_state(o, not o.selected)
def set(self, selected, name, by_label=False, nr=None):
"""Deprecated: given a name or label and optional disambiguating index
nr, set the matching item's selection to the bool value of selected.
Selecting items follows the behavior described in the docstring of the
'get' method.
If the item is disabled, or this control is disabled or readonly,
AttributeError is raised.
"""
deprecation(
"control.get(...).selected = <boolean>")
self._set_selected_state(self._get(name, by_label, nr), selected)
def _set_selected_state(self, item, action):
# action:
# bool False: off
# bool True: on
if self.disabled:
raise AttributeError("control '%s' is disabled" % self.name)
if self.readonly:
raise AttributeError("control '%s' is readonly" % self.name)
action = bool(action)
compat = self._form.backwards_compat
if not compat and item.disabled:
raise AttributeError("item is disabled")
else:
if compat and item.disabled and action:
raise AttributeError("item is disabled")
if self.multiple:
item.__dict__["_selected"] = action
else:
if not action:
item.__dict__["_selected"] = False
else:
for o in self.items:
o.__dict__["_selected"] = False
item.__dict__["_selected"] = True
def toggle_single(self, by_label=None):
"""Deprecated: toggle the selection of the single item in this control.
Raises ItemCountError if the control does not contain only one item.
by_label argument is ignored, and included only for backwards
compatibility.
"""
deprecation(
"control.items[0].selected = not control.items[0].selected")
if len(self.items) != 1:
raise ItemCountError(
"'%s' is not a single-item control" % self.name)
item = self.items[0]
self._set_selected_state(item, not item.selected)
def set_single(self, selected, by_label=None):
"""Deprecated: set the selection of the single item in this control.
Raises ItemCountError if the control does not contain only one item.
by_label argument is ignored, and included only for backwards
compatibility.
"""
deprecation(
"control.items[0].selected = <boolean>")
if len(self.items) != 1:
raise ItemCountError(
"'%s' is not a single-item control" % self.name)
self._set_selected_state(self.items[0], selected)
def get_item_disabled(self, name, by_label=False, nr=None):
"""Get disabled state of named list item in a ListControl."""
deprecation(
"control.get(...).disabled")
return self._get(name, by_label, nr).disabled
def set_item_disabled(self, disabled, name, by_label=False, nr=None):
"""Set disabled state of named list item in a ListControl.
disabled: boolean disabled state
"""
deprecation(
"control.get(...).disabled = <boolean>")
self._get(name, by_label, nr).disabled = disabled
def set_all_items_disabled(self, disabled):
"""Set disabled state of all list items in a ListControl.
disabled: boolean disabled state
"""
for o in self.items:
o.disabled = disabled
def get_item_attrs(self, name, by_label=False, nr=None):
"""Return dictionary of HTML attributes for a single ListControl item.
The HTML element types that describe list items are: OPTION for SELECT
controls, INPUT for the rest. These elements have HTML attributes that
you may occasionally want to know about -- for example, the "alt" HTML
attribute gives a text string describing the item (graphical browsers
usually display this as a tooltip).
The returned dictionary maps HTML attribute names to values. The names
and values are taken from the original HTML.
"""
deprecation(
"control.get(...).attrs")
return self._get(name, by_label, nr).attrs
def close_control(self):
self._closed = True
def add_to_form(self, form):
assert self._form is None or form == self._form, (
"can't add control to more than one form")
self._form = form
if self.name is None:
# always count nameless elements as separate controls
Control.add_to_form(self, form)
else:
for ii in range(len(form.controls)-1, -1, -1):
control = form.controls[ii]
if control.name == self.name and control.type == self.type:
if control._closed:
Control.add_to_form(self, form)
else:
control.merge_control(self)
break
else:
Control.add_to_form(self, form)
def merge_control(self, control):
assert bool(control.multiple) == bool(self.multiple)
# usually, isinstance(control, self.__class__)
self.items.extend(control.items)
def fixup(self):
"""
ListControls are built up from component list items (which are also
ListControls) during parsing. This method should be called after all
items have been added. See ListControl.__doc__ for the reason this is
required.
"""
# Need to set default selection where no item was indicated as being
# selected by the HTML:
# CHECKBOX:
# Nothing should be selected.
# SELECT/single, SELECT/multiple and RADIO:
# RFC 1866 (HTML 2.0): says first item should be selected.
# W3C HTML 4.01 Specification: says that client behaviour is
# undefined in this case. For RADIO, exactly one must be selected,
# though which one is undefined.
# Both Netscape and Microsoft Internet Explorer (IE) choose first
# item for SELECT/single. However, both IE5 and Mozilla (both 1.0
# and Firebird 0.6) leave all items unselected for RADIO and
# SELECT/multiple.
# Since both Netscape and IE choose the first item for
# SELECT/single, we do the same. OTOH, both Netscape and IE
# leave SELECT/multiple with nothing selected, in violation of RFC 1866
# (but not in violation of the W3C HTML 4 standard); the same is true
# of RADIO (which *is* in violation of the HTML 4 standard). We follow
# RFC 1866 if the _select_default attribute is set, and Netscape and IE
# otherwise. RFC 1866 and HTML 4 are always violated insofar as you
# can deselect all items in a RadioControl.
for o in self.items:
# set items' controls to self, now that we've merged
o.__dict__["_control"] = self
def __getattr__(self, name):
if name == "value":
compat = self._form.backwards_compat
if self.name is None:
return []
return [o.name for o in self.items if o.selected and
(not o.disabled or compat)]
else:
raise AttributeError("%s instance has no attribute '%s'" %
(self.__class__.__name__, name))
def __setattr__(self, name, value):
if name == "value":
if self.disabled:
raise AttributeError("control '%s' is disabled" % self.name)
if self.readonly:
raise AttributeError("control '%s' is readonly" % self.name)
self._set_value(value)
elif name in ("name", "type", "multiple"):
raise AttributeError("%s attribute is readonly" % name)
else:
self.__dict__[name] = value
def _set_value(self, value):
if value is None or isstringlike(value):
raise TypeError("ListControl, must set a sequence")
if not value:
compat = self._form.backwards_compat
for o in self.items:
if not o.disabled or compat:
o.selected = False
elif self.multiple:
self._multiple_set_value(value)
elif len(value) > 1:
raise ItemCountError(
"single selection list, must set sequence of "
"length 0 or 1")
else:
self._single_set_value(value)
def _get_items(self, name, target=1):
all_items = self.get_items(name)
items = [o for o in all_items if not o.disabled]
if len(items) < target:
if len(all_items) < target:
raise ItemNotFoundError(
"insufficient items with name %r" % name)
else:
raise AttributeError(
"insufficient non-disabled items with name %s" % name)
on = []
off = []
for o in items:
if o.selected:
on.append(o)
else:
off.append(o)
return on, off
def _single_set_value(self, value):
assert len(value) == 1
on, off = self._get_items(value[0])
assert len(on) <= 1
if not on:
off[0].selected = True
def _multiple_set_value(self, value):
compat = self._form.backwards_compat
turn_on = [] # transactional-ish
turn_off = [item for item in self.items if
item.selected and (not item.disabled or compat)]
names = {}
for nn in value:
if nn in names.keys():
names[nn] += 1
else:
names[nn] = 1
for name, count in names.items():
on, off = self._get_items(name, count)
for i in range(count):
if on:
item = on[0]
del on[0]
del turn_off[turn_off.index(item)]
else:
item = off[0]
del off[0]
turn_on.append(item)
for item in turn_off:
item.selected = False
for item in turn_on:
item.selected = True
def set_value_by_label(self, value):
"""Set the value of control by item labels.
value is expected to be an iterable of strings that are substrings of
the item labels that should be selected. Before substring matching is
performed, the original label text is whitespace-compressed
(consecutive whitespace characters are converted to a single space
character) and leading and trailing whitespace is stripped. Ambiguous
labels are accepted without complaint if the form's backwards_compat is
True; otherwise, it will not complain as long as all ambiguous labels
share the same item name (e.g. OPTION value).
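Illustrative example (assumes a single-selection SELECT control whose item
labels include "United Kingdom"):
control.set_value_by_label(["United Kingdom"])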
"""
if isstringlike(value):
raise TypeError(value)
if not self.multiple and len(value) > 1:
raise ItemCountError(
"single selection list, must set sequence of "
"length 0 or 1")
items = []
for nn in value:
found = self.get_items(label=nn)
if len(found) > 1:
if not self._form.backwards_compat:
# ambiguous labels are fine as long as item names (e.g.
# OPTION values) are same
opt_name = found[0].name
if [o for o in found[1:] if o.name != opt_name]:
raise AmbiguityError(nn)
else:
# OK, we'll guess :-( Assume first available item.
found = found[:1]
for o in found:
# For the multiple-item case, we could try to be smarter,
# saving them up and trying to resolve, but that's too much.
if self._form.backwards_compat or o not in items:
items.append(o)
break
else: # all of them are used
raise ItemNotFoundError(nn)
# now we have all the items that should be on
# let's just turn everything off and then back on.
self.value = []
for o in items:
o.selected = True
def get_value_by_label(self):
"""Return the value of the control as given by normalized labels."""
res = []
compat = self._form.backwards_compat
for o in self.items:
if (not o.disabled or compat) and o.selected:
for l in o.get_labels():
if l.text:
res.append(l.text)
break
else:
res.append(None)
return res
def possible_items(self, by_label=False):
"""Deprecated: return the names or labels of all possible items.
Includes disabled items, which may be misleading for some use cases.
"""
deprecation(
"[item.name for item in self.items]")
if by_label:
res = []
for o in self.items:
for l in o.get_labels():
if l.text:
res.append(l.text)
break
else:
res.append(None)
return res
return [o.name for o in self.items]
def _totally_ordered_pairs(self):
if self.disabled or self.name is None:
return []
else:
return [(o._index, self.name, o.name) for o in self.items
if o.selected and not o.disabled]
def __str__(self):
name = self.name
if name is None: name = "<None>"
display = [str(o) for o in self.items]
infos = []
if self.disabled: infos.append("disabled")
if self.readonly: infos.append("readonly")
info = ", ".join(infos)
if info: info = " (%s)" % info
return "<%s(%s=[%s])%s>" % (self.__class__.__name__,
name, ", ".join(display), info)
class RadioControl(ListControl):
"""
Covers:
INPUT/RADIO
"""
def __init__(self, type, name, attrs, select_default=False, index=None):
attrs.setdefault("value", "on")
ListControl.__init__(self, type, name, attrs, select_default,
called_as_base_class=True, index=index)
self.__dict__["multiple"] = False
o = Item(self, attrs, index)
o.__dict__["_selected"] = attrs.has_key("checked")
def fixup(self):
ListControl.fixup(self)
found = [o for o in self.items if o.selected and not o.disabled]
if not found:
if self._select_default:
for o in self.items:
if not o.disabled:
o.selected = True
break
else:
# Ensure only one item selected. Choose the last one,
# following IE and Firefox.
for o in found[:-1]:
o.selected = False
def get_labels(self):
return []
class CheckboxControl(ListControl):
"""
Covers:
INPUT/CHECKBOX
"""
def __init__(self, type, name, attrs, select_default=False, index=None):
attrs.setdefault("value", "on")
ListControl.__init__(self, type, name, attrs, select_default,
called_as_base_class=True, index=index)
self.__dict__["multiple"] = True
o = Item(self, attrs, index)
o.__dict__["_selected"] = attrs.has_key("checked")
def get_labels(self):
return []
class SelectControl(ListControl):
"""
Covers:
SELECT (and OPTION)
OPTION 'values', in HTML parlance, are Item 'names' in mechanize parlance.
SELECT control values and labels are subject to some messy defaulting
rules. For example, if the HTML representation of the control is:
<SELECT name=year>
<OPTION value=0 label="2002">current year</OPTION>
<OPTION value=1>2001</OPTION>
<OPTION>2000</OPTION>
</SELECT>
The items, in order, have labels "2002", "2001" and "2000", whereas their
names (the OPTION values) are "0", "1" and "2000" respectively. Note that
the value of the last OPTION in this example defaults to its contents, as
specified by RFC 1866, as do the labels of the second and third OPTIONs.
The OPTION labels are sometimes more meaningful than the OPTION values,
which can make for more maintainable code.
Additional read-only public attribute: attrs
The attrs attribute is a dictionary of the original HTML attributes of the
SELECT element. Other ListControls do not have this attribute, because in
other cases the control as a whole does not correspond to any single HTML
element. control.get(...).attrs may be used as usual to get at the HTML
attributes of the HTML elements corresponding to individual list items (for
SELECT controls, these are OPTION elements).
Another special case is that the Item.attrs dictionaries have a special key
"contents" which does not correspond to any real HTML attribute, but rather
contains the contents of the OPTION element:
<OPTION>this bit</OPTION>
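Illustrative example, referring to the SELECT control named "year" shown
above:
control = form.find_control("year")
control.get(label="2002").name         # "0"
control.get("2000").attrs["contents"]  # "2000"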
"""
# HTML attributes here are treated slightly differently from other list
# controls:
# -The SELECT HTML attributes dictionary is stuffed into the OPTION
# HTML attributes dictionary under the "__select" key.
# -The content of each OPTION element is stored under the special
# "contents" key of the dictionary.
# After all this, the dictionary is passed to the SelectControl constructor
# as the attrs argument, as usual. However:
# -The first SelectControl constructed when building up a SELECT control
# has a constructor attrs argument containing only the __select key -- so
# this SelectControl represents an empty SELECT control.
# -Subsequent SelectControls have both OPTION HTML-attribute in attrs and
# the __select dictionary containing the SELECT HTML-attributes.
def __init__(self, type, name, attrs, select_default=False, index=None):
# fish out the SELECT HTML attributes from the OPTION HTML attributes
# dictionary
self.attrs = attrs["__select"].copy()
self.__dict__["_label"] = _get_label(self.attrs)
self.__dict__["id"] = self.attrs.get("id")
self.__dict__["multiple"] = self.attrs.has_key("multiple")
# the majority of the contents, label, and value dance already happened
contents = attrs.get("contents")
attrs = attrs.copy()
del attrs["__select"]
ListControl.__init__(self, type, name, self.attrs, select_default,
called_as_base_class=True, index=index)
self.disabled = self.attrs.has_key("disabled")
self.readonly = self.attrs.has_key("readonly")
if attrs.has_key("value"):
# otherwise it is a marker 'select started' token
o = Item(self, attrs, index)
o.__dict__["_selected"] = attrs.has_key("selected")
# add 'label' label and contents label, if different. If both are
# provided, the 'label' label is used for display in HTML
# 4.0-compliant browsers (and any lower spec? not sure) while the
# contents are used for display in older or less-compliant
# browsers. We make label objects for both, if the values are
# different.
label = attrs.get("label")
if label:
o._labels.append(Label({"__text": label}))
if contents and contents != label:
o._labels.append(Label({"__text": contents}))
elif contents:
o._labels.append(Label({"__text": contents}))
def fixup(self):
ListControl.fixup(self)
# Firefox doesn't exclude disabled items from those considered here
# (i.e. from 'found', for both branches of the if below). Note that
# IE6 doesn't support the disabled attribute on OPTIONs at all.
found = [o for o in self.items if o.selected]
if not found:
if not self.multiple or self._select_default:
for o in self.items:
if not o.disabled:
was_disabled = self.disabled
self.disabled = False
try:
o.selected = True
finally:
o.disabled = was_disabled
break
elif not self.multiple:
# Ensure only one item selected. Choose the last one,
# following IE and Firefox.
for o in found[:-1]:
o.selected = False
#---------------------------------------------------
class SubmitControl(ScalarControl):
"""
Covers:
INPUT/SUBMIT
BUTTON/SUBMIT
"""
def __init__(self, type, name, attrs, index=None):
ScalarControl.__init__(self, type, name, attrs, index)
# IE5 defaults SUBMIT value to "Submit Query"; Firebird 0.6 leaves it
# blank, Konqueror 3.1 defaults to "Submit". HTML spec. doesn't seem
# to define this.
if self.value is None: self.value = ""
self.readonly = True
def get_labels(self):
res = []
if self.value:
res.append(Label({"__text": self.value}))
res.extend(ScalarControl.get_labels(self))
return res
def is_of_kind(self, kind): return kind == "clickable"
def _click(self, form, coord, return_type, request_class=_request.Request):
self._clicked = coord
r = form._switch_click(return_type, request_class)
self._clicked = False
return r
def _totally_ordered_pairs(self):
if not self._clicked:
return []
return ScalarControl._totally_ordered_pairs(self)
#---------------------------------------------------
class ImageControl(SubmitControl):
"""
Covers:
INPUT/IMAGE
Coordinates are specified using one of the HTMLForm.click* methods.
"""
def __init__(self, type, name, attrs, index=None):
SubmitControl.__init__(self, type, name, attrs, index)
self.readonly = False
def _totally_ordered_pairs(self):
clicked = self._clicked
if self.disabled or not clicked:
return []
name = self.name
if name is None: return []
pairs = [
(self._index, "%s.x" % name, str(clicked[0])),
(self._index+1, "%s.y" % name, str(clicked[1])),
]
value = self._value
if value:
pairs.append((self._index+2, name, value))
return pairs
get_labels = ScalarControl.get_labels
# aliases, just to make str(control) and str(form) clearer
class PasswordControl(TextControl): pass
class HiddenControl(TextControl): pass
class TextareaControl(TextControl): pass
class SubmitButtonControl(SubmitControl): pass
def is_listcontrol(control): return control.is_of_kind("list")
class HTMLForm:
"""Represents a single HTML <form> ... </form> element.
A form consists of a sequence of controls that usually have names, and
which can take on various values. The values of the various types of
controls represent variously: text, zero-or-one-of-many or many-of-many
choices, and files to be uploaded. Some controls can be clicked on to
submit the form, and clickable controls' values sometimes include the
coordinates of the click.
Forms can be filled in with data to be returned to the server, and then
submitted, using the click method to generate a request object suitable for
passing to mechanize.urlopen (or the click_request_data or click_pairs
methods for integration with third-party code).
import mechanize
forms = mechanize.ParseFile(html, base_uri)
form = forms[0]
form["query"] = "Python"
form.find_control("nr_results").get("lots").selected = True
response = mechanize.urlopen(form.click())
Usually, HTMLForm instances are not created directly. Instead, the
ParseFile or ParseResponse factory functions are used. If you do construct
HTMLForm objects yourself, however, note that an HTMLForm instance is only
properly initialised after the fixup method has been called (ParseFile and
ParseResponse do this for you). See ListControl.__doc__ for the reason
this is required.
Indexing a form (form["control_name"]) returns the named Control's value
attribute. Assignment to a form index (form["control_name"] = something)
is equivalent to assignment to the named Control's value attribute. If you
need to be more specific than just supplying the control's name, use the
set_value and get_value methods.
ListControl values are lists of item names (specifically, the names of the
items that are selected and not disabled, and hence are "successful" -- ie.
cause data to be returned to the server). The list item's name is the
value of the corresponding HTML element's "value" attribute.
Example:
<INPUT type="CHECKBOX" name="cheeses" value="leicester"></INPUT>
<INPUT type="CHECKBOX" name="cheeses" value="cheddar"></INPUT>
defines a CHECKBOX control with name "cheeses" which has two items, named
"leicester" and "cheddar".
Another example:
<SELECT name="more_cheeses">
<OPTION>1</OPTION>
<OPTION value="2" label="CHEDDAR">cheddar</OPTION>
</SELECT>
defines a SELECT control with name "more_cheeses" which has two items,
named "1" and "2" (because the OPTION element's value HTML attribute
defaults to the element contents -- see SelectControl.__doc__ for more on
these defaulting rules).
To select, deselect or otherwise manipulate individual list items, use the
HTMLForm.find_control() and ListControl.get() methods. To set the whole
value, do as for any other control: use indexing or the set_/get_value
methods.
Example:
# select *only* the item named "cheddar"
form["cheeses"] = ["cheddar"]
# select "cheddar", leave other items unaffected
form.find_control("cheeses").get("cheddar").selected = True
Some controls (RADIO and SELECT without the multiple attribute) can only
have zero or one items selected at a time. Some controls (CHECKBOX and
SELECT with the multiple attribute) can have multiple items selected at a
time. To set the whole value of a ListControl, assign a sequence to a form
index:
form["cheeses"] = ["cheddar", "leicester"]
If the ListControl is not multiple-selection, the assigned list must be of
length one.
To check if a control has an item, if an item is selected, or if an item is
successful (selected and not disabled), respectively:
"cheddar" in [item.name for item in form.find_control("cheeses").items]
"cheddar" in [item.name for item in form.find_control("cheeses").items and
item.selected]
"cheddar" in form["cheeses"] # (or "cheddar" in form.get_value("cheeses"))
Note that some list items may be disabled (see below).
Note the following mistake:
form[control_name] = control_value
assert form[control_name] == control_value # not necessarily true
The reason for this is that form[control_name] always gives the list items
in the order they were listed in the HTML.
List items (hence list values, too) can be referred to in terms of list
item labels rather than list item names using the appropriate label
arguments. Note that each item may have several labels.
The question of default values of OPTION contents, labels and values is
somewhat complicated: see SelectControl.__doc__ and
ListControl.get_item_attrs.__doc__ if you think you need to know.
Controls can be disabled or readonly. In either case, the control's value
cannot be changed until you clear those flags (see example below).
Disabled is the state typically represented by browsers by 'greying out' a
control. Disabled controls are not 'successful' -- they don't cause data
to get returned to the server. Readonly controls usually appear in
browsers as read-only text boxes. Readonly controls are successful. List
items can also be disabled. Attempts to select or deselect disabled items
fail with AttributeError.
If a lot of controls are readonly, it can be useful to do this:
form.set_all_readonly(False)
To clear a control's value attribute, so that it is not successful (until a
value is subsequently set):
form.clear("cheeses")
More examples:
control = form.find_control("cheeses")
control.disabled = False
control.readonly = False
control.get("gruyere").disabled = True
control.items[0].selected = True
See the various Control classes for further documentation. Many methods
take name, type, kind, id, label and nr arguments to specify the control to
be operated on: see HTMLForm.find_control.__doc__.
ControlNotFoundError (subclass of ValueError) is raised if the specified
control can't be found. This includes occasions where a non-ListControl
is found, but the method (set, for example) requires a ListControl.
ItemNotFoundError (subclass of ValueError) is raised if a list item can't
be found. ItemCountError (subclass of ValueError) is raised if an attempt
is made to select more than one item and the control doesn't allow that, or
set/get_single are called and the control contains more than one item.
AttributeError is raised if a control or item is readonly or disabled and
an attempt is made to alter its value.
Security note: Remember that any passwords you store in HTMLForm instances
will be saved to disk in the clear if you pickle them (directly or
indirectly). The simplest solution to this is to avoid pickling HTMLForm
objects. You could also pickle before filling in any password, or just set
the password to "" before pickling.
Public attributes:
action: full (absolute URI) form action
method: "GET" or "POST"
enctype: form transfer encoding MIME type
name: name of form (None if no name was specified)
attrs: dictionary mapping original HTML form attributes to their values
controls: list of Control instances; do not alter this list
(instead, call form.new_control to make a Control and add it to the
form, or control.add_to_form if you already have a Control instance)
Methods for form filling:
-------------------------
Most of these methods have very similar arguments.  See
HTMLForm.find_control.__doc__ for details of the name, type, kind, label
and nr arguments.
def find_control(self,
name=None, type=None, kind=None, id=None, predicate=None,
nr=None, label=None)
get_value(name=None, type=None, kind=None, id=None, nr=None,
by_label=False, # by_label is deprecated
label=None)
set_value(value,
name=None, type=None, kind=None, id=None, nr=None,
by_label=False, # by_label is deprecated
label=None)
clear_all()
clear(name=None, type=None, kind=None, id=None, nr=None, label=None)
set_all_readonly(readonly)
Method applying only to FileControls:
add_file(file_object,
content_type="application/octet-stream", filename=None,
name=None, id=None, nr=None, label=None)
Methods applying only to clickable controls:
click(name=None, type=None, id=None, nr=0, coord=(1,1), label=None)
click_request_data(name=None, type=None, id=None, nr=0, coord=(1,1),
label=None)
click_pairs(name=None, type=None, id=None, nr=0, coord=(1,1), label=None)
"""
type2class = {
"text": TextControl,
"password": PasswordControl,
"hidden": HiddenControl,
"textarea": TextareaControl,
"isindex": IsindexControl,
"file": FileControl,
"button": IgnoreControl,
"buttonbutton": IgnoreControl,
"reset": IgnoreControl,
"resetbutton": IgnoreControl,
"submit": SubmitControl,
"submitbutton": SubmitButtonControl,
"image": ImageControl,
"radio": RadioControl,
"checkbox": CheckboxControl,
"select": SelectControl,
}
#---------------------------------------------------
# Initialisation. Use ParseResponse / ParseFile instead.
def __init__(self, action, method="GET",
enctype="application/x-www-form-urlencoded",
name=None, attrs=None,
request_class=_request.Request,
forms=None, labels=None, id_to_labels=None,
backwards_compat=True):
"""
In the usual case, use ParseResponse (or ParseFile) to create new
HTMLForm objects.
action: full (absolute URI) form action
method: "GET" or "POST"
enctype: form transfer encoding MIME type
name: name of form
attrs: dictionary mapping original HTML form attributes to their values
"""
self.action = action
self.method = method
self.enctype = enctype
self.name = name
if attrs is not None:
self.attrs = attrs.copy()
else:
self.attrs = {}
self.controls = []
self._request_class = request_class
# these attributes are used by zope.testbrowser
self._forms = forms # this is a semi-public API!
self._labels = labels # this is a semi-public API!
self._id_to_labels = id_to_labels # this is a semi-public API!
self.backwards_compat = backwards_compat # note __setattr__
self._urlunparse = urlparse.urlunparse
self._urlparse = urlparse.urlparse
def __getattr__(self, name):
if name == "backwards_compat":
return self._backwards_compat
return getattr(HTMLForm, name)
def __setattr__(self, name, value):
# yuck
if name == "backwards_compat":
name = "_backwards_compat"
value = bool(value)
for cc in self.controls:
try:
items = cc.items
except AttributeError:
continue
else:
for ii in items:
for ll in ii.get_labels():
ll._backwards_compat = value
self.__dict__[name] = value
def new_control(self, type, name, attrs,
ignore_unknown=False, select_default=False, index=None):
"""Adds a new control to the form.
This is usually called by ParseFile and ParseResponse. Don't call it
yourself unless you're building your own Control instances.
Note that controls representing lists of items are built up from
controls holding only a single list item. See ListControl.__doc__ for
further information.
type: type of control (see Control.__doc__ for a list)
attrs: HTML attributes of control
ignore_unknown: if true, use a dummy Control instance for controls of
unknown type; otherwise, use a TextControl
select_default: for RADIO and multiple-selection SELECT controls, pick
the first item as the default if no 'selected' HTML attribute is
present (this defaulting happens when the HTMLForm.fixup method is
called)
index: index of corresponding element in HTML (see
MoreFormTests.test_interspersed_controls for motivation)
"""
type = type.lower()
klass = self.type2class.get(type)
if klass is None:
if ignore_unknown:
klass = IgnoreControl
else:
klass = TextControl
a = attrs.copy()
if issubclass(klass, ListControl):
control = klass(type, name, a, select_default, index)
else:
control = klass(type, name, a, index)
if type == "select" and len(attrs) == 1:
for ii in range(len(self.controls)-1, -1, -1):
ctl = self.controls[ii]
if ctl.type == "select":
ctl.close_control()
break
control.add_to_form(self)
control._urlparse = self._urlparse
control._urlunparse = self._urlunparse
def fixup(self):
"""Normalise form after all controls have been added.
This is usually called by ParseFile and ParseResponse. Don't call it
yourself unless you're building your own Control instances.
This method should only be called once, after all controls have been
added to the form.
"""
for control in self.controls:
control.fixup()
self.backwards_compat = self._backwards_compat
#---------------------------------------------------
def __str__(self):
header = "%s%s %s %s" % (
(self.name and self.name+" " or ""),
self.method, self.action, self.enctype)
rep = [header]
for control in self.controls:
rep.append(" %s" % str(control))
return "<%s>" % "\n".join(rep)
#---------------------------------------------------
# Form-filling methods.
def __getitem__(self, name):
return self.find_control(name).value
def __contains__(self, name):
return bool(self.find_control(name))
def __setitem__(self, name, value):
control = self.find_control(name)
try:
control.value = value
except AttributeError, e:
raise ValueError(str(e))
def get_value(self,
name=None, type=None, kind=None, id=None, nr=None,
by_label=False, # by_label is deprecated
label=None):
"""Return value of control.
If only name and value arguments are supplied, equivalent to
form[name]
"""
if by_label:
deprecation("form.get_value_by_label(...)")
c = self.find_control(name, type, kind, id, label=label, nr=nr)
if by_label:
try:
meth = c.get_value_by_label
except AttributeError:
raise NotImplementedError(
"control '%s' does not yet support by_label" % c.name)
else:
return meth()
else:
return c.value
def set_value(self, value,
name=None, type=None, kind=None, id=None, nr=None,
by_label=False, # by_label is deprecated
label=None):
"""Set value of control.
If only name and value arguments are supplied, equivalent to
form[name] = value
"""
if by_label:
deprecation("form.get_value_by_label(...)")
c = self.find_control(name, type, kind, id, label=label, nr=nr)
if by_label:
try:
meth = c.set_value_by_label
except AttributeError:
raise NotImplementedError(
"control '%s' does not yet support by_label" % c.name)
else:
meth(value)
else:
c.value = value
def get_value_by_label(
self, name=None, type=None, kind=None, id=None, label=None, nr=None):
"""
All arguments should be passed by name.
"""
c = self.find_control(name, type, kind, id, label=label, nr=nr)
return c.get_value_by_label()
def set_value_by_label(
self, value,
name=None, type=None, kind=None, id=None, label=None, nr=None):
"""
All arguments should be passed by name.
"""
c = self.find_control(name, type, kind, id, label=label, nr=nr)
c.set_value_by_label(value)
def set_all_readonly(self, readonly):
for control in self.controls:
control.readonly = bool(readonly)
def clear_all(self):
"""Clear the value attributes of all controls in the form.
See HTMLForm.clear.__doc__.
"""
for control in self.controls:
control.clear()
def clear(self,
name=None, type=None, kind=None, id=None, nr=None, label=None):
"""Clear the value attribute of a control.
As a result, the affected control will not be successful until a value
is subsequently set. AttributeError is raised on readonly controls.
"""
c = self.find_control(name, type, kind, id, label=label, nr=nr)
c.clear()
#---------------------------------------------------
# Form-filling methods applying only to ListControls.
def possible_items(self, # deprecated
name=None, type=None, kind=None, id=None,
nr=None, by_label=False, label=None):
"""Return a list of all values that the specified control can take."""
c = self._find_list_control(name, type, kind, id, label, nr)
return c.possible_items(by_label)
def set(self, selected, item_name, # deprecated
name=None, type=None, kind=None, id=None, nr=None,
by_label=False, label=None):
"""Select / deselect named list item.
selected: boolean selected state
"""
self._find_list_control(name, type, kind, id, label, nr).set(
selected, item_name, by_label)
def toggle(self, item_name, # deprecated
name=None, type=None, kind=None, id=None, nr=None,
by_label=False, label=None):
"""Toggle selected state of named list item."""
self._find_list_control(name, type, kind, id, label, nr).toggle(
item_name, by_label)
def set_single(self, selected, # deprecated
name=None, type=None, kind=None, id=None,
nr=None, by_label=None, label=None):
"""Select / deselect list item in a control having only one item.
If the control has multiple list items, ItemCountError is raised.
This is just a convenience method, so you don't need to know the item's
name -- the item name in these single-item controls is usually
something meaningless like "1" or "on".
For example, if a checkbox has a single item named "on", the following
two calls are equivalent:
control.toggle("on")
control.toggle_single()
""" # by_label ignored and deprecated
self._find_list_control(
name, type, kind, id, label, nr).set_single(selected)
def toggle_single(self, name=None, type=None, kind=None, id=None,
nr=None, by_label=None, label=None): # deprecated
"""Toggle selected state of list item in control having only one item.
The rest is as for HTMLForm.set_single.__doc__.
""" # by_label ignored and deprecated
self._find_list_control(name, type, kind, id, label, nr).toggle_single()
#---------------------------------------------------
# Form-filling method applying only to FileControls.
def add_file(self, file_object, content_type=None, filename=None,
name=None, id=None, nr=None, label=None):
"""Add a file to be uploaded.
file_object: file-like object (with read method) from which to read
data to upload
content_type: MIME content type of data to upload
filename: filename to pass to server
If filename is None, no filename is sent to the server.
If content_type is None, the content type is guessed based on the
filename and the data read from the file object.
XXX
At the moment, guessed content type is always application/octet-stream.
Use sndhdr, imghdr modules. Should also try to guess HTML, XML, and
plain text.
Note the following useful HTML attributes of file upload controls (see
HTML 4.01 spec, section 17):
accept: comma-separated list of content types that the server will
handle correctly; you can use this to filter out non-conforming files
size: XXX IIRC, this is indicative of whether form wants multiple or
single files
maxlength: XXX hint of max content length in bytes?
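Illustrative usage (the control name "picture" and the file path are
hypothetical):
form.add_file(open("/tmp/photo.jpg", "rb"), "image/jpeg", "photo.jpg",
name="picture")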
"""
self.find_control(name, "file", id=id, label=label, nr=nr).add_file(
file_object, content_type, filename)
#---------------------------------------------------
# Form submission methods, applying only to clickable controls.
def click(self, name=None, type=None, id=None, nr=0, coord=(1,1),
request_class=_request.Request,
label=None):
"""Return request that would result from clicking on a control.
The request object is a mechanize.Request instance, which you can pass
to mechanize.urlopen.
Only some control types (INPUT/SUBMIT & BUTTON/SUBMIT buttons and
IMAGEs) can be clicked.
Will click on the first clickable control, subject to the name, type
and nr arguments (as for find_control). If no name, type, id or number
is specified and there are no clickable controls, a request will be
returned for the form in its current, un-clicked, state.
IndexError is raised if any of name, type, id or nr is specified but no
matching control is found. ValueError is raised if the HTMLForm has an
enctype attribute that is not recognised.
You can optionally specify a coordinate to click at, which only makes a
difference if you clicked on an image.
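Illustrative usage (the control name "submit" is hypothetical):
request = form.click(name="submit")
response = mechanize.urlopen(request)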
"""
return self._click(name, type, id, label, nr, coord, "request",
self._request_class)
def click_request_data(self,
name=None, type=None, id=None,
nr=0, coord=(1,1),
request_class=_request.Request,
label=None):
"""As for click method, but return a tuple (url, data, headers).
You can use this data to send a request to the server. This is useful
if you're using httplib or urllib rather than mechanize. Otherwise,
use the click method.
# Untested. Have to subclass to add headers, I think -- so use
# mechanize instead!
import urllib
url, data, hdrs = form.click_request_data()
r = urllib.urlopen(url, data)
# Untested. I don't know of any reason to use httplib -- you can get
# just as much control with mechanize.
import httplib, urlparse
url, data, hdrs = form.click_request_data()
tup = urlparse.urlparse(url)
host, path = tup[1], urlparse.urlunparse((None, None)+tup[2:])
conn = httplib.HTTPConnection(host)
if data:
httplib.request("POST", path, data, hdrs)
else:
httplib.request("GET", path, headers=hdrs)
r = conn.getresponse()
"""
return self._click(name, type, id, label, nr, coord, "request_data",
self._request_class)
def click_pairs(self, name=None, type=None, id=None,
nr=0, coord=(1,1),
label=None):
"""As for click_request_data, but returns a list of (key, value) pairs.
You can use this list as an argument to urllib.urlencode. This is
usually only useful if you're using httplib or urllib rather than
mechanize. It may also be useful if you want to manually tweak the
keys and/or values, but this should not be necessary. Otherwise, use
the click method.
Note that this method is only useful for forms of MIME type
x-www-form-urlencoded. In particular, it does not return the
information required for file upload. If you need file upload and are
not using mechanize, use click_request_data.
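Illustrative usage:
import urllib
pairs = form.click_pairs()
data = urllib.urlencode(pairs)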
"""
return self._click(name, type, id, label, nr, coord, "pairs",
self._request_class)
#---------------------------------------------------
def find_control(self,
name=None, type=None, kind=None, id=None,
predicate=None, nr=None,
label=None):
"""Locate and return some specific control within the form.
At least one of the name, type, kind, predicate and nr arguments must
be supplied. If no matching control is found, ControlNotFoundError is
raised.
If name is specified, then the control must have the indicated name.
If type is specified then the control must have the specified type (in
addition to the types possible for <input> HTML tags: "text",
"password", "hidden", "submit", "image", "button", "radio", "checkbox",
"file" we also have "reset", "buttonbutton", "submitbutton",
"resetbutton", "textarea", "select" and "isindex").
If kind is specified, then the control must fall into the specified
group, each of which satisfies a particular interface. The types are
"text", "list", "multilist", "singlelist", "clickable" and "file".
If id is specified, then the control must have the indicated id.
If predicate is specified, then the control must match that function.
The predicate function is passed the control as its single argument,
and should return a boolean value indicating whether the control
matched.
nr, if supplied, is the sequence number of the control (where 0 is the
first). Note that control 0 is the first control matching all the
other arguments (if supplied); it is not necessarily the first control
in the form. If no nr is supplied, AmbiguityError is raised if
multiple controls match the other arguments (unless the
.backwards_compat attribute is true).
If label is specified, then the control must have this label. Note
that radio controls and checkboxes never have labels: their items do.
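Examples (untested sketches, not from the original documentation; the
control names are hypothetical):
  form.find_control("comment", type="textarea")
  form.find_control(kind="list", nr=1)  # second list-style control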
"""
if ((name is None) and (type is None) and (kind is None) and
(id is None) and (label is None) and (predicate is None) and
(nr is None)):
raise ValueError(
"at least one argument must be supplied to specify control")
return self._find_control(name, type, kind, id, label, predicate, nr)
#---------------------------------------------------
# Private methods.
def _find_list_control(self,
name=None, type=None, kind=None, id=None,
label=None, nr=None):
if ((name is None) and (type is None) and (kind is None) and
(id is None) and (label is None) and (nr is None)):
raise ValueError(
"at least one argument must be supplied to specify control")
return self._find_control(name, type, kind, id, label,
is_listcontrol, nr)
def _find_control(self, name, type, kind, id, label, predicate, nr):
if ((name is not None) and (name is not Missing) and
not isstringlike(name)):
raise TypeError("control name must be string-like")
if (type is not None) and not isstringlike(type):
raise TypeError("control type must be string-like")
if (kind is not None) and not isstringlike(kind):
raise TypeError("control kind must be string-like")
if (id is not None) and not isstringlike(id):
raise TypeError("control id must be string-like")
if (label is not None) and not isstringlike(label):
raise TypeError("control label must be string-like")
if (predicate is not None) and not callable(predicate):
raise TypeError("control predicate must be callable")
if (nr is not None) and nr < 0:
raise ValueError("control number must be a positive integer")
orig_nr = nr
found = None
ambiguous = False
if nr is None and self.backwards_compat:
nr = 0
for control in self.controls:
if ((name is not None and name != control.name) and
(name is not Missing or control.name is not None)):
continue
if type is not None and type != control.type:
continue
if kind is not None and not control.is_of_kind(kind):
continue
if id is not None and id != control.id:
continue
if predicate and not predicate(control):
continue
if label:
for l in control.get_labels():
if l.text.find(label) > -1:
break
else:
continue
if nr is not None:
if nr == 0:
return control # early exit: unambiguous due to nr
nr -= 1
continue
if found:
ambiguous = True
break
found = control
if found and not ambiguous:
return found
description = []
if name is not None: description.append("name %s" % repr(name))
if type is not None: description.append("type '%s'" % type)
if kind is not None: description.append("kind '%s'" % kind)
if id is not None: description.append("id '%s'" % id)
if label is not None: description.append("label '%s'" % label)
if predicate is not None:
description.append("predicate %s" % predicate)
if orig_nr is not None: description.append("nr %d" % orig_nr)
description = ", ".join(description)
if ambiguous:
raise AmbiguityError("more than one control matching "+description)
elif not found:
raise ControlNotFoundError("no control matching "+description)
assert False
def _click(self, name, type, id, label, nr, coord, return_type,
request_class=_request.Request):
try:
control = self._find_control(
name, type, "clickable", id, label, None, nr)
except ControlNotFoundError:
if ((name is not None) or (type is not None) or (id is not None) or
(nr != 0)):
raise
# no clickable controls, but no control was explicitly requested,
# so return state without clicking any control
return self._switch_click(return_type, request_class)
else:
return control._click(self, coord, return_type, request_class)
def _pairs(self):
"""Return sequence of (key, value) pairs suitable for urlencoding."""
return [(k, v) for (i, k, v, c_i) in self._pairs_and_controls()]
def _pairs_and_controls(self):
"""Return sequence of (index, key, value, control_index)
of totally ordered pairs suitable for urlencoding.
control_index is the index of the control in self.controls
"""
pairs = []
for control_index in range(len(self.controls)):
control = self.controls[control_index]
for ii, key, val in control._totally_ordered_pairs():
pairs.append((ii, key, val, control_index))
# stable sort by ONLY first item in tuple
pairs.sort()
return pairs
def _request_data(self):
"""Return a tuple (url, data, headers)."""
method = self.method.upper()
#scheme, netloc, path, parameters, query, frag = urlparse.urlparse(self.action)
parts = self._urlparse(self.action)
rest, (query, frag) = parts[:-2], parts[-2:]
if method == "GET":
if self.enctype != "application/x-www-form-urlencoded":
raise ValueError(
"unknown GET form encoding type '%s'" % self.enctype)
parts = rest + (urllib.urlencode(self._pairs()), None)
uri = self._urlunparse(parts)
return uri, None, []
elif method == "POST":
parts = rest + (query, None)
uri = self._urlunparse(parts)
if self.enctype == "application/x-www-form-urlencoded":
return (uri, urllib.urlencode(self._pairs()),
[("Content-Type", self.enctype)])
elif self.enctype == "multipart/form-data":
data = StringIO()
http_hdrs = []
mw = MimeWriter(data, http_hdrs)
mw.startmultipartbody("form-data", add_to_http_hdrs=True,
prefix=0)
for ii, k, v, control_index in self._pairs_and_controls():
self.controls[control_index]._write_mime_data(mw, k, v)
mw.lastpart()
return uri, data.getvalue(), http_hdrs
else:
raise ValueError(
"unknown POST form encoding type '%s'" % self.enctype)
else:
raise ValueError("Unknown method '%s'" % method)
def _switch_click(self, return_type, request_class=_request.Request):
# This is called by HTMLForm and clickable Controls to hide switching
# on return_type.
if return_type == "pairs":
return self._pairs()
elif return_type == "request_data":
return self._request_data()
else:
req_data = self._request_data()
req = request_class(req_data[0], req_data[1])
for key, val in req_data[2]:
add_hdr = req.add_header
if key.lower() == "content-type":
try:
add_hdr = req.add_unredirected_header
except AttributeError:
# pre-2.4 and not using ClientCookie
pass
add_hdr(key, val)
return req
|
qizenguf/MLC-STT
|
refs/heads/master
|
src/mem/slicc/ast/StallAndWaitStatementAST.py
|
26
|
# Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# Copyright (c) 2010 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.ast.StatementAST import StatementAST
class StallAndWaitStatementAST(StatementAST):
def __init__(self, slicc, in_port, address):
super(StallAndWaitStatementAST, self).__init__(slicc)
self.in_port = in_port
self.address = address
def __repr__(self):
return "[StallAndWaitStatementAst: %r]" % self.in_port
def generate(self, code, return_type):
self.in_port.assertType("InPort")
self.address.assertType("Addr")
in_port_code = self.in_port.var.code
address_code = self.address.var.code
code('''
stallBuffer(&($in_port_code), $address_code);
$in_port_code.stallMessage($address_code, clockEdge());
''')
|
garyfeng/pybrain
|
refs/heads/master
|
pybrain/rl/environments/mazes/tasks/__init__.py
|
4
|
from pybrain.rl.environments.mazes.tasks.tiger import TigerTask
from pybrain.rl.environments.mazes.tasks.maze import TrivialMaze, MazeTask
from pybrain.rl.environments.mazes.tasks.cheesemaze import CheeseMaze
from pybrain.rl.environments.mazes.tasks.tmaze import TMaze
from pybrain.rl.environments.mazes.tasks.maze4x3 import FourByThreeMaze
from pybrain.rl.environments.mazes.tasks.maze89state import EightyNineStateMaze
from pybrain.rl.environments.mazes.tasks.shuttle import ShuttleDocking
from pybrain.rl.environments.mazes.tasks.mdp import MDPMazeTask
|
evinstk/TantechEngineOriginal
|
refs/heads/master
|
lib/googletest-release-1.7.0/test/gtest_list_tests_unittest.py
|
1898
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's --gtest_list_tests flag.
A user can ask Google Test to list all tests by specifying the
--gtest_list_tests flag. This script tests such functionality
by invoking gtest_list_tests_unittest_ (a program written with
Google Test) with the command line flags.
"""
__author__ = '[email protected] (Patrick Hanna)'
import gtest_test_utils
import re
# Constants.
# The command line flag for enabling/disabling listing all tests.
LIST_TESTS_FLAG = 'gtest_list_tests'
# Path to the gtest_list_tests_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath('gtest_list_tests_unittest_')
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests
EXPECTED_OUTPUT_NO_FILTER_RE = re.compile(r"""FooDeathTest\.
Test1
Foo\.
Bar1
Bar2
DISABLED_Bar3
Abc\.
Xyz
Def
FooBar\.
Baz
FooTest\.
Test1
DISABLED_Test2
Test3
TypedTest/0\. # TypeParam = (VeryLo{245}|class VeryLo{239})\.\.\.
TestA
TestB
TypedTest/1\. # TypeParam = int\s*\*
TestA
TestB
TypedTest/2\. # TypeParam = .*MyArray<bool,\s*42>
TestA
TestB
My/TypeParamTest/0\. # TypeParam = (VeryLo{245}|class VeryLo{239})\.\.\.
TestA
TestB
My/TypeParamTest/1\. # TypeParam = int\s*\*
TestA
TestB
My/TypeParamTest/2\. # TypeParam = .*MyArray<bool,\s*42>
TestA
TestB
MyInstantiation/ValueParamTest\.
TestA/0 # GetParam\(\) = one line
TestA/1 # GetParam\(\) = two\\nlines
TestA/2 # GetParam\(\) = a very\\nlo{241}\.\.\.
TestB/0 # GetParam\(\) = one line
TestB/1 # GetParam\(\) = two\\nlines
TestB/2 # GetParam\(\) = a very\\nlo{241}\.\.\.
""")
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests and --gtest_filter=Foo*.
EXPECTED_OUTPUT_FILTER_FOO_RE = re.compile(r"""FooDeathTest\.
Test1
Foo\.
Bar1
Bar2
DISABLED_Bar3
FooBar\.
Baz
FooTest\.
Test1
DISABLED_Test2
Test3
""")
# Utilities.
def Run(args):
"""Runs gtest_list_tests_unittest_ and returns the list of tests printed."""
return gtest_test_utils.Subprocess([EXE_PATH] + args,
capture_stderr=False).output
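# Illustrative only (not part of the original test): the subprocess above is
# equivalent to invoking the binary by hand, e.g.
#
#   ./gtest_list_tests_unittest_ --gtest_list_tests --gtest_filter=Foo*
#
# which is what Run(['--gtest_list_tests', '--gtest_filter=Foo*']) captures.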
# The unit test.
class GTestListTestsUnitTest(gtest_test_utils.TestCase):
"""Tests using the --gtest_list_tests flag to list all tests."""
def RunAndVerify(self, flag_value, expected_output_re, other_flag):
"""Runs gtest_list_tests_unittest_ and verifies that it prints
the correct tests.
Args:
flag_value: value of the --gtest_list_tests flag;
None if the flag should not be present.
expected_output_re: regular expression that matches the expected
output after running the command;
other_flag: a different flag to be passed to the command
along with --gtest_list_tests;
None if the flag should not be present.
"""
if flag_value is None:
flag = ''
flag_expression = 'not set'
elif flag_value == '0':
flag = '--%s=0' % LIST_TESTS_FLAG
flag_expression = '0'
else:
flag = '--%s' % LIST_TESTS_FLAG
flag_expression = '1'
args = [flag]
if other_flag is not None:
args += [other_flag]
output = Run(args)
if expected_output_re:
self.assert_(
expected_output_re.match(output),
('when %s is %s, the output of "%s" is "%s",\n'
'which does not match regex "%s"' %
(LIST_TESTS_FLAG, flag_expression, ' '.join(args), output,
expected_output_re.pattern)))
else:
self.assert_(
not EXPECTED_OUTPUT_NO_FILTER_RE.match(output),
('when %s is %s, the output of "%s" is "%s"'%
(LIST_TESTS_FLAG, flag_expression, ' '.join(args), output)))
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(flag_value=None,
expected_output_re=None,
other_flag=None)
def testFlag(self):
"""Tests using the --gtest_list_tests flag."""
self.RunAndVerify(flag_value='0',
expected_output_re=None,
other_flag=None)
self.RunAndVerify(flag_value='1',
expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE,
other_flag=None)
def testOverrideNonFilterFlags(self):
"""Tests that --gtest_list_tests overrides the non-filter flags."""
self.RunAndVerify(flag_value='1',
expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE,
other_flag='--gtest_break_on_failure')
def testWithFilterFlags(self):
"""Tests that --gtest_list_tests takes into account the
--gtest_filter flag."""
self.RunAndVerify(flag_value='1',
expected_output_re=EXPECTED_OUTPUT_FILTER_FOO_RE,
other_flag='--gtest_filter=Foo*')
if __name__ == '__main__':
gtest_test_utils.Main()
|
hguemar/cinder
|
refs/heads/master
|
cinder/scheduler/weights/goodness.py
|
5
|
# Copyright (C) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from cinder.i18n import _LW
from cinder.openstack.common import log as logging
from cinder.openstack.common.scheduler import weights
from cinder.scheduler.evaluator import evaluator
LOG = logging.getLogger(__name__)
class GoodnessWeigher(weights.BaseHostWeigher):
"""Goodness Weigher. Assign weights based on a host's goodness function.
Goodness rating is the following:
0 -- host is a poor choice
...
50 -- host is a good choice
...
100 -- host is a perfect choice
"""
def _weigh_object(self, host_state, weight_properties):
"""Determine host's goodness rating based on a goodness_function."""
stats = self._generate_stats(host_state, weight_properties)
LOG.debug("Checking host '%s'", stats['host_stats']['host'])
result = self._check_goodness_function(stats)
LOG.debug("Goodness: %s", result)
LOG.debug("Done checking host '%s'", stats['host_stats']['host'])
return result
def _check_goodness_function(self, stats):
"""Gets a host's goodness rating based on its goodness function."""
goodness_rating = 0
if stats['goodness_function'] is None:
LOG.warning(_LW("Goodness function not set :: defaulting to "
"minimal goodness rating of 0"))
else:
try:
goodness_result = self._run_evaluator(
stats['goodness_function'],
stats)
except Exception as ex:
LOG.warning(_LW("Error in goodness_function function "
"'%(function)s' : '%(error)s' :: Defaulting "
"to a goodness of 0"),
{'function': stats['goodness_function'],
'error': ex, })
return goodness_rating
if type(goodness_result) is bool:
if goodness_result:
goodness_rating = 100
elif goodness_result < 0 or goodness_result > 100:
LOG.warning(_LW("Invalid goodness result. Result must be "
"between 0 and 100. Result generated: '%s' "
":: Defaulting to a goodness of 0"),
goodness_result)
else:
goodness_rating = goodness_result
return goodness_rating
def _run_evaluator(self, func, stats):
"""Evaluates a given function using the provided available stats."""
host_stats = stats['host_stats']
host_caps = stats['host_caps']
extra_specs = stats['extra_specs']
qos_specs = stats['qos_specs']
volume_stats = stats['volume_stats']
result = evaluator.evaluate(
func,
extra=extra_specs,
stats=host_stats,
capabilities=host_caps,
volume=volume_stats,
qos=qos_specs)
return result
def _generate_stats(self, host_state, weight_properties):
"""Generates statistics from host and volume data."""
host_stats = {
'host': host_state.host,
'volume_backend_name': host_state.volume_backend_name,
'vendor_name': host_state.vendor_name,
'driver_version': host_state.driver_version,
'storage_protocol': host_state.storage_protocol,
'QoS_support': host_state.QoS_support,
'total_capacity_gb': host_state.total_capacity_gb,
'allocated_capacity_gb': host_state.allocated_capacity_gb,
'free_capacity_gb': host_state.free_capacity_gb,
'reserved_percentage': host_state.reserved_percentage,
'updated': host_state.updated,
}
host_caps = host_state.capabilities
goodness_function = None
if ('goodness_function' in host_caps and
host_caps['goodness_function'] is not None):
goodness_function = six.text_type(host_caps['goodness_function'])
qos_specs = weight_properties.get('qos_specs', {})
volume_type = weight_properties.get('volume_type', {})
extra_specs = volume_type.get('extra_specs', {})
request_spec = weight_properties.get('request_spec', {})
volume_stats = request_spec.get('volume_properties', {})
stats = {
'host_stats': host_stats,
'host_caps': host_caps,
'extra_specs': extra_specs,
'qos_specs': qos_specs,
'volume_stats': volume_stats,
'volume_type': volume_type,
'goodness_function': goodness_function,
}
return stats
|
undoware/neutron-drive
|
refs/heads/master
|
google_appengine/lib/django_1_2/tests/regressiontests/file_uploads/models.py
|
110
|
import tempfile
import os
from django.db import models
from django.core.files.storage import FileSystemStorage
temp_storage = FileSystemStorage(tempfile.mkdtemp())
UPLOAD_TO = os.path.join(temp_storage.location, 'test_upload')
class FileModel(models.Model):
testfile = models.FileField(storage=temp_storage, upload_to='test_upload')
|
TheJJ100100/bedrock
|
refs/heads/master
|
bedrock/mozorg/helpers/social_widgets.py
|
23
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import unicode_literals
from datetime import datetime
import urllib
import jingo
from lib.l10n_utils.dotlang import _
@jingo.register.function
def format_tweet_body(tweet):
"""
Return a tweet in an HTML format.
@param tweet: A Tweepy Status object retrieved with the Twitter REST API.
See the developer document for details:
https://dev.twitter.com/docs/platform-objects/tweets
"""
text = tweet.text
entities = tweet.entities
# Hashtags (#something)
for hashtags in entities['hashtags']:
hash = hashtags['text']
text = text.replace('#' + hash,
('<a href="https://twitter.com/search?q=%s&src=hash"'
' class="hash">#%s</a>' % ('%23' + urllib.quote(hash.encode('utf8')),
hash)))
# Mentions (@someone)
for user in entities['user_mentions']:
name = user['screen_name']
text = text.replace('@' + name,
('<a href="https://twitter.com/%s" class="mention">@%s</a>'
% (urllib.quote(name.encode('utf8')), name)))
# URLs
for url in entities['urls']:
text = text.replace(url['url'],
('<a href="%s" title="%s">%s</a>'
% (url['url'], url['expanded_url'], url['display_url'])))
# Media
if entities.get('media'):
for medium in entities['media']:
text = text.replace(medium['url'],
('<a href="%s" title="%s" class="media">%s</a>'
% (medium['url'], medium['expanded_url'],
medium['display_url'])))
return text
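# Illustrative sketch (not part of the original module): format_tweet_body()
# only needs an object with `text` and `entities` attributes, so a stand-in
# for a Tweepy Status is enough for local testing:
#
#   class FakeTweet(object):
#       text = 'Hello @mozilla #fx'
#       entities = {'hashtags': [{'text': 'fx'}],
#                   'user_mentions': [{'screen_name': 'mozilla'}],
#                   'urls': []}
#   html = format_tweet_body(FakeTweet())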
@jingo.register.function
def format_tweet_timestamp(tweet):
"""
Return an HTML time element filled with a tweet timestamp.
@param tweet: A Tweepy Status object retrieved with the Twitter REST API.
For a tweet posted within the last 24 hours, the timestamp label should be
a relative format like "20s", "3m" or "5h", otherwise it will be a simple
date like "6 Jun". See the Display Requirements for details:
https://dev.twitter.com/terms/display-requirements
"""
now = datetime.utcnow()
created = tweet.created_at # A datetime object
diff = now - created # A timedelta Object
if diff.days == 0:
if diff.seconds < 60:
label = _('%ds') % diff.seconds
elif diff.seconds < 60 * 60:
label = _('%dm') % round(diff.seconds / 60)
else:
label = _('%dh') % round(diff.seconds / 60 / 60)
else:
label = created.strftime("%-d %b")
full = created.strftime("%Y-%m-%d %H:%M")
return ('<time datetime="%s" title="%s" itemprop="dateCreated">%s '
'<span class="full">(%s)</span></time>'
% (created.isoformat(), full, label, full))
|
kwikadi/orange3
|
refs/heads/master
|
Orange/widgets/data/owfeatureconstructor.py
|
2
|
"""
Feature Constructor
A widget for defining (constructing) new features from values
of other variables.
"""
import sys
import re
import copy
import functools
import builtins
import math
import random
from collections import namedtuple, OrderedDict
from itertools import chain, count
from PyQt4 import QtGui
from PyQt4.QtGui import QSizePolicy
from PyQt4.QtCore import Qt, pyqtSignal as Signal, pyqtProperty as Property
import Orange
from Orange.widgets import widget, gui
from Orange.widgets.settings import DomainContextHandler, ContextSetting
from Orange.widgets.utils import itemmodels, vartype
from Orange.widgets.utils.sql import check_sql_input
from Orange.canvas import report
FeatureDescriptor = \
namedtuple("FeatureDescriptor", ["name", "expression"])
ContinuousDescriptor = \
namedtuple("ContinuousDescriptor",
["name", "expression", "number_of_decimals"])
DiscreteDescriptor = \
namedtuple("DiscreteDescriptor",
["name", "expression", "values", "base_value", "ordered"])
StringDescriptor = namedtuple("StringDescriptor", ["name", "expression"])
@functools.lru_cache(50)
def make_variable(descriptor, compute_value=None):
if compute_value is None:
if descriptor.expression.strip():
compute_value = \
lambda instance: eval(descriptor.expression,
{"instance": instance, "_": instance})
else:
compute_value = lambda _: float("nan")
if isinstance(descriptor, ContinuousDescriptor):
return Orange.data.ContinuousVariable(
descriptor.name,
descriptor.number_of_decimals,
compute_value)
elif isinstance(descriptor, DiscreteDescriptor):
return Orange.data.DiscreteVariable(
descriptor.name,
values=descriptor.values,
ordered=descriptor.ordered,
base_value=descriptor.base_value,
compute_value=compute_value)
elif isinstance(descriptor, StringDescriptor):
return Orange.data.StringVariable(
descriptor.name,
compute_value=compute_value)
else:
raise TypeError
def is_valid_expression(exp):
try:
ast.parse(exp, mode="eval")
return True
except Exception:
return False
def selected_row(view):
"""
Return the index of the selected row in a `view` (:class:`QListView`).
The view's selection mode must be QAbstractItemView.SingleSelection.
"""
if view.selectionMode() in [QtGui.QAbstractItemView.MultiSelection,
QtGui.QAbstractItemView.ExtendedSelection]:
raise ValueError("invalid 'selectionMode'")
sel_model = view.selectionModel()
indexes = sel_model.selectedRows()
if indexes:
assert len(indexes) == 1
return indexes[0].row()
else:
return None
class FeatureEditor(QtGui.QFrame):
FUNCTIONS = dict(chain([(key, val) for key, val in math.__dict__.items()
if not key.startswith("_")],
[("str", str)]))
featureChanged = Signal()
featureEdited = Signal()
modifiedChanged = Signal(bool)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
layout = QtGui.QFormLayout(
fieldGrowthPolicy=QtGui.QFormLayout.ExpandingFieldsGrow
)
layout.setContentsMargins(0, 0, 0, 0)
self.nameedit = QtGui.QLineEdit(
placeholderText="Name...",
sizePolicy=QSizePolicy(QSizePolicy.Minimum,
QSizePolicy.Fixed)
)
self.expressionedit = QtGui.QLineEdit(
placeholderText="Expression..."
)
self.attrs_model = itemmodels.VariableListModel(
["Select feature"], parent=self)
self.attributescb = QtGui.QComboBox(
minimumContentsLength=16,
sizeAdjustPolicy=QtGui.QComboBox.AdjustToMinimumContentsLengthWithIcon,
sizePolicy=QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum,
QtGui.QSizePolicy.Minimum)
)
self.attributescb.setModel(self.attrs_model)
sorted_funcs = sorted(self.FUNCTIONS)
self.funcs_model = itemmodels.PyListModelTooltip()
self.funcs_model.setParent(self)
self.funcs_model[:] = chain(["Select function"], sorted_funcs)
self.funcs_model.tooltips[:] = chain(
[''],
[self.FUNCTIONS[func].__doc__ for func in sorted_funcs])
self.functionscb = QtGui.QComboBox(
minimumContentsLength=16,
sizeAdjustPolicy=QtGui.QComboBox.AdjustToMinimumContentsLengthWithIcon,
sizePolicy=QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum,
QtGui.QSizePolicy.Minimum))
self.functionscb.setModel(self.funcs_model)
hbox = QtGui.QHBoxLayout()
hbox.addWidget(self.attributescb)
hbox.addWidget(self.functionscb)
layout.addRow(self.nameedit, self.expressionedit)
layout.addRow(self.tr(""), hbox)
self.setLayout(layout)
self.nameedit.editingFinished.connect(self._invalidate)
self.expressionedit.textChanged.connect(self._invalidate)
self.attributescb.currentIndexChanged.connect(self.on_attrs_changed)
self.functionscb.currentIndexChanged.connect(self.on_funcs_changed)
self._modified = False
def setModified(self, modified):
if not isinstance(modified, bool):
raise TypeError
if self._modified != modified:
self._modified = modified
self.modifiedChanged.emit(modified)
def modified(self):
return self._modified
modified = Property(bool, modified, setModified,
notify=modifiedChanged)
def setEditorData(self, data, domain):
self.nameedit.setText(data.name)
self.expressionedit.setText(data.expression)
self.setModified(False)
self.featureChanged.emit()
self.attrs_model[:] = ["Select feature"]
if domain:
self.attrs_model[:] += chain(domain.attributes,
domain.class_vars,
domain.metas)
def editorData(self):
return FeatureDescriptor(name=self.nameedit.text(),
expression=self.expressionedit.text())
def _invalidate(self):
self.setModified(True)
self.featureEdited.emit()
self.featureChanged.emit()
def on_attrs_changed(self):
index = self.attributescb.currentIndex()
if index > 0:
attr = sanitized_name(self.attrs_model[index].name)
self.insert_into_expression(attr)
self.attributescb.setCurrentIndex(0)
def on_funcs_changed(self):
index = self.functionscb.currentIndex()
if index > 0:
func = self.funcs_model[index]
if func in ["atan2", "fmod", "ldexp", "log",
"pow", "copysign", "hypot"]:
self.insert_into_expression(func + "(,)")
self.expressionedit.cursorBackward(False, 2)
elif func in ["e", "pi"]:
self.insert_into_expression(func)
else:
self.insert_into_expression(func + "()")
self.expressionedit.cursorBackward(False)
self.functionscb.setCurrentIndex(0)
def insert_into_expression(self, what):
cp = self.expressionedit.cursorPosition()
ct = self.expressionedit.text()
text = ct[:cp] + what + ct[cp:]
self.expressionedit.setText(text)
self.expressionedit.setFocus()
class ContinuousFeatureEditor(FeatureEditor):
def editorData(self):
return ContinuousDescriptor(
name=self.nameedit.text(),
number_of_decimals=3,
expression=self.expressionedit.text()
)
class DiscreteFeatureEditor(FeatureEditor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.valuesedit = QtGui.QLineEdit()
self.valuesedit.textChanged.connect(self._invalidate)
layout = self.layout()
layout.addRow(self.tr("Values"), self.valuesedit)
def setEditorData(self, data, domain):
self.valuesedit.setText(
", ".join(v.replace(",", r"\,") for v in data.values))
super().setEditorData(data, domain)
def editorData(self):
values = self.valuesedit.text()
values = re.split(r"(?<!\\),", values)
values = tuple(v.replace(r"\,", ",").strip() for v in values)
return DiscreteDescriptor(
name=self.nameedit.text(),
values=values,
base_value=-1,
ordered=False,
expression=self.expressionedit.text()
)
class StringFeatureEditor(FeatureEditor):
def editorData(self):
return StringDescriptor(
name=self.nameedit.text(),
expression=self.expressionedit.text()
)
_VarMap = {
DiscreteDescriptor: vartype(Orange.data.DiscreteVariable()),
ContinuousDescriptor: vartype(Orange.data.ContinuousVariable()),
StringDescriptor: vartype(Orange.data.StringVariable())
}
@functools.lru_cache(20)
def variable_icon(dtype):
vtype = _VarMap.get(dtype, dtype)
try:
return gui.attributeIconDict[vtype]
except Exception:
return QtGui.QIcon()
class FeatureItemDelegate(QtGui.QStyledItemDelegate):
def displayText(self, value, locale):
return value.name + " := " + value.expression
class DescriptorModel(itemmodels.PyListModel):
def data(self, index, role=Qt.DisplayRole):
if role == Qt.DecorationRole:
value = self[index.row()]
return variable_icon(type(value))
else:
return super().data(index, role)
class OWFeatureConstructor(widget.OWWidget):
name = "Feature Constructor"
description = "Construct new features (data columns) from a set of " \
"existing features in the input data set."
icon = "icons/FeatureConstructor.svg"
inputs = [("Data", Orange.data.Table, "setData")]
outputs = [("Data", Orange.data.Table)]
want_main_area = False
settingsHandler = DomainContextHandler()
descriptors = ContextSetting([])
currentIndex = ContextSetting(-1)
EDITORS = [
(ContinuousDescriptor, ContinuousFeatureEditor),
(DiscreteDescriptor, DiscreteFeatureEditor),
(StringDescriptor, StringFeatureEditor)
]
def __init__(self):
super().__init__()
self.data = None
self.editors = {}
box = gui.widgetBox(self.controlArea, "Variable Definitions")
toplayout = QtGui.QHBoxLayout()
toplayout.setContentsMargins(0, 0, 0, 0)
box.layout().addLayout(toplayout)
self.editorstack = QtGui.QStackedWidget(
sizePolicy=QSizePolicy(QSizePolicy.MinimumExpanding,
QSizePolicy.MinimumExpanding)
)
for descclass, editorclass in self.EDITORS:
editor = editorclass()
editor.featureChanged.connect(self._on_modified)
self.editors[descclass] = editor
self.editorstack.addWidget(editor)
self.editorstack.setEnabled(False)
buttonlayout = QtGui.QVBoxLayout(spacing=10)
buttonlayout.setContentsMargins(0, 0, 0, 0)
self.addbutton = QtGui.QPushButton(
"New", toolTip="Create a new variable",
minimumWidth=120,
shortcut=QtGui.QKeySequence.New
)
def unique_name(fmt, reserved):
candidates = (fmt.format(i) for i in count(1))
return next(c for c in candidates if c not in reserved)
def reserved_names():
varnames = []
if self.data is not None:
varnames = [var.name for var in
self.data.domain.variables + self.data.domain.metas]
varnames += [desc.name for desc in self.featuremodel]
return set(varnames)
def generate_newname(fmt):
return unique_name(fmt, reserved_names())
menu = QtGui.QMenu(self.addbutton)
cont = menu.addAction("Continuous")
cont.triggered.connect(
lambda: self.addFeature(
ContinuousDescriptor(generate_newname("X{}"), "", 3))
)
disc = menu.addAction("Discrete")
disc.triggered.connect(
lambda: self.addFeature(
DiscreteDescriptor(generate_newname("D{}"), "",
("A", "B"), -1, False))
)
string = menu.addAction("String")
string.triggered.connect(
lambda: self.addFeature(
StringDescriptor(generate_newname("S{}"), ""))
)
menu.addSeparator()
self.duplicateaction = menu.addAction("Duplicate selected variable")
self.duplicateaction.triggered.connect(self.duplicateFeature)
self.duplicateaction.setEnabled(False)
self.addbutton.setMenu(menu)
self.removebutton = QtGui.QPushButton(
"Remove", toolTip="Remove selected variable",
minimumWidth=120,
shortcut=QtGui.QKeySequence.Delete
)
self.removebutton.clicked.connect(self.removeSelectedFeature)
buttonlayout.addWidget(self.addbutton)
buttonlayout.addWidget(self.removebutton)
buttonlayout.addStretch(10)
toplayout.addLayout(buttonlayout, 0)
toplayout.addWidget(self.editorstack, 10)
# Layout for the list view
layout = QtGui.QVBoxLayout(spacing=1, margin=0)
self.featuremodel = DescriptorModel(parent=self)
self.featureview = QtGui.QListView(
minimumWidth=200,
sizePolicy=QSizePolicy(QSizePolicy.Minimum,
QSizePolicy.MinimumExpanding)
)
self.featureview.setItemDelegate(FeatureItemDelegate(self))
self.featureview.setModel(self.featuremodel)
self.featureview.selectionModel().selectionChanged.connect(
self._on_selectedVariableChanged
)
layout.addWidget(self.featureview)
box.layout().addLayout(layout, 1)
box = gui.widgetBox(self.controlArea, orientation="horizontal")
box.layout().addWidget(self.report_button)
self.report_button.setMinimumWidth(180)
gui.rubber(box)
commit = gui.button(box, self, "Commit", callback=self.apply,
default=True)
commit.setMinimumWidth(180)
def setCurrentIndex(self, index):
index = min(index, len(self.featuremodel) - 1)
self.currentIndex = index
if index >= 0:
itemmodels.select_row(self.featureview, index)
desc = self.featuremodel[min(index, len(self.featuremodel) - 1)]
editor = self.editors[type(desc)]
self.editorstack.setCurrentWidget(editor)
editor.setEditorData(desc, self.data.domain if self.data else None)
self.editorstack.setEnabled(index >= 0)
self.duplicateaction.setEnabled(index >= 0)
self.removebutton.setEnabled(index >= 0)
def _on_selectedVariableChanged(self, selected, *_):
index = selected_row(self.featureview)
if index is not None:
self.setCurrentIndex(index)
else:
self.setCurrentIndex(-1)
def _on_modified(self):
if self.currentIndex >= 0:
editor = self.editorstack.currentWidget()
self.featuremodel[self.currentIndex] = editor.editorData()
self.descriptors = list(self.featuremodel)
def setDescriptors(self, descriptors):
"""
Set a list of variable descriptors to edit.
"""
self.descriptors = descriptors
self.featuremodel[:] = list(self.descriptors)
@check_sql_input
def setData(self, data=None):
"""Set the input dataset."""
self.closeContext()
self.data = data
if self.data is not None:
descriptors = list(self.descriptors)
currindex = self.currentIndex
self.openContext(data)
if descriptors != self.descriptors or \
self.currentIndex != currindex:
# disconnect from the selection model while resetting the model
selmodel = self.featureview.selectionModel()
selmodel.selectionChanged.disconnect(
self._on_selectedVariableChanged)
self.featuremodel[:] = list(self.descriptors)
self.setCurrentIndex(self.currentIndex)
selmodel.selectionChanged.connect(
self._on_selectedVariableChanged)
self.editorstack.setEnabled(self.currentIndex >= 0)
def handleNewSignals(self):
if self.data is not None:
self.apply()
else:
self.send("Data", None)
def addFeature(self, descriptor):
self.featuremodel.append(descriptor)
self.setCurrentIndex(len(self.featuremodel) - 1)
editor = self.editorstack.currentWidget()
editor.nameedit.setFocus()
editor.nameedit.selectAll()
def removeFeature(self, index):
del self.featuremodel[index]
index = selected_row(self.featureview)
if index is not None:
self.setCurrentIndex(index)
elif index is None and len(self.featuremodel) > 0:
# Deleting the last item clears selection
self.setCurrentIndex(len(self.featuremodel) - 1)
def removeSelectedFeature(self):
if self.currentIndex >= 0:
self.removeFeature(self.currentIndex)
def duplicateFeature(self):
desc = self.featuremodel[self.currentIndex]
self.addFeature(copy.deepcopy(desc))
def check_attrs_values(self, attr, data):
for i in range(len(data)):
for var in attr:
if not math.isnan(data[i, var]) \
and int(data[i, var]) >= len(var.values):
return var.name
return None
def apply(self):
if self.data is None:
return
desc = list(self.featuremodel)
def remove_invalid_expression(desc):
return (desc if is_valid_expression(desc.expression)
else desc._replace(expression=""))
desc = map(remove_invalid_expression, desc)
source_vars = tuple(self.data.domain) + self.data.domain.metas
new_variables = construct_variables(desc, source_vars)
attrs = [var for var in new_variables if var.is_primitive()]
metas = [var for var in new_variables if not var.is_primitive()]
new_domain = Orange.data.Domain(
self.data.domain.attributes + tuple(attrs),
self.data.domain.class_vars,
metas=self.data.domain.metas + tuple(metas)
)
self.error(0)
try:
data = Orange.data.Table(new_domain, self.data)
except Exception as err:
self.error(0, repr(err.args[0]))
return
self.error(1)
disc_attrs_not_ok = self.check_attrs_values(
[var for var in attrs if var.is_discrete], data)
if disc_attrs_not_ok:
self.error(1, 'Discrete variable %s needs more values.' %
disc_attrs_not_ok)
return
self.send("Data", data)
def send_report(self):
items = OrderedDict()
for feature in self.featuremodel:
if isinstance(feature, DiscreteDescriptor):
items[feature.name] = "{} (discrete with values {}{})".format(
feature.expression, feature.values,
"; ordered" * feature.ordered)
elif isinstance(feature, ContinuousDescriptor):
items[feature.name] = "{} (numeric)".format(feature.expression)
else:
items[feature.name] = "{} (text)".format(feature.expression)
self.report_items(
report.plural("Constructed feature{s}", len(items)), items)
import ast
def freevars(exp, env):
etype = type(exp)
if etype in [ast.Expr, ast.Expression]:
return freevars(exp.body, env)
elif etype == ast.BoolOp:
return sum((freevars(v, env) for v in exp.values), [])
elif etype == ast.BinOp:
return freevars(exp.left, env) + freevars(exp.right, env)
elif etype == ast.UnaryOp:
return freevars(exp.operand, env)
elif etype == ast.IfExp:
return (freevars(exp.test, env) + freevars(exp.body, env) +
freevars(exp.orelse, env))
elif etype == ast.Dict:
return sum((freevars(v, env) for v in exp.values), [])
elif etype == ast.Set:
return sum((freevars(v, env) for v in exp.elts), [])
elif etype in [ast.SetComp, ast.ListComp, ast.GeneratorExp]:
raise NotImplementedError
elif etype == ast.DictComp:
raise NotImplementedError
# Yield, YieldFrom???
elif etype == ast.Compare:
return sum((freevars(v, env) for v in [exp.left] + exp.comparators), [])
elif etype == ast.Call:
return sum((freevars(e, env)
for e in [exp.func] + (exp.args or []) +
(exp.keywords or []) +
(exp.starargs or []) +
(exp.kwargs or [])),
[])
elif etype in [ast.Num, ast.Str, ast.Ellipsis]:
# elif etype in [ast.Num, ast.Str, ast.Ellipsis, ast.Bytes]:
return []
elif etype == ast.Attribute:
return freevars(exp.value, env)
elif etype == ast.Subscript:
return freevars(exp.value, env) + freevars(exp.slice, env)
elif etype == ast.Name:
return [exp.id] if exp.id not in env else []
elif etype == ast.List:
return sum((freevars(e, env) for e in exp.elts), [])
elif etype == ast.Tuple:
return sum((freevars(e, env) for e in exp.elts), [])
elif etype == ast.Slice:
return sum((freevars(e, env)
for e in filter(None, [exp.lower, exp.upper, exp.step])),
[])
elif etype == ast.ExtSlice:
return sum((freevars(e, env) for e in exp.dims), [])
elif etype == ast.Index:
return freevars(exp.value, env)
else:
raise ValueError(exp)
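# Illustrative sketch (not part of the original module): freevars() walks an
# expression AST and collects names not bound in `env`, which is how the
# widget discovers which data columns an expression refers to.
#
#   tree = ast.parse("petal_length / petal_width", mode="eval")
#   freevars(tree, [])   # -> ['petal_length', 'petal_width']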
def construct_variables(descriptions, source_vars):
# subs
variables = []
for desc in descriptions:
_, func = bind_variable(desc, source_vars)
var = make_variable(desc, func)
variables.append(var)
return variables
def sanitized_name(name):
return re.sub(r"\W", "_", name)
def bind_variable(descriptor, env):
"""
(descriptor, env) ->
(descriptor, (instance -> value) | (table -> value list))
"""
if not descriptor.expression.strip():
return (descriptor, lambda _: float("nan"))
exp_ast = ast.parse(descriptor.expression, mode="eval")
freev = unique(freevars(exp_ast, []))
variables = {sanitized_name(v.name): v for v in env}
source_vars = [(name, variables[name]) for name in freev
if name in variables]
values = []
if isinstance(descriptor, DiscreteDescriptor):
values = [sanitized_name(v) for v in descriptor.values]
return descriptor, FeatureFunc(exp_ast, source_vars, values)
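# Illustrative sketch (not part of the original module; variable names follow
# Orange's bundled iris data set, where "petal length" sanitizes to
# "petal_length"):
#
#   data = Orange.data.Table("iris")
#   desc = ContinuousDescriptor("ratio", "petal_length / petal_width", 3)
#   desc, func = bind_variable(desc, data.domain.attributes)
#   func(data[0])   # value computed from the first instance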
def make_lambda(expression, args, values):
def make_arg(name):
if sys.version_info >= (3, 0):
return ast.arg(arg=name, annotation=None)
else:
return ast.Name(id=name, ctx=ast.Param(), lineno=1, col_offset=0)
lambda_ = ast.Lambda(
args=ast.arguments(
args=[make_arg(arg) for arg in args + values],
varargs=None,
varargannotation=None,
kwonlyargs=[],
kwarg=None,
kwargannotation=None,
defaults=[ast.Num(i) for i in range(len(values))],
kw_defaults=[]),
body=expression.body,
)
lambda_ = ast.copy_location(lambda_, expression.body)
exp = ast.Expression(body=lambda_, lineno=1, col_offset=0)
ast.dump(exp)
ast.fix_missing_locations(exp)
GLOBALS = __GLOBALS.copy()
GLOBALS["__builtins__"] = {}
return eval(compile(exp, "<lambda>", "eval"), GLOBALS)
__ALLOWED = [
"Ellipsis", "False", "None", "True", "abs", "all", "any", "acsii",
"bin", "bool", "bytearray", "bytes", "chr", "complex", "dict",
"divmod", "enumerate", "filter", "float", "format", "frozenset",
"getattr", "hasattr", "hash", "hex", "id", "int", "iter", "len",
"list", "map", "max", "memoryview", "min", "next", "object",
"oct", "ord", "pow", "range", "repr", "reversed", "round",
"set", "slice", "sorted", "str", "sum", "tuple", "type",
"zip"
]
__GLOBALS = {name: getattr(builtins, name) for name in __ALLOWED
if hasattr(builtins, name)}
__GLOBALS.update({name: getattr(math, name) for name in dir(math)
if not name.startswith("_")})
__GLOBALS.update({
"normalvariate": random.normalvariate,
"gauss": random.gauss,
"expovariate": random.expovariate,
"gammavariate": random.gammavariate,
"betavariate": random.betavariate,
"lognormvariate": random.lognormvariate,
"paretovariate": random.paretovariate,
"vonmisesvariate": random.vonmisesvariate,
"weibullvariate": random.weibullvariate,
"triangular": random.triangular,
"uniform": random.uniform}
)
class FeatureFunc:
def __init__(self, expression, args, values):
self.expression = expression
self.args = args
self.values = values
self.func = make_lambda(expression, [name for name, _ in args], values)
def __call__(self, instance, *_):
if isinstance(instance, Orange.data.Table):
return [self(inst) for inst in instance]
else:
args = [instance[var] for _, var in self.args]
return self.func(*args)
def unique(seq):
seen = set()
unique_el = []
for el in seq:
if el not in seen:
unique_el.append(el)
seen.add(el)
return unique_el
def main(argv=sys.argv):
app = QtGui.QApplication(list(argv))
argv = app.argv()
if len(argv) > 1:
filename = argv[1]
else:
filename = "iris"
w = OWFeatureConstructor()
w.show()
w.raise_()
data = Orange.data.Table(filename)
w.setData(data)
w.handleNewSignals()
app.exec_()
w.setData(None)
w.handleNewSignals()
w.saveSettings()
return 0
if __name__ == "__main__":
sys.exit(main())
|
jadami10/crossbarexamples
|
refs/heads/master
|
rest/needs_cleanup/python/example/flask/__init__.py
|
12
|
###############################################################################
##
## Copyright (C) 2012-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import sys
import crossbarconnect
from flask import Flask, request, render_template
## some configuration
##
PUSH_URL = "http://127.0.0.1:8080/push"
TOPIC_URI = "com.myapp.topic1"
WAMP_URL = "ws://127.0.0.1:8080/ws"
WAMP_REALM = "realm1"
app = Flask(__name__)
@app.route("/")
def index():
"""
Render demo main page.
"""
return render_template('index.html',
## prefill the form with these values
name = "Heinzelmann", age = 23,
## config for subscribing to events
router = WAMP_URL, realm = WAMP_REALM, topic = TOPIC_URI)
@app.route('/form1', methods = ['POST'])
def form1_submit():
"""
Extract data from a submitted HTML form, publish event via
Crossbar.io HTTP Pusher service and render success page.
"""
try:
## here we publish the form data via Crossbar.io HTTP Pusher service
## as a WAMP event to all subscribers on TOPIC_URI
##
publication_id = app.pusher.publish(TOPIC_URI,
name = request.form['name'], age = request.form['age'])
return render_template('onsubmit.html', publication_id = publication_id)
except Exception as e:
return "Publication failed: {}".format(e)
if __name__ == "__main__":
## we create a client for pushing events via Crossbar.io
app.pusher = crossbarconnect.Client(PUSH_URL)
## now run our Flask app
app.run(debug = True)
|
capstone-rust/capstone-rs
|
refs/heads/master
|
capstone-sys/capstone/bindings/python/test_arm.py
|
9
|
#!/usr/bin/env python
# Capstone Python bindings, by Nguyen Anh Quynh <[email protected]>
from __future__ import print_function
from capstone import *
from capstone.arm import *
from xprint import to_hex, to_x_32
ARM_CODE = b"\x86\x48\x60\xf4\xED\xFF\xFF\xEB\x04\xe0\x2d\xe5\x00\x00\x00\x00\xe0\x83\x22\xe5\xf1\x02\x03\x0e\x00\x00\xa0\xe3\x02\x30\xc1\xe7\x00\x00\x53\xe3\x00\x02\x01\xf1\x05\x40\xd0\xe8\xf4\x80\x00\x00"
ARM_CODE2 = b"\xd1\xe8\x00\xf0\xf0\x24\x04\x07\x1f\x3c\xf2\xc0\x00\x00\x4f\xf0\x00\x01\x46\x6c"
THUMB_CODE = b"\x70\x47\x00\xf0\x10\xe8\xeb\x46\x83\xb0\xc9\x68\x1f\xb1\x30\xbf\xaf\xf3\x20\x84\x52\xf8\x23\xf0"
THUMB_CODE2 = b"\x4f\xf0\x00\x01\xbd\xe8\x00\x88\xd1\xe8\x00\xf0\x18\xbf\xad\xbf\xf3\xff\x0b\x0c\x86\xf3\x00\x89\x80\xf3\x00\x8c\x4f\xfa\x99\xf6\xd0\xff\xa2\x01"
THUMB_MCLASS = b"\xef\xf3\x02\x80"
ARMV8 = b"\xe0\x3b\xb2\xee\x42\x00\x01\xe1\x51\xf0\x7f\xf5"
all_tests = (
(CS_ARCH_ARM, CS_MODE_ARM, ARM_CODE, "ARM", None),
(CS_ARCH_ARM, CS_MODE_THUMB, THUMB_CODE, "Thumb", None),
(CS_ARCH_ARM, CS_MODE_THUMB, ARM_CODE2, "Thumb-mixed", None),
(CS_ARCH_ARM, CS_MODE_THUMB, THUMB_CODE2, "Thumb-2 & register named with numbers", CS_OPT_SYNTAX_NOREGNAME),
(CS_ARCH_ARM, CS_MODE_THUMB + CS_MODE_MCLASS, THUMB_MCLASS, "Thumb-MClass", None),
(CS_ARCH_ARM, CS_MODE_ARM + CS_MODE_V8, ARMV8, "Arm-V8", None),
)
def print_insn_detail(insn):
# print address, mnemonic and operands
print("0x%x:\t%s\t%s" % (insn.address, insn.mnemonic, insn.op_str))
# "data" instruction generated by SKIPDATA option has no detail
if insn.id == 0:
return
if len(insn.operands) > 0:
print("\top_count: %u" % len(insn.operands))
c = 0
for i in insn.operands:
if i.type == ARM_OP_REG:
print("\t\toperands[%u].type: REG = %s" % (c, insn.reg_name(i.reg)))
if i.type == ARM_OP_IMM:
print("\t\toperands[%u].type: IMM = 0x%s" % (c, to_x_32(i.imm)))
if i.type == ARM_OP_PIMM:
print("\t\toperands[%u].type: P-IMM = %u" % (c, i.imm))
if i.type == ARM_OP_CIMM:
print("\t\toperands[%u].type: C-IMM = %u" % (c, i.imm))
if i.type == ARM_OP_FP:
print("\t\toperands[%u].type: FP = %f" % (c, i.fp))
if i.type == ARM_OP_SYSREG:
print("\t\toperands[%u].type: SYSREG = %u" % (c, i.reg))
if i.type == ARM_OP_SETEND:
if i.setend == ARM_SETEND_BE:
print("\t\toperands[%u].type: SETEND = be" % c)
else:
print("\t\toperands[%u].type: SETEND = le" % c)
if i.type == ARM_OP_MEM:
print("\t\toperands[%u].type: MEM" % c)
if i.mem.base != 0:
print("\t\t\toperands[%u].mem.base: REG = %s" \
% (c, insn.reg_name(i.mem.base)))
if i.mem.index != 0:
print("\t\t\toperands[%u].mem.index: REG = %s" \
% (c, insn.reg_name(i.mem.index)))
if i.mem.scale != 1:
print("\t\t\toperands[%u].mem.scale: %u" \
% (c, i.mem.scale))
if i.mem.disp != 0:
print("\t\t\toperands[%u].mem.disp: 0x%s" \
% (c, to_x_32(i.mem.disp)))
if i.mem.lshift != 0:
print("\t\t\toperands[%u].mem.lshift: 0x%s" \
% (c, to_x_32(i.mem.lshift)))
if i.neon_lane != -1:
print("\t\toperands[%u].neon_lane = %u" % (c, i.neon_lane))
if i.access == CS_AC_READ:
print("\t\toperands[%u].access: READ\n" % (c))
elif i.access == CS_AC_WRITE:
print("\t\toperands[%u].access: WRITE\n" % (c))
elif i.access == CS_AC_READ | CS_AC_WRITE:
print("\t\toperands[%u].access: READ | WRITE\n" % (c))
if i.shift.type != ARM_SFT_INVALID and i.shift.value:
print("\t\t\tShift: %u = %u" \
% (i.shift.type, i.shift.value))
if i.vector_index != -1:
print("\t\t\toperands[%u].vector_index = %u" %(c, i.vector_index))
if i.subtracted:
print("\t\t\toperands[%u].subtracted = True" %c)
c += 1
if insn.update_flags:
print("\tUpdate-flags: True")
if insn.writeback:
print("\tWrite-back: True")
if insn.cc not in [ARM_CC_AL, ARM_CC_INVALID]:
print("\tCode condition: %u" % insn.cc)
if insn.cps_mode:
print("\tCPSI-mode: %u" %(insn.cps_mode))
if insn.cps_flag:
print("\tCPSI-flag: %u" %(insn.cps_flag))
if insn.vector_data:
print("\tVector-data: %u" %(insn.vector_data))
if insn.vector_size:
print("\tVector-size: %u" %(insn.vector_size))
if insn.usermode:
print("\tUser-mode: True")
if insn.mem_barrier:
print("\tMemory-barrier: %u" %(insn.mem_barrier))
(regs_read, regs_write) = insn.regs_access()
if len(regs_read) > 0:
print("\tRegisters read:", end="")
for r in regs_read:
print(" %s" %(insn.reg_name(r)), end="")
print("")
if len(regs_write) > 0:
print("\tRegisters modified:", end="")
for r in regs_write:
print(" %s" %(insn.reg_name(r)), end="")
print("")
# ## Test class Cs
def test_class():
for (arch, mode, code, comment, syntax) in all_tests:
print("*" * 16)
print("Platform: %s" % comment)
print("Code: %s" % to_hex(code))
print("Disasm:")
try:
md = Cs(arch, mode)
if syntax is not None:
md.syntax = syntax
md.detail = True
for insn in md.disasm(code, 0x80001000):
print_insn_detail(insn)
print ()
except CsError as e:
print("ERROR: %s" % e)
if __name__ == '__main__':
test_class()
|
JamesLinEngineer/RKMC
|
refs/heads/Jarvis
|
addons/script.module.requests/lib/requests/packages/chardet/hebrewprober.py
|
2928
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Shy Shalom
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe, eDetecting
from .compat import wrap_ord
# This prober doesn't actually recognize a language or a charset.
# It is a helper prober for the use of the Hebrew model probers
### General ideas of the Hebrew charset recognition ###
#
# Four main charsets exist in Hebrew:
# "ISO-8859-8" - Visual Hebrew
# "windows-1255" - Logical Hebrew
# "ISO-8859-8-I" - Logical Hebrew
# "x-mac-hebrew" - ?? Logical Hebrew ??
#
# Both "ISO" charsets use a completely identical set of code points, whereas
# "windows-1255" and "x-mac-hebrew" are two different proper supersets of
# these code points. windows-1255 defines additional characters in the range
# 0x80-0x9F as some misc punctuation marks as well as some Hebrew-specific
# diacritics and additional 'Yiddish' ligature letters in the range 0xc0-0xd6.
# x-mac-hebrew defines similar additional code points but with a different
# mapping.
#
# As far as an average Hebrew text with no diacritics is concerned, all four
# charsets are identical with respect to code points. Meaning that for the
# main Hebrew alphabet, all four map the same values to all 27 Hebrew letters
# (including final letters).
#
# The dominant difference between these charsets is their directionality.
# "Visual" directionality means that the text is ordered as if the renderer is
# not aware of a BIDI rendering algorithm. The renderer sees the text and
# draws it from left to right. The text itself when ordered naturally is read
# backwards. A buffer of Visual Hebrew generally looks like so:
# "[last word of first line spelled backwards] [whole line ordered backwards
# and spelled backwards] [first word of first line spelled backwards]
# [end of line] [last word of second line] ... etc' "
# adding punctuation marks, numbers and English text to visual text is
# naturally also "visual" and from left to right.
#
# "Logical" directionality means the text is ordered "naturally" according to
# the order it is read. It is the responsibility of the renderer to display
# the text from right to left. A BIDI algorithm is used to place general
# punctuation marks, numbers and English text in the text.
#
# Texts in x-mac-hebrew are almost impossible to find on the Internet. From
# what little evidence I could find, it seems that its general directionality
# is Logical.
#
# To sum up all of the above, the Hebrew probing mechanism knows about two
# charsets:
# Visual Hebrew - "ISO-8859-8" - backwards text - Words and sentences are
# backwards while line order is natural. For charset recognition purposes
# the line order is unimportant (In fact, for this implementation, even
# word order is unimportant).
# Logical Hebrew - "windows-1255" - normal, naturally ordered text.
#
# "ISO-8859-8-I" is a subset of windows-1255 and doesn't need to be
# specifically identified.
# "x-mac-hebrew" is also identified as windows-1255. A text in x-mac-hebrew
# that contains special punctuation marks or diacritics is displayed with
# some unconverted characters showing as question marks. This problem might
# be corrected using another model prober for x-mac-hebrew. Due to the fact
# that x-mac-hebrew texts are so rare, writing another model prober isn't
# worth the effort and performance hit.
#
#### The Prober ####
#
# The prober is divided between two SBCharSetProbers and a HebrewProber,
# all of which are managed, created, fed data, inquired and deleted by the
# SBCSGroupProber. The two SBCharSetProbers identify that the text is in
# fact some kind of Hebrew, Logical or Visual. The final decision about which
# one it is rests with the HebrewProber, which combines final-letter scores
# with the scores of the two SBCharSetProbers to produce a final answer.
#
# The SBCSGroupProber is responsible for stripping the original text of HTML
# tags, English characters, numbers, low-ASCII punctuation characters, spaces
# and new lines. It reduces any sequence of such characters to a single space.
# The buffer fed to each prober in the SBCS group prober is pure text in
# high-ASCII.
# The two SBCharSetProbers (model probers) share the same language model:
# Win1255Model.
# The first SBCharSetProber uses the model normally as any other
# SBCharSetProber does, to recognize windows-1255, upon which this model was
# built. The second SBCharSetProber is told to make the pair-of-letter
# lookup in the language model backwards. This in practice exactly simulates
# a visual Hebrew model using the windows-1255 logical Hebrew model.
#
# The HebrewProber is not using any language model. All it does is look for
# final-letter evidence suggesting the text is either logical Hebrew or visual
# Hebrew. Disjointed from the model probers, the results of the HebrewProber
# alone are meaningless. HebrewProber always returns 0.00 as confidence
# since it never identifies a charset by itself. Instead, the pointer to the
# HebrewProber is passed to the model probers as a helper "Name Prober".
# When the Group prober receives a positive identification from any prober,
# it asks for the name of the charset identified. If the prober queried is a
# Hebrew model prober, the model prober forwards the call to the
# HebrewProber to make the final decision. In the HebrewProber, the
# decision is made according to the final-letter scores maintained and both
# model probers' scores. The answer is returned in the form of the name of the
# charset identified, either "windows-1255" or "ISO-8859-8".
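#
# Illustrative wiring sketch (not part of the original module): based on the
# description above, the SBCSGroupProber is expected to hook the three probers
# together roughly like this. The model-prober constructor signature and the
# Win1255Model name are assumptions shown only for shape, not a verified API:
#
#   hebrew_prober = HebrewProber()
#   logical_prober = SBCharSetProber(Win1255Model, False, hebrew_prober)
#   visual_prober = SBCharSetProber(Win1255Model, True, hebrew_prober)
#   hebrew_prober.set_model_probers(logical_prober, visual_prober)
#
# set_model_probers() is defined below; get_charset_name() then consults both
# model probers' confidences when the final decision has to be made.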
# windows-1255 / ISO-8859-8 code points of interest
FINAL_KAF = 0xea
NORMAL_KAF = 0xeb
FINAL_MEM = 0xed
NORMAL_MEM = 0xee
FINAL_NUN = 0xef
NORMAL_NUN = 0xf0
FINAL_PE = 0xf3
NORMAL_PE = 0xf4
FINAL_TSADI = 0xf5
NORMAL_TSADI = 0xf6
# Minimum Visual vs Logical final letter score difference.
# If the difference is below this, don't rely solely on the final letter score
# distance.
MIN_FINAL_CHAR_DISTANCE = 5
# Minimum Visual vs Logical model score difference.
# If the difference is below this, don't rely at all on the model score
# distance.
MIN_MODEL_DISTANCE = 0.01
VISUAL_HEBREW_NAME = "ISO-8859-8"
LOGICAL_HEBREW_NAME = "windows-1255"
class HebrewProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mLogicalProber = None
self._mVisualProber = None
self.reset()
def reset(self):
self._mFinalCharLogicalScore = 0
self._mFinalCharVisualScore = 0
# The two last characters seen in the previous buffer,
# mPrev and mBeforePrev are initialized to space in order to simulate
# a word delimiter at the beginning of the data
self._mPrev = ' '
self._mBeforePrev = ' '
# These probers are owned by the group prober.
def set_model_probers(self, logicalProber, visualProber):
self._mLogicalProber = logicalProber
self._mVisualProber = visualProber
def is_final(self, c):
return wrap_ord(c) in [FINAL_KAF, FINAL_MEM, FINAL_NUN, FINAL_PE,
FINAL_TSADI]
def is_non_final(self, c):
# The normal Tsadi is not a good Non-Final letter due to words like
# 'lechotet' (to chat) containing an apostrophe after the tsadi. This
# apostrophe is converted to a space in FilterWithoutEnglishLetters
# causing the Non-Final tsadi to appear at an end of a word even
# though this is not the case in the original text.
# The letters Pe and Kaf rarely show a related behavior of not being
# good Non-Final letters: words like 'Pop', 'Winamp' and 'Mubarak', for
# example, legitimately end with a Non-Final Pe or Kaf. However, the
# benefit of these letters as Non-Final letters outweighs the damage
# since these words are quite rare.
return wrap_ord(c) in [NORMAL_KAF, NORMAL_MEM, NORMAL_NUN, NORMAL_PE]
def feed(self, aBuf):
# Final letter analysis for logical-visual decision.
# Look for evidence that the received buffer is either logical Hebrew
# or visual Hebrew.
# The following cases are checked:
# 1) A word longer than 1 letter, ending with a final letter. This is
# an indication that the text is laid out "naturally" since the
# final letter really appears at the end. +1 for logical score.
# 2) A word longer than 1 letter, ending with a Non-Final letter. In
# normal Hebrew, words ending with Kaf, Mem, Nun, Pe or Tsadi,
# should not end with the Non-Final form of that letter. Exceptions
# to this rule are mentioned above in isNonFinal(). This is an
# indication that the text is laid out backwards. +1 for visual
# score
# 3) A word longer than 1 letter, starting with a final letter. Final
# letters should not appear at the beginning of a word. This is an
# indication that the text is laid out backwards. +1 for visual
# score.
#
# The visual score and logical score are accumulated throughout the
# text and are finally checked against each other in GetCharSetName().
# No checking for final letters in the middle of words is done since
# that case is not an indication for either Logical or Visual text.
#
# We automatically filter out all 7-bit characters (replace them with
# spaces) so the word boundary detection works properly. [MAP]
if self.get_state() == eNotMe:
# Both model probers say it's not them. No reason to continue.
return eNotMe
aBuf = self.filter_high_bit_only(aBuf)
for cur in aBuf:
if cur == ' ':
# We stand on a space - a word just ended
if self._mBeforePrev != ' ':
# next-to-last char was not a space so self._mPrev is not a
# 1 letter word
if self.is_final(self._mPrev):
# case (1) [-2:not space][-1:final letter][cur:space]
self._mFinalCharLogicalScore += 1
elif self.is_non_final(self._mPrev):
# case (2) [-2:not space][-1:Non-Final letter][
# cur:space]
self._mFinalCharVisualScore += 1
else:
# Not standing on a space
if ((self._mBeforePrev == ' ') and
(self.is_final(self._mPrev)) and (cur != ' ')):
# case (3) [-2:space][-1:final letter][cur:not space]
self._mFinalCharVisualScore += 1
self._mBeforePrev = self._mPrev
self._mPrev = cur
# Forever detecting, till the end or until both model probers return
# eNotMe (handled above)
return eDetecting
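# Illustrative trace of the scoring in feed() above (comment only; MEM is
# used as the example letter). After filtering, a buffer ending in
#   [NORMAL_MEM, FINAL_MEM, ' ']  -> case (1), logical score += 1
# while a buffer containing
#   [' ', FINAL_MEM, NORMAL_MEM]  -> case (3), visual score += 1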
def get_charset_name(self):
# Make the decision: is it Logical or Visual?
# If the final letter score distance is dominant enough, rely on it.
finalsub = self._mFinalCharLogicalScore - self._mFinalCharVisualScore
if finalsub >= MIN_FINAL_CHAR_DISTANCE:
return LOGICAL_HEBREW_NAME
if finalsub <= -MIN_FINAL_CHAR_DISTANCE:
return VISUAL_HEBREW_NAME
# It's not dominant enough, try to rely on the model scores instead.
modelsub = (self._mLogicalProber.get_confidence()
- self._mVisualProber.get_confidence())
if modelsub > MIN_MODEL_DISTANCE:
return LOGICAL_HEBREW_NAME
if modelsub < -MIN_MODEL_DISTANCE:
return VISUAL_HEBREW_NAME
# Still no good, back to final letter distance, maybe it'll save the
# day.
if finalsub < 0.0:
return VISUAL_HEBREW_NAME
# (finalsub > 0 - Logical) or (don't know what to do) default to
# Logical.
return LOGICAL_HEBREW_NAME
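# Worked example of the decision above (derived from the thresholds defined
# in this module; comment only). With MIN_FINAL_CHAR_DISTANCE = 5 and
# MIN_MODEL_DISTANCE = 0.01:
#   finalsub = +7                     -> "windows-1255" immediately
#   finalsub = +2, modelsub = +0.05   -> "windows-1255" via the model scores
#   finalsub = -2, modelsub = +0.005  -> "ISO-8859-8" (falls back to finalsub < 0)
#   finalsub =  0, modelsub =  0.0    -> "windows-1255" (the default)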
def get_state(self):
# Remain active as long as any of the model probers are active.
if (self._mLogicalProber.get_state() == eNotMe) and \
(self._mVisualProber.get_state() == eNotMe):
return eNotMe
return eDetecting
|
Just-D/panda3d
|
refs/heads/master
|
direct/src/directnotify/DirectNotifyGlobal.py
|
10
|
"""instantiate global DirectNotify used in Direct"""
__all__ = ['directNotify', 'giveNotify']
import DirectNotify
directNotify = DirectNotify.DirectNotify()
giveNotify = directNotify.giveNotify
|
40223204/w16b_test
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/_sre.py
|
622
|
# NOT_RPYTHON
"""
A pure Python reimplementation of the _sre module from CPython 2.4
Copyright 2005 Nik Haldimann, licensed under the MIT license
This code is based on material licensed under CNRI's Python 1.6 license and
copyrighted by: Copyright (c) 1997-2001 by Secret Labs AB
"""
MAXREPEAT = 2147483648
#import array
import operator, sys
from sre_constants import ATCODES, OPCODES, CHCODES
from sre_constants import SRE_INFO_PREFIX, SRE_INFO_LITERAL
from sre_constants import SRE_FLAG_UNICODE, SRE_FLAG_LOCALE
import sys
# Identifying as _sre from Python 2.3 or 2.4
#if sys.version_info[:2] >= (2, 4):
MAGIC = 20031017
#else:
# MAGIC = 20030419
# In _sre.c this is bytesize of the code word type of the C implementation.
# There it's 2 for normal Python builds and more for wide unicode builds (large
# enough to hold a 32-bit UCS-4 encoded character). Since here in pure Python
# we only see re bytecodes as Python longs, we shouldn't have to care about the
# codesize. But sre_compile will compile some stuff differently depending on the
# codesize (e.g., charsets).
# starting with python 3.3 CODESIZE is 4
#if sys.maxunicode == 65535:
# CODESIZE = 2
#else:
CODESIZE = 4
copyright = "_sre.py 2.4c Copyright 2005 by Nik Haldimann"
def getcodesize():
return CODESIZE
def compile(pattern, flags, code, groups=0, groupindex={}, indexgroup=[None]):
"""Compiles (or rather just converts) a pattern descriptor to a SRE_Pattern
object. Actual compilation to opcodes happens in sre_compile."""
return SRE_Pattern(pattern, flags, code, groups, groupindex, indexgroup)
def getlower(char_ord, flags):
if (char_ord < 128) or (flags & SRE_FLAG_UNICODE) \
or (flags & SRE_FLAG_LOCALE and char_ord < 256):
#return ord(unichr(char_ord).lower())
return ord(chr(char_ord).lower())
else:
return char_ord
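# Example behavior of getlower() above (values follow directly from the code;
# shown as a comment only):
#   getlower(ord('A'), 0)                == ord('a')  # ASCII always folds
#   getlower(0xC0, 0)                    == 0xC0      # 'À' untouched without a flag
#   getlower(0xC0, SRE_FLAG_UNICODE)     == 0xE0      # folds to 'à'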
class SRE_Pattern:
def __init__(self, pattern, flags, code, groups=0, groupindex={}, indexgroup=[None]):
self.pattern = pattern
self.flags = flags
self.groups = groups
self.groupindex = groupindex # Maps group names to group indices
self._indexgroup = indexgroup # Maps indices to group names
self._code = code
def match(self, string, pos=0, endpos=sys.maxsize):
"""If zero or more characters at the beginning of string match this
regular expression, return a corresponding MatchObject instance. Return
None if the string does not match the pattern."""
state = _State(string, pos, endpos, self.flags)
if state.match(self._code):
return SRE_Match(self, state)
return None
def search(self, string, pos=0, endpos=sys.maxsize):
"""Scan through string looking for a location where this regular
expression produces a match, and return a corresponding MatchObject
instance. Return None if no position in the string matches the
pattern."""
state = _State(string, pos, endpos, self.flags)
if state.search(self._code):
return SRE_Match(self, state)
else:
return None
def findall(self, string, pos=0, endpos=sys.maxsize):
"""Return a list of all non-overlapping matches of pattern in string."""
matchlist = []
state = _State(string, pos, endpos, self.flags)
while state.start <= state.end:
state.reset()
state.string_position = state.start
if not state.search(self._code):
break
match = SRE_Match(self, state)
if self.groups == 0 or self.groups == 1:
item = match.group(self.groups)
else:
item = match.groups("")
matchlist.append(item)
if state.string_position == state.start:
state.start += 1
else:
state.start = state.string_position
return matchlist
def _subx(self, template, string, count=0, subn=False):
filter = template
if not callable(template) and "\\" in template:
# handle non-literal strings ; hand it over to the template compiler
#import sre #sre was renamed to re
#fix me brython
#print("possible issue at _sre.py line 116")
import re as sre
filter = sre._subx(self, template)
state = _State(string, 0, sys.maxsize, self.flags)
sublist = []
n = last_pos = 0
while not count or n < count:
state.reset()
state.string_position = state.start
if not state.search(self._code):
break
if last_pos < state.start:
sublist.append(string[last_pos:state.start])
if not (last_pos == state.start and
last_pos == state.string_position and n > 0):
# the above ignores empty matches on latest position
if callable(filter):
sublist.append(filter(SRE_Match(self, state)))
else:
sublist.append(filter)
last_pos = state.string_position
n += 1
if state.string_position == state.start:
state.start += 1
else:
state.start = state.string_position
if last_pos < state.end:
sublist.append(string[last_pos:state.end])
item = "".join(sublist)
if subn:
return item, n
else:
return item
def sub(self, repl, string, count=0):
"""Return the string obtained by replacing the leftmost non-overlapping
occurrences of pattern in string by the replacement repl."""
return self._subx(repl, string, count, False)
def subn(self, repl, string, count=0):
"""Return the tuple (new_string, number_of_subs_made) found by replacing
the leftmost non-overlapping occurrences of pattern with the replacement
repl."""
return self._subx(repl, string, count, True)
def split(self, string, maxsplit=0):
"""Split string by the occurrences of pattern."""
splitlist = []
state = _State(string, 0, sys.maxsize, self.flags)
n = 0
last = state.start
while not maxsplit or n < maxsplit:
state.reset()
state.string_position = state.start
if not state.search(self._code):
break
if state.start == state.string_position: # zero-width match
if last == state.end: # or end of string
break
state.start += 1
continue
splitlist.append(string[last:state.start])
# add groups (if any)
if self.groups:
match = SRE_Match(self, state)
splitlist.extend(list(match.groups(None)))
n += 1
last = state.start = state.string_position
splitlist.append(string[last:state.end])
return splitlist
def finditer(self, string, pos=0, endpos=sys.maxsize):
"""Return a list of all non-overlapping matches of pattern in string."""
#scanner = self.scanner(string, pos, endpos)
_list = []
_re = SRE_Scanner(self, string, pos, endpos)
_m=_re.search()
while _m:
_list.append(_m)
_m=_re.search()
return _list
#return iter(scanner.search, None)
def scanner(self, string, start=0, end=sys.maxsize):
return SRE_Scanner(self, string, start, end)
def __copy__(self):
raise TypeError("cannot copy this pattern object")
def __deepcopy__(self):
raise TypeError("cannot copy this pattern object")
class SRE_Scanner:
"""Undocumented scanner interface of sre."""
def __init__(self, pattern, string, start, end):
self.pattern = pattern
self._state = _State(string, start, end, self.pattern.flags)
def _match_search(self, matcher):
state = self._state
state.reset()
state.string_position = state.start
match = None
if matcher(self.pattern._code):
match = SRE_Match(self.pattern, state)
if match is None or state.string_position == state.start:
state.start += 1
else:
state.start = state.string_position
return match
def match(self):
return self._match_search(self._state.match)
def search(self):
return self._match_search(self._state.search)
class SRE_Match:
def __init__(self, pattern, state):
self.re = pattern
self.string = state.string
self.pos = state.pos
self.endpos = state.end
self.lastindex = state.lastindex
if self.lastindex < 0:
self.lastindex = None
self.regs = self._create_regs(state)
#statement below is not valid under python3 ( 0 <= None)
#if pattern._indexgroup and 0 <= self.lastindex < len(pattern._indexgroup):
if self.lastindex is not None and pattern._indexgroup and 0 <= self.lastindex < len(pattern._indexgroup):
# The above upper-bound check should not be necessary, as the re
# compiler is supposed to always provide an _indexgroup list long
# enough. But the re.Scanner class seems to screw up something
# there, test_scanner in test_re won't work without upper-bound
# checking. XXX investigate this and report bug to CPython.
self.lastgroup = pattern._indexgroup[self.lastindex]
else:
self.lastgroup = None
def _create_regs(self, state):
"""Creates a tuple of index pairs representing matched groups."""
regs = [(state.start, state.string_position)]
for group in range(self.re.groups):
mark_index = 2 * group
if mark_index + 1 < len(state.marks) \
and state.marks[mark_index] is not None \
and state.marks[mark_index + 1] is not None:
regs.append((state.marks[mark_index], state.marks[mark_index + 1]))
else:
regs.append((-1, -1))
return tuple(regs)
def _get_index(self, group):
if isinstance(group, int):
if group >= 0 and group <= self.re.groups:
return group
else:
if group in self.re.groupindex:
return self.re.groupindex[group]
raise IndexError("no such group")
def _get_slice(self, group, default):
group_indices = self.regs[group]
if group_indices[0] >= 0:
return self.string[group_indices[0]:group_indices[1]]
else:
return default
def start(self, group=0):
"""Returns the indices of the start of the substring matched by group;
group defaults to zero (meaning the whole matched substring). Returns -1
if group exists but did not contribute to the match."""
return self.regs[self._get_index(group)][0]
def end(self, group=0):
"""Returns the indices of the end of the substring matched by group;
group defaults to zero (meaning the whole matched substring). Returns -1
if group exists but did not contribute to the match."""
return self.regs[self._get_index(group)][1]
def span(self, group=0):
"""Returns the 2-tuple (m.start(group), m.end(group))."""
return self.start(group), self.end(group)
def expand(self, template):
"""Return the string obtained by doing backslash substitution and
resolving group references on template."""
import sre
return sre._expand(self.re, self, template)
def groups(self, default=None):
"""Returns a tuple containing all the subgroups of the match. The
default argument is used for groups that did not participate in the
match (defaults to None)."""
groups = []
for indices in self.regs[1:]:
if indices[0] >= 0:
groups.append(self.string[indices[0]:indices[1]])
else:
groups.append(default)
return tuple(groups)
def groupdict(self, default=None):
"""Return a dictionary containing all the named subgroups of the match.
The default argument is used for groups that did not participate in the
match (defaults to None)."""
groupdict = {}
for key, value in self.re.groupindex.items():
groupdict[key] = self._get_slice(value, default)
return groupdict
def group(self, *args):
"""Returns one or more subgroups of the match. Each argument is either a
group index or a group name."""
if len(args) == 0:
args = (0,)
grouplist = []
for group in args:
grouplist.append(self._get_slice(self._get_index(group), None))
if len(grouplist) == 1:
return grouplist[0]
else:
return tuple(grouplist)
def __copy__(self):
raise TypeError("cannot copy this match object")
def __deepcopy__(self, memo=None):
raise TypeError("cannot copy this match object")
class _State:
def __init__(self, string, start, end, flags):
self.string = string
if start < 0:
start = 0
if end > len(string):
end = len(string)
self.start = start
self.string_position = self.start
self.end = end
self.pos = start
self.flags = flags
self.reset()
def reset(self):
self.marks = []
self.lastindex = -1
self.marks_stack = []
self.context_stack = []
self.repeat = None
def match(self, pattern_codes):
# Optimization: Check string length. pattern_codes[3] contains the
# minimum length for a string to possibly match.
# brython.. the optimization doesn't work
#if pattern_codes[0] == OPCODES["info"] and pattern_codes[3]:
# if self.end - self.string_position < pattern_codes[3]:
# #_log("reject (got %d chars, need %d)"
# # % (self.end - self.string_position, pattern_codes[3]))
# return False
dispatcher = _OpcodeDispatcher()
self.context_stack.append(_MatchContext(self, pattern_codes))
has_matched = None
while len(self.context_stack) > 0:
context = self.context_stack[-1]
has_matched = dispatcher.match(context)
if has_matched is not None: # don't pop if context isn't done
self.context_stack.pop()
return has_matched
def search(self, pattern_codes):
flags = 0
if pattern_codes[0] == OPCODES["info"]:
# optimization info block
# <INFO> <1=skip> <2=flags> <3=min> <4=max> <5=prefix info>
if pattern_codes[2] & SRE_INFO_PREFIX and pattern_codes[5] > 1:
return self.fast_search(pattern_codes)
flags = pattern_codes[2]
pattern_codes = pattern_codes[pattern_codes[1] + 1:]
string_position = self.start
if pattern_codes[0] == OPCODES["literal"]:
# Special case: Pattern starts with a literal character. This is
# used for short prefixes
character = pattern_codes[1]
while True:
while string_position < self.end \
and ord(self.string[string_position]) != character:
string_position += 1
if string_position >= self.end:
return False
self.start = string_position
string_position += 1
self.string_position = string_position
if flags & SRE_INFO_LITERAL:
return True
if self.match(pattern_codes[2:]):
return True
return False
# General case
while string_position <= self.end:
self.reset()
self.start = self.string_position = string_position
if self.match(pattern_codes):
return True
string_position += 1
return False
def fast_search(self, pattern_codes):
"""Skips forward in a string as fast as possible using information from
an optimization info block."""
# pattern starts with a known prefix
# <5=length> <6=skip> <7=prefix data> <overlap data>
flags = pattern_codes[2]
prefix_len = pattern_codes[5]
prefix_skip = pattern_codes[6] # don't really know what this is good for
prefix = pattern_codes[7:7 + prefix_len]
overlap = pattern_codes[7 + prefix_len - 1:pattern_codes[1] + 1]
pattern_codes = pattern_codes[pattern_codes[1] + 1:]
i = 0
string_position = self.string_position
while string_position < self.end:
while True:
if ord(self.string[string_position]) != prefix[i]:
if i == 0:
break
else:
i = overlap[i]
else:
i += 1
if i == prefix_len:
# found a potential match
self.start = string_position + 1 - prefix_len
self.string_position = string_position + 1 \
- prefix_len + prefix_skip
if flags & SRE_INFO_LITERAL:
return True # matched all of pure literal pattern
if self.match(pattern_codes[2 * prefix_skip:]):
return True
i = overlap[i]
break
string_position += 1
return False
def set_mark(self, mark_nr, position):
if mark_nr & 1:
# This id marks the end of a group.
# fix python 3 division incompatibility
#self.lastindex = mark_nr / 2 + 1
self.lastindex = mark_nr // 2 + 1
if mark_nr >= len(self.marks):
self.marks.extend([None] * (mark_nr - len(self.marks) + 1))
self.marks[mark_nr] = position
def get_marks(self, group_index):
marks_index = 2 * group_index
if len(self.marks) > marks_index + 1:
return self.marks[marks_index], self.marks[marks_index + 1]
else:
return None, None
def marks_push(self):
self.marks_stack.append((self.marks[:], self.lastindex))
def marks_pop(self):
self.marks, self.lastindex = self.marks_stack.pop()
def marks_pop_keep(self):
self.marks, self.lastindex = self.marks_stack[-1]
def marks_pop_discard(self):
self.marks_stack.pop()
def lower(self, char_ord):
return getlower(char_ord, self.flags)
class _MatchContext:
def __init__(self, state, pattern_codes):
self.state = state
self.pattern_codes = pattern_codes
self.string_position = state.string_position
self.code_position = 0
self.has_matched = None
def push_new_context(self, pattern_offset):
"""Creates a new child context of this context and pushes it on the
stack. pattern_offset is the offset from the current code position to
start interpreting from."""
child_context = _MatchContext(self.state,
self.pattern_codes[self.code_position + pattern_offset:])
#print("_sre.py:517:pushing new context") #, child_context.has_matched)
#print(self.state.string_position)
#print(self.pattern_codes[self.code_position + pattern_offset:])
#print(pattern_offset)
self.state.context_stack.append(child_context)
return child_context
def peek_char(self, peek=0):
return self.state.string[self.string_position + peek]
def skip_char(self, skip_count):
self.string_position += skip_count
def remaining_chars(self):
return self.state.end - self.string_position
def peek_code(self, peek=0):
return self.pattern_codes[self.code_position + peek]
def skip_code(self, skip_count):
self.code_position += skip_count
def remaining_codes(self):
return len(self.pattern_codes) - self.code_position
def at_beginning(self):
return self.string_position == 0
def at_end(self):
return self.string_position == self.state.end
def at_linebreak(self):
return not self.at_end() and _is_linebreak(self.peek_char())
def at_boundary(self, word_checker):
if self.at_beginning() and self.at_end():
return False
that = not self.at_beginning() and word_checker(self.peek_char(-1))
this = not self.at_end() and word_checker(self.peek_char())
return this != that
class _RepeatContext(_MatchContext):
def __init__(self, context):
_MatchContext.__init__(self, context.state,
context.pattern_codes[context.code_position:])
self.count = -1
#print('569:repeat', context.state.repeat)
self.previous = context.state.repeat
self.last_position = None
class _Dispatcher:
DISPATCH_TABLE = None
def dispatch(self, code, context):
method = self.DISPATCH_TABLE.get(code, self.__class__.unknown)
return method(self, context)
def unknown(self, code, ctx):
raise NotImplementedError()
def build_dispatch_table(cls, code_dict, method_prefix):
if cls.DISPATCH_TABLE is not None:
return
table = {}
for key, value in code_dict.items():
if hasattr(cls, "%s%s" % (method_prefix, key)):
table[value] = getattr(cls, "%s%s" % (method_prefix, key))
cls.DISPATCH_TABLE = table
build_dispatch_table = classmethod(build_dispatch_table)
class _OpcodeDispatcher(_Dispatcher):
def __init__(self):
self.executing_contexts = {}
self.at_dispatcher = _AtcodeDispatcher()
self.ch_dispatcher = _ChcodeDispatcher()
self.set_dispatcher = _CharsetDispatcher()
def match(self, context):
"""Returns True if the current context matches, False if it doesn't and
None if matching is not finished, ie must be resumed after child
contexts have been matched."""
while context.remaining_codes() > 0 and context.has_matched is None:
opcode = context.peek_code()
if not self.dispatch(opcode, context):
return None
if context.has_matched is None:
context.has_matched = False
return context.has_matched
def dispatch(self, opcode, context):
"""Dispatches a context on a given opcode. Returns True if the context
is done matching, False if it must be resumed when next encountered."""
#if self.executing_contexts.has_key(id(context)):
if id(context) in self.executing_contexts:
generator = self.executing_contexts[id(context)]
del self.executing_contexts[id(context)]
has_finished = next(generator)
else:
method = self.DISPATCH_TABLE.get(opcode, _OpcodeDispatcher.unknown)
has_finished = method(self, context)
if hasattr(has_finished, "__next__"): # avoid using the types module
generator = has_finished
has_finished = next(generator)
if not has_finished:
self.executing_contexts[id(context)] = generator
return has_finished
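# Note on the dispatch protocol above (explanatory comment only): opcode
# handlers that need to match a child context (op_branch, op_repeat_one,
# op_assert, ...) are written as generators. Their first `yield False` tells
# dispatch() "a child context was pushed, resume me later"; the generator is
# parked in self.executing_contexts keyed by id(context) and advanced again
# once the child context has finished. A final `yield True` (or a plain
# return of True from a non-generator handler) means the opcode is done.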
def op_success(self, ctx):
# end of pattern
#self._log(ctx, "SUCCESS")
ctx.state.string_position = ctx.string_position
ctx.has_matched = True
return True
def op_failure(self, ctx):
# immediate failure
#self._log(ctx, "FAILURE")
ctx.has_matched = False
return True
def general_op_literal(self, ctx, compare, decorate=lambda x: x):
#print(ctx.peek_char())
if ctx.at_end() or not compare(decorate(ord(ctx.peek_char())),
decorate(ctx.peek_code(1))):
ctx.has_matched = False
ctx.skip_code(2)
ctx.skip_char(1)
def op_literal(self, ctx):
# match literal string
# <LITERAL> <code>
#self._log(ctx, "LITERAL", ctx.peek_code(1))
self.general_op_literal(ctx, operator.eq)
return True
def op_not_literal(self, ctx):
# match anything that is not the given literal character
# <NOT_LITERAL> <code>
#self._log(ctx, "NOT_LITERAL", ctx.peek_code(1))
self.general_op_literal(ctx, operator.ne)
return True
def op_literal_ignore(self, ctx):
# match literal regardless of case
# <LITERAL_IGNORE> <code>
#self._log(ctx, "LITERAL_IGNORE", ctx.peek_code(1))
self.general_op_literal(ctx, operator.eq, ctx.state.lower)
return True
def op_not_literal_ignore(self, ctx):
# match anything that is not the given literal, regardless of case
# <NOT_LITERAL_IGNORE> <code>
#self._log(ctx, "NOT_LITERAL_IGNORE", ctx.peek_code(1))
self.general_op_literal(ctx, operator.ne, ctx.state.lower)
return True
def op_at(self, ctx):
# match at given position
# <AT> <code>
#self._log(ctx, "AT", ctx.peek_code(1))
if not self.at_dispatcher.dispatch(ctx.peek_code(1), ctx):
ctx.has_matched = False
#print('_sre.py:line693, update context.has_matched variable')
return True
ctx.skip_code(2)
return True
def op_category(self, ctx):
# match at given category
# <CATEGORY> <code>
#self._log(ctx, "CATEGORY", ctx.peek_code(1))
if ctx.at_end() or not self.ch_dispatcher.dispatch(ctx.peek_code(1), ctx):
ctx.has_matched = False
#print('_sre.py:line703, update context.has_matched variable')
return True
ctx.skip_code(2)
ctx.skip_char(1)
return True
def op_any(self, ctx):
# match anything (except a newline)
# <ANY>
#self._log(ctx, "ANY")
if ctx.at_end() or ctx.at_linebreak():
ctx.has_matched = False
#print('_sre.py:line714, update context.has_matched variable')
return True
ctx.skip_code(1)
ctx.skip_char(1)
return True
def op_any_all(self, ctx):
# match anything
# <ANY_ALL>
#self._log(ctx, "ANY_ALL")
if ctx.at_end():
ctx.has_matched = False
#print('_sre.py:line725, update context.has_matched variable')
return True
ctx.skip_code(1)
ctx.skip_char(1)
return True
def general_op_in(self, ctx, decorate=lambda x: x):
#self._log(ctx, "OP_IN")
#print('general_op_in')
if ctx.at_end():
ctx.has_matched = False
#print('_sre.py:line734, update context.has_matched variable')
return
skip = ctx.peek_code(1)
ctx.skip_code(2) # set op pointer to the set code
#print(ctx.peek_char(), ord(ctx.peek_char()),
# decorate(ord(ctx.peek_char())))
if not self.check_charset(ctx, decorate(ord(ctx.peek_char()))):
#print('_sre.py:line738, update context.has_matched variable')
ctx.has_matched = False
return
ctx.skip_code(skip - 1)
ctx.skip_char(1)
#print('end:general_op_in')
def op_in(self, ctx):
# match set member (or non_member)
# <IN> <skip> <set>
#self._log(ctx, "OP_IN")
self.general_op_in(ctx)
return True
def op_in_ignore(self, ctx):
# match set member (or non_member), disregarding case of current char
# <IN_IGNORE> <skip> <set>
#self._log(ctx, "OP_IN_IGNORE")
self.general_op_in(ctx, ctx.state.lower)
return True
def op_jump(self, ctx):
# jump forward
# <JUMP> <offset>
#self._log(ctx, "JUMP", ctx.peek_code(1))
ctx.skip_code(ctx.peek_code(1) + 1)
return True
# skip info
# <INFO> <skip>
op_info = op_jump
def op_mark(self, ctx):
# set mark
# <MARK> <gid>
#self._log(ctx, "OP_MARK", ctx.peek_code(1))
ctx.state.set_mark(ctx.peek_code(1), ctx.string_position)
ctx.skip_code(2)
return True
def op_branch(self, ctx):
# alternation
# <BRANCH> <0=skip> code <JUMP> ... <NULL>
#self._log(ctx, "BRANCH")
ctx.state.marks_push()
ctx.skip_code(1)
current_branch_length = ctx.peek_code(0)
while current_branch_length:
# The following tries to shortcut branches starting with a
# (unmatched) literal. _sre.c also shortcuts charsets here.
if not (ctx.peek_code(1) == OPCODES["literal"] and \
(ctx.at_end() or ctx.peek_code(2) != ord(ctx.peek_char()))):
ctx.state.string_position = ctx.string_position
child_context = ctx.push_new_context(1)
#print("_sre.py:803:op_branch")
yield False
if child_context.has_matched:
ctx.has_matched = True
yield True
ctx.state.marks_pop_keep()
ctx.skip_code(current_branch_length)
current_branch_length = ctx.peek_code(0)
ctx.state.marks_pop_discard()
ctx.has_matched = False
#print('_sre.py:line805, update context.has_matched variable')
yield True
def op_repeat_one(self, ctx):
# match repeated sequence (maximizing).
# this operator only works if the repeated item is exactly one character
# wide, and we're not already collecting backtracking points.
# <REPEAT_ONE> <skip> <1=min> <2=max> item <SUCCESS> tail
mincount = ctx.peek_code(2)
maxcount = ctx.peek_code(3)
#print("repeat one", mincount, maxcount)
#self._log(ctx, "REPEAT_ONE", mincount, maxcount)
if ctx.remaining_chars() < mincount:
ctx.has_matched = False
yield True
ctx.state.string_position = ctx.string_position
count = self.count_repetitions(ctx, maxcount)
ctx.skip_char(count)
if count < mincount:
ctx.has_matched = False
yield True
if ctx.peek_code(ctx.peek_code(1) + 1) == OPCODES["success"]:
# tail is empty. we're finished
ctx.state.string_position = ctx.string_position
ctx.has_matched = True
yield True
ctx.state.marks_push()
if ctx.peek_code(ctx.peek_code(1) + 1) == OPCODES["literal"]:
# Special case: Tail starts with a literal. Skip positions where
# the rest of the pattern cannot possibly match.
char = ctx.peek_code(ctx.peek_code(1) + 2)
while True:
while count >= mincount and \
(ctx.at_end() or ord(ctx.peek_char()) != char):
ctx.skip_char(-1)
count -= 1
if count < mincount:
break
ctx.state.string_position = ctx.string_position
child_context = ctx.push_new_context(ctx.peek_code(1) + 1)
#print("_sre.py:856:push_new_context")
yield False
if child_context.has_matched:
ctx.has_matched = True
yield True
ctx.skip_char(-1)
count -= 1
ctx.state.marks_pop_keep()
else:
# General case: backtracking
while count >= mincount:
ctx.state.string_position = ctx.string_position
child_context = ctx.push_new_context(ctx.peek_code(1) + 1)
yield False
if child_context.has_matched:
ctx.has_matched = True
yield True
ctx.skip_char(-1)
count -= 1
ctx.state.marks_pop_keep()
ctx.state.marks_pop_discard()
ctx.has_matched = False
#ctx.has_matched = True # <== this should be True (so match object gets returned to program)
yield True
def op_min_repeat_one(self, ctx):
# match repeated sequence (minimizing)
# <MIN_REPEAT_ONE> <skip> <1=min> <2=max> item <SUCCESS> tail
mincount = ctx.peek_code(2)
maxcount = ctx.peek_code(3)
#self._log(ctx, "MIN_REPEAT_ONE", mincount, maxcount)
if ctx.remaining_chars() < mincount:
ctx.has_matched = False
yield True
ctx.state.string_position = ctx.string_position
if mincount == 0:
count = 0
else:
count = self.count_repetitions(ctx, mincount)
if count < mincount:
ctx.has_matched = False
#print('_sre.py:line891, update context.has_matched variable')
yield True
ctx.skip_char(count)
if ctx.peek_code(ctx.peek_code(1) + 1) == OPCODES["success"]:
# tail is empty. we're finished
ctx.state.string_position = ctx.string_position
ctx.has_matched = True
yield True
ctx.state.marks_push()
while maxcount == MAXREPEAT or count <= maxcount:
ctx.state.string_position = ctx.string_position
child_context = ctx.push_new_context(ctx.peek_code(1) + 1)
#print('_sre.py:916:push new context')
yield False
if child_context.has_matched:
ctx.has_matched = True
yield True
ctx.state.string_position = ctx.string_position
if self.count_repetitions(ctx, 1) == 0:
break
ctx.skip_char(1)
count += 1
ctx.state.marks_pop_keep()
ctx.state.marks_pop_discard()
ctx.has_matched = False
yield True
def op_repeat(self, ctx):
# create repeat context. all the hard work is done by the UNTIL
# operator (MAX_UNTIL, MIN_UNTIL)
# <REPEAT> <skip> <1=min> <2=max> item <UNTIL> tail
#self._log(ctx, "REPEAT", ctx.peek_code(2), ctx.peek_code(3))
#if ctx.state.repeat is None:
# print("951:ctx.state.repeat is None")
# #ctx.state.repeat=_RepeatContext(ctx)
repeat = _RepeatContext(ctx)
ctx.state.repeat = repeat
ctx.state.string_position = ctx.string_position
child_context = ctx.push_new_context(ctx.peek_code(1) + 1)
#print("_sre.py:941:push new context", id(child_context))
#print(child_context.state.repeat)
#print(ctx.state.repeat)
# are these two yields causing the issue?
yield False
ctx.state.repeat = repeat.previous
ctx.has_matched = child_context.has_matched
yield True
def op_max_until(self, ctx):
# maximizing repeat
# <REPEAT> <skip> <1=min> <2=max> item <MAX_UNTIL> tail
repeat = ctx.state.repeat
#print("op_max_until") #, id(ctx.state.repeat))
if repeat is None:
#print(id(ctx), id(ctx.state))
raise RuntimeError("Internal re error: MAX_UNTIL without REPEAT.")
mincount = repeat.peek_code(2)
maxcount = repeat.peek_code(3)
ctx.state.string_position = ctx.string_position
count = repeat.count + 1
#self._log(ctx, "MAX_UNTIL", count)
if count < mincount:
# not enough matches
repeat.count = count
child_context = repeat.push_new_context(4)
yield False
ctx.has_matched = child_context.has_matched
if not ctx.has_matched:
repeat.count = count - 1
ctx.state.string_position = ctx.string_position
yield True
if (count < maxcount or maxcount == MAXREPEAT) \
and ctx.state.string_position != repeat.last_position:
# we may have enough matches, if we can match another item, do so
repeat.count = count
ctx.state.marks_push()
save_last_position = repeat.last_position # zero-width match protection
repeat.last_position = ctx.state.string_position
child_context = repeat.push_new_context(4)
yield False
repeat.last_position = save_last_position
if child_context.has_matched:
ctx.state.marks_pop_discard()
ctx.has_matched = True
yield True
ctx.state.marks_pop()
repeat.count = count - 1
ctx.state.string_position = ctx.string_position
# cannot match more repeated items here. make sure the tail matches
ctx.state.repeat = repeat.previous
child_context = ctx.push_new_context(1)
#print("_sre.py:987:op_max_until")
yield False
ctx.has_matched = child_context.has_matched
if not ctx.has_matched:
ctx.state.repeat = repeat
ctx.state.string_position = ctx.string_position
yield True
def op_min_until(self, ctx):
# minimizing repeat
# <REPEAT> <skip> <1=min> <2=max> item <MIN_UNTIL> tail
repeat = ctx.state.repeat
if repeat is None:
raise RuntimeError("Internal re error: MIN_UNTIL without REPEAT.")
mincount = repeat.peek_code(2)
maxcount = repeat.peek_code(3)
ctx.state.string_position = ctx.string_position
count = repeat.count + 1
#self._log(ctx, "MIN_UNTIL", count)
if count < mincount:
# not enough matches
repeat.count = count
child_context = repeat.push_new_context(4)
yield False
ctx.has_matched = child_context.has_matched
if not ctx.has_matched:
repeat.count = count - 1
ctx.state.string_position = ctx.string_position
yield True
# see if the tail matches
ctx.state.marks_push()
ctx.state.repeat = repeat.previous
child_context = ctx.push_new_context(1)
#print('_sre.py:1022:push new context')
yield False
if child_context.has_matched:
ctx.has_matched = True
yield True
ctx.state.repeat = repeat
ctx.state.string_position = ctx.string_position
ctx.state.marks_pop()
# match more until tail matches
if count >= maxcount and maxcount != MAXREPEAT:
ctx.has_matched = False
#print('_sre.py:line1022, update context.has_matched variable')
yield True
repeat.count = count
child_context = repeat.push_new_context(4)
yield False
ctx.has_matched = child_context.has_matched
if not ctx.has_matched:
repeat.count = count - 1
ctx.state.string_position = ctx.string_position
yield True
def general_op_groupref(self, ctx, decorate=lambda x: x):
group_start, group_end = ctx.state.get_marks(ctx.peek_code(1))
if group_start is None or group_end is None or group_end < group_start:
ctx.has_matched = False
return True
while group_start < group_end:
if ctx.at_end() or decorate(ord(ctx.peek_char())) \
!= decorate(ord(ctx.state.string[group_start])):
ctx.has_matched = False
#print('_sre.py:line1042, update context.has_matched variable')
return True
group_start += 1
ctx.skip_char(1)
ctx.skip_code(2)
return True
def op_groupref(self, ctx):
# match backreference
# <GROUPREF> <zero-based group index>
#self._log(ctx, "GROUPREF", ctx.peek_code(1))
return self.general_op_groupref(ctx)
def op_groupref_ignore(self, ctx):
# match backreference case-insensitive
# <GROUPREF_IGNORE> <zero-based group index>
#self._log(ctx, "GROUPREF_IGNORE", ctx.peek_code(1))
return self.general_op_groupref(ctx, ctx.state.lower)
def op_groupref_exists(self, ctx):
# <GROUPREF_EXISTS> <group> <skip> codeyes <JUMP> codeno ...
#self._log(ctx, "GROUPREF_EXISTS", ctx.peek_code(1))
group_start, group_end = ctx.state.get_marks(ctx.peek_code(1))
if group_start is None or group_end is None or group_end < group_start:
ctx.skip_code(ctx.peek_code(2) + 1)
else:
ctx.skip_code(3)
return True
def op_assert(self, ctx):
# assert subpattern
# <ASSERT> <skip> <back> <pattern>
#self._log(ctx, "ASSERT", ctx.peek_code(2))
ctx.state.string_position = ctx.string_position - ctx.peek_code(2)
if ctx.state.string_position < 0:
ctx.has_matched = False
yield True
child_context = ctx.push_new_context(3)
yield False
if child_context.has_matched:
ctx.skip_code(ctx.peek_code(1) + 1)
else:
ctx.has_matched = False
yield True
def op_assert_not(self, ctx):
# assert not subpattern
# <ASSERT_NOT> <skip> <back> <pattern>
#self._log(ctx, "ASSERT_NOT", ctx.peek_code(2))
ctx.state.string_position = ctx.string_position - ctx.peek_code(2)
if ctx.state.string_position >= 0:
child_context = ctx.push_new_context(3)
yield False
if child_context.has_matched:
ctx.has_matched = False
yield True
ctx.skip_code(ctx.peek_code(1) + 1)
yield True
def unknown(self, ctx):
#self._log(ctx, "UNKNOWN", ctx.peek_code())
raise RuntimeError("Internal re error. Unknown opcode: %s" % ctx.peek_code())
def check_charset(self, ctx, char):
"""Checks whether a character matches set of arbitrary length. Assumes
the code pointer is at the first member of the set."""
self.set_dispatcher.reset(char)
save_position = ctx.code_position
result = None
while result is None:
result = self.set_dispatcher.dispatch(ctx.peek_code(), ctx)
ctx.code_position = save_position
#print("_sre.py:1123:check_charset", result)
return result
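# For illustration (comment only; the exact layout emitted by sre_compile may
# differ): a character class such as [a-z0] is encoded as a flat run of set
# opcodes terminated by FAILURE, roughly
#   [OPCODES["range"], ord('a'), ord('z'),
#    OPCODES["literal"], ord('0'),
#    OPCODES["failure"]]
# check_charset() walks that run via _CharsetDispatcher until one member
# matches (result True) or FAILURE is reached (result False).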
def count_repetitions(self, ctx, maxcount):
"""Returns the number of repetitions of a single item, starting from the
current string position. The code pointer is expected to point to a
REPEAT_ONE operation (with the repeated item 4 code positions ahead)."""
count = 0
real_maxcount = ctx.state.end - ctx.string_position
if maxcount < real_maxcount and maxcount != MAXREPEAT:
real_maxcount = maxcount
# XXX could special case every single character pattern here, as in C.
# This is a general solution, a bit hackisch, but works and should be
# efficient.
code_position = ctx.code_position
string_position = ctx.string_position
ctx.skip_code(4)
reset_position = ctx.code_position
while count < real_maxcount:
# this works because the single character pattern is followed by
# a success opcode
ctx.code_position = reset_position
self.dispatch(ctx.peek_code(), ctx)
#print("count_repetitions", ctx.has_matched, count)
if ctx.has_matched is False: # could be None as well
break
count += 1
ctx.has_matched = None
ctx.code_position = code_position
ctx.string_position = string_position
return count
def _log(self, context, opname, *args):
arg_string = ("%s " * len(args)) % args
_log("|%s|%s|%s %s" % (context.pattern_codes,
context.string_position, opname, arg_string))
_OpcodeDispatcher.build_dispatch_table(OPCODES, "op_")
class _CharsetDispatcher(_Dispatcher):
def __init__(self):
self.ch_dispatcher = _ChcodeDispatcher()
def reset(self, char):
self.char = char
self.ok = True
def set_failure(self, ctx):
return not self.ok
def set_literal(self, ctx):
# <LITERAL> <code>
if ctx.peek_code(1) == self.char:
return self.ok
else:
ctx.skip_code(2)
def set_category(self, ctx):
# <CATEGORY> <code>
if self.ch_dispatcher.dispatch(ctx.peek_code(1), ctx):
return self.ok
else:
ctx.skip_code(2)
def set_charset(self, ctx):
# <CHARSET> <bitmap> (16 bits per code word)
char_code = self.char
ctx.skip_code(1) # point to beginning of bitmap
if CODESIZE == 2:
if char_code < 256 and ctx.peek_code(char_code >> 4) \
& (1 << (char_code & 15)):
return self.ok
ctx.skip_code(16) # skip bitmap
else:
if char_code < 256 and ctx.peek_code(char_code >> 5) \
& (1 << (char_code & 31)):
return self.ok
ctx.skip_code(8) # skip bitmap
def set_range(self, ctx):
# <RANGE> <lower> <upper>
if ctx.peek_code(1) <= self.char <= ctx.peek_code(2):
return self.ok
ctx.skip_code(3)
def set_negate(self, ctx):
self.ok = not self.ok
ctx.skip_code(1)
#fixme brython. array module doesn't exist
def set_bigcharset(self, ctx):
raise NotImplementedError("_sre.py: set_bigcharset, array not implemented")
# <BIGCHARSET> <blockcount> <256 blockindices> <blocks>
char_code = self.char
count = ctx.peek_code(1)
ctx.skip_code(2)
if char_code < 65536:
block_index = char_code >> 8
# NB: there are CODESIZE block indices per bytecode
a = array.array("B")
a.fromstring(array.array(CODESIZE == 2 and "H" or "I",
[ctx.peek_code(block_index // CODESIZE)]).tostring())
block = a[block_index % CODESIZE]
ctx.skip_code(256 // CODESIZE) # skip block indices
block_value = ctx.peek_code(block * (32 // CODESIZE)
+ ((char_code & 255) >> (CODESIZE == 2 and 4 or 5)))
if block_value & (1 << (char_code & ((8 * CODESIZE) - 1))):
return self.ok
else:
ctx.skip_code(256 // CODESIZE) # skip block indices
ctx.skip_code(count * (32 // CODESIZE)) # skip blocks
def unknown(self, ctx):
return False
_CharsetDispatcher.build_dispatch_table(OPCODES, "set_")
class _AtcodeDispatcher(_Dispatcher):
def at_beginning(self, ctx):
return ctx.at_beginning()
at_beginning_string = at_beginning
def at_beginning_line(self, ctx):
return ctx.at_beginning() or _is_linebreak(ctx.peek_char(-1))
def at_end(self, ctx):
return (ctx.remaining_chars() == 1 and ctx.at_linebreak()) or ctx.at_end()
def at_end_line(self, ctx):
return ctx.at_linebreak() or ctx.at_end()
def at_end_string(self, ctx):
return ctx.at_end()
def at_boundary(self, ctx):
return ctx.at_boundary(_is_word)
def at_non_boundary(self, ctx):
return not ctx.at_boundary(_is_word)
def at_loc_boundary(self, ctx):
return ctx.at_boundary(_is_loc_word)
def at_loc_non_boundary(self, ctx):
return not ctx.at_boundary(_is_loc_word)
def at_uni_boundary(self, ctx):
return ctx.at_boundary(_is_uni_word)
def at_uni_non_boundary(self, ctx):
return not ctx.at_boundary(_is_uni_word)
def unknown(self, ctx):
return False
_AtcodeDispatcher.build_dispatch_table(ATCODES, "")
class _ChcodeDispatcher(_Dispatcher):
def category_digit(self, ctx):
return _is_digit(ctx.peek_char())
def category_not_digit(self, ctx):
return not _is_digit(ctx.peek_char())
def category_space(self, ctx):
return _is_space(ctx.peek_char())
def category_not_space(self, ctx):
return not _is_space(ctx.peek_char())
def category_word(self, ctx):
return _is_word(ctx.peek_char())
def category_not_word(self, ctx):
return not _is_word(ctx.peek_char())
def category_linebreak(self, ctx):
return _is_linebreak(ctx.peek_char())
def category_not_linebreak(self, ctx):
return not _is_linebreak(ctx.peek_char())
def category_loc_word(self, ctx):
return _is_loc_word(ctx.peek_char())
def category_loc_not_word(self, ctx):
return not _is_loc_word(ctx.peek_char())
def category_uni_digit(self, ctx):
return ctx.peek_char().isdigit()
def category_uni_not_digit(self, ctx):
return not ctx.peek_char().isdigit()
def category_uni_space(self, ctx):
return ctx.peek_char().isspace()
def category_uni_not_space(self, ctx):
return not ctx.peek_char().isspace()
def category_uni_word(self, ctx):
return _is_uni_word(ctx.peek_char())
def category_uni_not_word(self, ctx):
return not _is_uni_word(ctx.peek_char())
def category_uni_linebreak(self, ctx):
return ord(ctx.peek_char()) in _uni_linebreaks
def category_uni_not_linebreak(self, ctx):
return ord(ctx.peek_char()) not in _uni_linebreaks
def unknown(self, ctx):
return False
_ChcodeDispatcher.build_dispatch_table(CHCODES, "")
_ascii_char_info = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 6, 2,
2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 25, 25, 25, 25, 25, 25, 25, 25,
25, 25, 0, 0, 0, 0, 0, 0, 0, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 0, 0,
0, 0, 16, 0, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 0, 0, 0, 0, 0 ]
def _is_digit(char):
code = ord(char)
return code < 128 and _ascii_char_info[code] & 1
def _is_space(char):
code = ord(char)
return code < 128 and _ascii_char_info[code] & 2
def _is_word(char):
# NB: non-ASCII chars aren't words according to _sre.c
code = ord(char)
return code < 128 and _ascii_char_info[code] & 16
def _is_loc_word(char):
return (not (ord(char) & ~255) and char.isalnum()) or char == '_'
def _is_uni_word(char):
# not valid in python 3
#return unichr(ord(char)).isalnum() or char == '_'
return chr(ord(char)).isalnum() or char == '_'
def _is_linebreak(char):
return char == "\n"
# Static list of all unicode codepoints reported by Py_UNICODE_ISLINEBREAK.
_uni_linebreaks = [10, 13, 28, 29, 30, 133, 8232, 8233]
def _log(message):
if 0:
print(message)
|
richardcypher/node-gyp
|
refs/heads/master
|
gyp/pylib/gyp/win_tool.py
|
231
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions for Windows builds.
These functions are executed via gyp-win-tool when using the ninja generator.
"""
from ctypes import windll, wintypes
import os
import shutil
import subprocess
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
def main(args):
executor = WinTool()
exit_code = executor.Dispatch(args)
if exit_code is not None:
sys.exit(exit_code)
class LinkLock(object):
"""A flock-style lock to limit the number of concurrent links to one.
Uses a session-local mutex based on the file's directory.
"""
def __enter__(self):
name = 'Local\\%s' % BASE_DIR.replace('\\', '_').replace(':', '_')
self.mutex = windll.kernel32.CreateMutexW(
wintypes.c_int(0),
wintypes.c_int(0),
wintypes.create_unicode_buffer(name))
assert self.mutex
result = windll.kernel32.WaitForSingleObject(
self.mutex, wintypes.c_int(0xFFFFFFFF))
# 0x80 means another process was killed without releasing the mutex, but
# that this process has been given ownership. This is fine for our
# purposes.
assert result in (0, 0x80), (
"%s, %s" % (result, windll.kernel32.GetLastError()))
def __exit__(self, type, value, traceback):
windll.kernel32.ReleaseMutex(self.mutex)
windll.kernel32.CloseHandle(self.mutex)
class WinTool(object):
"""This class performs all the Windows tooling steps. The methods can either
be executed directly, or dispatched from an argument list."""
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
return getattr(self, method)(*args[1:])
def _CommandifyName(self, name_string):
"""Transforms a tool name like recursive-mirror to RecursiveMirror."""
return name_string.title().replace('-', '')
def _GetEnv(self, arch):
"""Gets the saved environment from a file for a given architecture."""
# The environment is saved as an "environment block" (see CreateProcess
# and msvs_emulation for details). We convert to a dict here.
# Drop last 2 NULs, one for list terminator, one for trailing vs. separator.
pairs = open(arch).read()[:-2].split('\0')
kvs = [item.split('=', 1) for item in pairs]
return dict(kvs)
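# For illustration (comment only): the saved block is a sequence of
# NUL-separated KEY=VALUE pairs terminated by a double NUL, e.g.
#   'PATH=C:\\tools\x00TMP=C:\\temp\x00\x00'
# which the code above turns into {'PATH': 'C:\\tools', 'TMP': 'C:\\temp'}.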
def ExecStamp(self, path):
"""Simple stamp command."""
open(path, 'w').close()
def ExecRecursiveMirror(self, source, dest):
"""Emulation of rm -rf out && cp -af in out."""
if os.path.exists(dest):
if os.path.isdir(dest):
shutil.rmtree(dest)
else:
os.unlink(dest)
if os.path.isdir(source):
shutil.copytree(source, dest)
else:
shutil.copy2(source, dest)
def ExecLinkWrapper(self, arch, *args):
"""Filter diagnostic output from link that looks like:
' Creating library ui.dll.lib and object ui.dll.exp'
This happens when there are exports from the dll or exe.
"""
with LinkLock():
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.splitlines():
if not line.startswith(' Creating library '):
print line
return popen.returncode
def ExecManifestWrapper(self, arch, *args):
"""Run manifest tool with environment set. Strip out undesirable warning
(some XML blocks are recognized by the OS loader, but not the manifest
tool)."""
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.splitlines():
if line and 'manifest authoring warning 81010002' not in line:
print line
return popen.returncode
def ExecMidlWrapper(self, arch, outdir, tlb, h, dlldata, iid, proxy, idl,
*flags):
"""Filter noisy filenames output from MIDL compile step that isn't
quietable via command line flags.
"""
args = ['midl', '/nologo'] + list(flags) + [
'/out', outdir,
'/tlb', tlb,
'/h', h,
'/dlldata', dlldata,
'/iid', iid,
'/proxy', proxy,
idl]
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
# Filter junk out of stdout, and write filtered versions. Output we want
# to filter is pairs of lines that look like this:
# Processing C:\Program Files (x86)\Microsoft SDKs\...\include\objidl.idl
# objidl.idl
lines = out.splitlines()
prefix = 'Processing '
processing = set(os.path.basename(x) for x in lines if x.startswith(prefix))
for line in lines:
if not line.startswith(prefix) and line not in processing:
print line
return popen.returncode
def ExecAsmWrapper(self, arch, *args):
"""Filter logo banner from invocations of asm.exe."""
env = self._GetEnv(arch)
# MSVS doesn't assemble x64 asm files.
if arch == 'environment.x64':
return 0
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.splitlines():
if (not line.startswith('Copyright (C) Microsoft Corporation') and
not line.startswith('Microsoft (R) Macro Assembler') and
not line.startswith(' Assembling: ') and
line):
print line
return popen.returncode
def ExecRcWrapper(self, arch, *args):
"""Filter logo banner from invocations of rc.exe. Older versions of RC
don't support the /nologo flag."""
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.splitlines():
if (not line.startswith('Microsoft (R) Windows (R) Resource Compiler') and
not line.startswith('Copyright (C) Microsoft Corporation') and
line):
print line
return popen.returncode
def ExecActionWrapper(self, arch, rspfile, *dir):
"""Runs an action command line from a response file using the environment
for |arch|. If |dir| is supplied, use that as the working directory."""
env = self._GetEnv(arch)
args = open(rspfile).read()
dir = dir[0] if dir else None
popen = subprocess.Popen(args, shell=True, env=env, cwd=dir)
popen.wait()
return popen.returncode
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
yelizariev/pos-addons
|
refs/heads/9.0
|
tg_pos_packs/__openerp__.py
|
1
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 - Thierry Godin. All Rights Reserved
# @author Thierry Godin <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'TG customizable product packs',
'version': '1.0',
'category': 'Point of sale',
'author': 'Thierry Godin, IT-Projects LLC, Ivan Yelizariev',
'website': 'https://yelizariev.github.io/',
'summary': 'Create on demand packs of products for Point of sale',
'description': """
Allows the creation of products as Packs that can contain products with variants.
=====================================================================================
Building a Pack:
-------------------
- Create a product as usual (via Product Variants Menu) and name it, say Pack1
- Check Custom Pack checkbox
- Type of product should be set to Service automatically
- Go on Pack tab
- Add template products grouped by index: each group of templates will be displayed in a list in POS
- That's all
Packs are available in POS:
---------------------------
Packs are available in POS as usual products (visually).
When you select a pack, a new screen is displayed where you can select one template from each group, then pick the variant you set up previously.
The pack is added to the cart along with its products, but those products' prices are set to 0.00, on the receipt as well.
This way you can set a reduced price for the pack, even if the sum of the individual products would be higher (this is what packs are made for),
but you can also build your pack on demand, by making groups of templates.
Tested on odoo 8.0 33a8989d77f44b093214550b8f23cb386a990981
""",
'depends': [
'product', 'point_of_sale'
],
'data': [
'pos_product_pack_view.xml',
'security/ir.model.access.csv',
],
'qweb': [
'static/src/xml/pos.xml',
],
'installable': False,
'active': False,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Techcable/TechBot
|
refs/heads/master
|
plugins/minecraft_wiki.py
|
32
|
import re
import requests
from lxml import html
from cloudbot import hook
from cloudbot.util import formatting
api_url = "http://minecraft.gamepedia.com/api.php?action=opensearch"
mc_url = "http://minecraft.gamepedia.com/"
@hook.command()
def mcwiki(text):
"""mcwiki <phrase> - gets the first paragraph of the Minecraft Wiki article on <phrase>"""
try:
request = requests.get(api_url, params={'search': text.strip()})
request.raise_for_status()
j = request.json()
except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
return "Error fetching search results: {}".format(e)
except ValueError as e:
return "Error reading search results: {}".format(e)
if not j[1]:
return "No results found."
# we remove items with a '/' in the name, because
# gamepedia uses sub-pages for different languages
# for some stupid reason
items = [item for item in j[1] if "/" not in item]
if items:
article_name = items[0].replace(' ', '_').encode('utf8')
else:
# there are no items without /, just return a / one
article_name = j[1][0].replace(' ', '_').encode('utf8')
url = mc_url + requests.utils.quote(article_name, '')
try:
request_ = requests.get(url)
request_.raise_for_status()
except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
return "Error fetching wiki page: {}".format(e)
page = html.fromstring(request_.text)
for p in page.xpath('//div[@class="mw-content-ltr"]/p'):
if p.text_content():
summary = " ".join(p.text_content().splitlines())
summary = re.sub("\[\d+\]", "", summary)
summary = formatting.truncate(summary, 200)
return "{} :: {}".format(summary, url)
# this shouldn't happen
return "Unknown Error."
|
ccn-2m/django
|
refs/heads/master
|
tests/regressiontests/generic_inline_admin/urls.py
|
150
|
from __future__ import absolute_import
from django.conf.urls import patterns, include
from . import admin
urlpatterns = patterns('',
(r'^generic_inline_admin/admin/', include(admin.site.urls)),
)
|
astrobin/astrobin
|
refs/heads/master
|
astrobin_apps_groups/tasks.py
|
1
|
import logging
from celery import shared_task
from django.conf import settings
from django.contrib.auth.models import User
from django.urls import reverse
from astrobin_apps_groups.models import Group
from astrobin_apps_notifications.utils import push_notification, build_notification_url
logger = logging.getLogger('apps')
@shared_task(time_limit=120)
def push_notification_for_group_join_request_approval(group_pk, user_pk, moderator_pk):
try:
group = Group.objects.get(pk=group_pk)
except Group.DoesNotExist:
logger.warning('push_notification_for_group_join_request_approval: group not found: %d' % group_pk)
return
try:
user = User.objects.get(pk=user_pk)
except User.DoesNotExist:
logger.warning('push_notification_for_group_join_request_approval: user not found: %d' % user_pk)
return
try:
moderator = User.objects.get(pk=moderator_pk)
except User.DoesNotExist:
logger.warning('push_notification_for_group_join_request_approval: moderator not found: %d' % moderator_pk)
return
push_notification(
[user], moderator, 'group_join_request_approved',
{
'group_name': group.name,
'url': build_notification_url(
settings.BASE_URL + reverse('group_detail', args=(group.pk,)), moderator
),
})
@shared_task(time_limit=120)
def push_notification_for_group_join_request_rejection(group_pk, user_pk, moderator_pk):
try:
group = Group.objects.get(pk=group_pk)
except Group.DoesNotExist:
logger.warning('push_notification_for_group_join_request_rejection: group not found: %d' % group_pk)
return
try:
user = User.objects.get(pk=user_pk)
except User.DoesNotExist:
logger.warning('push_notification_for_group_join_request_rejection: user not found: %d' % user_pk)
return
try:
moderator = User.objects.get(pk=moderator_pk)
except User.DoesNotExist:
logger.warning('push_notification_for_group_join_request_rejection: moderator not found: %d' % moderator_pk)
return
push_notification(
[user], moderator, 'group_join_request_rejected',
{
'group_name': group.name,
'url': build_notification_url(
settings.BASE_URL + reverse('group_detail', args=(group.pk,)), moderator
),
})
|
spirrello/spirrello-pynet-work
|
refs/heads/master
|
applied_python/lib/python2.7/site-packages/pip/commands/list.py
|
84
|
from __future__ import absolute_import
import logging
import warnings
from pip.basecommand import Command
from pip.exceptions import DistributionNotFound
from pip.index import PackageFinder
from pip.req import InstallRequirement
from pip.utils import get_installed_distributions, dist_is_editable
from pip.utils.deprecation import RemovedInPip7Warning
from pip.cmdoptions import make_option_group, index_group
logger = logging.getLogger(__name__)
class ListCommand(Command):
"""
List installed packages, including editables.
Packages are listed in a case-insensitive sorted order.
"""
name = 'list'
usage = """
%prog [options]"""
summary = 'List installed packages.'
def __init__(self, *args, **kw):
super(ListCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(
'-o', '--outdated',
action='store_true',
default=False,
help='List outdated packages (excluding editables)')
cmd_opts.add_option(
'-u', '--uptodate',
action='store_true',
default=False,
help='List uptodate packages (excluding editables)')
cmd_opts.add_option(
'-e', '--editable',
action='store_true',
default=False,
help='List editable projects.')
cmd_opts.add_option(
'-l', '--local',
action='store_true',
default=False,
help=('If in a virtualenv that has global access, do not list '
'globally-installed packages.'),
)
self.cmd_opts.add_option(
'--user',
dest='user',
action='store_true',
default=False,
help='Only output packages installed in user-site.')
cmd_opts.add_option(
'--pre',
action='store_true',
default=False,
help=("Include pre-release and development versions. By default, "
"pip only finds stable versions."),
)
index_opts = make_option_group(index_group, self.parser)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, cmd_opts)
def _build_package_finder(self, options, index_urls, session):
"""
Create a package finder appropriate to this list command.
"""
return PackageFinder(
find_links=options.find_links,
index_urls=index_urls,
allow_external=options.allow_external,
allow_unverified=options.allow_unverified,
allow_all_external=options.allow_all_external,
allow_all_prereleases=options.pre,
trusted_hosts=options.trusted_hosts,
process_dependency_links=options.process_dependency_links,
session=session,
)
def run(self, options, args):
if options.outdated:
self.run_outdated(options)
elif options.uptodate:
self.run_uptodate(options)
elif options.editable:
self.run_editables(options)
else:
self.run_listing(options)
def run_outdated(self, options):
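        # A sample output line (illustrative): "requests (Current: 2.2.1 Latest: 2.5.0)"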
for dist, version in self.find_packages_latests_versions(options):
if version > dist.parsed_version:
logger.info(
'%s (Current: %s Latest: %s)',
dist.project_name, dist.version, version,
)
def find_packages_latests_versions(self, options):
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
logger.info('Ignoring indexes: %s', ','.join(index_urls))
index_urls = []
if options.use_mirrors:
warnings.warn(
"--use-mirrors has been deprecated and will be removed in the "
"future. Explicit uses of --index-url and/or --extra-index-url"
" is suggested.",
RemovedInPip7Warning,
)
if options.mirrors:
warnings.warn(
"--mirrors has been deprecated and will be removed in the "
"future. Explicit uses of --index-url and/or --extra-index-url"
" is suggested.",
RemovedInPip7Warning,
)
index_urls += options.mirrors
dependency_links = []
for dist in get_installed_distributions(local_only=options.local,
user_only=options.user):
if dist.has_metadata('dependency_links.txt'):
dependency_links.extend(
dist.get_metadata_lines('dependency_links.txt'),
)
with self._build_session(options) as session:
finder = self._build_package_finder(options, index_urls, session)
finder.add_dependency_links(dependency_links)
installed_packages = get_installed_distributions(
local_only=options.local,
user_only=options.user,
include_editables=False,
)
for dist in installed_packages:
req = InstallRequirement.from_line(
dist.key, None, isolated=options.isolated_mode,
)
try:
link = finder.find_requirement(req, True)
# If link is None, means installed version is most
# up-to-date
if link is None:
continue
except DistributionNotFound:
continue
else:
remote_version = finder._link_package_versions(
link, req.name
).version
yield dist, remote_version
def run_listing(self, options):
installed_packages = get_installed_distributions(
local_only=options.local,
user_only=options.user,
)
self.output_package_listing(installed_packages)
def run_editables(self, options):
installed_packages = get_installed_distributions(
local_only=options.local,
user_only=options.user,
editables_only=True,
)
self.output_package_listing(installed_packages)
def output_package_listing(self, installed_packages):
installed_packages = sorted(
installed_packages,
key=lambda dist: dist.project_name.lower(),
)
for dist in installed_packages:
if dist_is_editable(dist):
line = '%s (%s, %s)' % (
dist.project_name,
dist.version,
dist.location,
)
else:
line = '%s (%s)' % (dist.project_name, dist.version)
logger.info(line)
def run_uptodate(self, options):
uptodate = []
for dist, version in self.find_packages_latests_versions(options):
if dist.parsed_version == version:
uptodate.append(dist)
self.output_package_listing(uptodate)
|
lsiemens/iprocess-projects
|
refs/heads/master
|
psipy/animate_analytic.py
|
1
|
####
#
# Copyright (c) 2015, Jake Vanderplas, Luke Siemens
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
####
"""
Solve and animate the Schrodinger equation
First presented at http://jakevdp.github.com/blog/2012/09/05/quantum-python/
Authors:
- Jake Vanderplas <[email protected]>
- Luke Siemens (small modifications, switching from k to p-space)
License: BSD
"""
import matplotlib.pyplot as plt
from matplotlib import animation
import numpy as np
import analytic
import units
######################################################################
# Helper functions for gaussian wave-packets
def gauss_x(x, a, x0, k0):
"""
a gaussian wave packet of width a, centered at x0, with momentum k0
"""
return ((a * np.sqrt(np.pi)) ** (-0.5)
* np.exp(-0.5 * ((x - x0) * 1. / a) ** 2 + 1j * x * k0))
def gauss_p(p, a, x0, p0):
"""
analytical fourier transform of gauss_x(x), above
"""
return ((a / np.sqrt(np.pi)) ** 0.5
* np.exp(-0.5 * (a * (p - p0)) ** 2 - 1j * (p - p0) * x0))
######################################################################
# Utility functions for running the animation
def theta(x):
"""
theta function :
returns 0 if x<=0, and 1 if x>0
"""
x = np.asarray(x)
y = np.zeros(x.shape)
y[x > 0] = 1.0
return y
def square_barrier(x, width, height):
return height * (theta(x) - theta(x - width))
######################################################################
# Create the animation
unit_sys = units.units(10**-12, mode = "abs")
_T = unit_sys.get_T()
_T.set_format("{:1.3e}")
_E = unit_sys.get_E()
# specify time steps and duration
dt = 0.00041
N_steps = 100
t_max = 120
ylim = (-2.0, 2.0)
frames = int(100)
# specify constants
hbar = 1.0 # reduced Planck constant
m = 2.0 # particle mass
# specify range in x coordinate
N = 2 ** 13
dx = 6.0 / float(N)
x = dx * (np.arange(N) - 0.5 * N)
# specify potential
V0 = 1.5
L = x[-1]-x[0]
a = 3 * L
x0 = -60 * L
V_x = square_barrier(x, a, 0.5)
#V_x[x < -0.49] = 1E6
#V_x[x > 0.49] = 1E6
# specify initial momentum and quantities derived from it
p0 = np.sqrt(2 * m * 0.2 * V0)
dp2 = p0 * p0 * 1. / 80
d = hbar / np.sqrt(2 * dp2)
v0 = p0 / m
psi_x0 = gauss_x(x, 10, -50, 0.5)
# define the Schrodinger object which performs the calculations
S = analytic.harmonic_well(x=x, k=4000.0, m=m, dt=dt, L=L)
#S.eigenbasis(130, np.sqrt(2/float(L))*np.cos(11*np.pi*x/L+0.2))
#S.eigenbasis(50, gauss_x(x, 0.15, 0, 0))
S.eigenbasis(151, square_barrier(x+L/4.0, L/2, 1))
#S.add_eigenstate([1,3,6,7,4], [1.0,2.0,3.5,4.1,5.7])
#S.add_eigenstate([3,4], [1.0,1.0])
#S.add_eigenstate([1], [1.0j])
print 1*_E
print S.get_energy_n(3)*_E
print S.get_energy_n(4)*_E
######################################################################
# Set up plot
fig = plt.figure()
# plotting limits
xlim = (-3, 3)
plim = (-28, 28)
# top axes show the x-space data
ax1 = fig.add_subplot(111, xlim=xlim, ylim=ylim)
psi_x_line, = ax1.plot([], [], c='r', label=r'$|\psi(x)|$')
time = ax1.text(0, 0, "")
ax1.legend(prop=dict(size=12))
ax1.set_xlabel('$x$')
ax1.set_ylabel(r'$|\psi(x)|$')
######################################################################
# Functions to Animate the plot
def init():
psi_x_line.set_data([], [])
time.set_text("")
return (psi_x_line, time)
def animate(i):
S.time_step()
psi = S.get_psi()
psi = np.real(np.conj(psi)*psi)
psi_x_line.set_data(S.x, psi)
time.set_text("t = " + str(abs(S.t)*_T))
return (psi_x_line, time)
# call the animator.
# blit=True means only re-draw the parts that have changed.
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=frames, interval=30, blit=True)
# uncomment the following line to save the video in mp4 format. This
# requires either mencoder or ffmpeg to be installed on your system
#anim.save('schrodinger_barrier.mp4', fps=15,
# extra_args=['-vcodec', 'libx264'])
plt.show()
|
HousekeepLtd/django
|
refs/heads/master
|
tests/queryset_pickle/tests.py
|
209
|
from __future__ import unicode_literals
import datetime
import pickle
import unittest
import warnings
from django.test import TestCase
from django.utils import six
from django.utils.encoding import force_text
from django.utils.version import get_version
from .models import Container, Event, Group, Happening, M2MModel
class PickleabilityTestCase(TestCase):
def setUp(self):
Happening.objects.create() # make sure the defaults are working (#20158)
def assert_pickles(self, qs):
self.assertEqual(list(pickle.loads(pickle.dumps(qs))), list(qs))
def test_related_field(self):
g = Group.objects.create(name="Ponies Who Own Maybachs")
self.assert_pickles(Event.objects.filter(group=g.id))
def test_datetime_callable_default_all(self):
self.assert_pickles(Happening.objects.all())
def test_datetime_callable_default_filter(self):
self.assert_pickles(Happening.objects.filter(when=datetime.datetime.now()))
def test_string_as_default(self):
self.assert_pickles(Happening.objects.filter(name="test"))
def test_standalone_method_as_default(self):
self.assert_pickles(Happening.objects.filter(number1=1))
@unittest.skipIf(six.PY2, "Field doesn't exist on Python 2.")
def test_staticmethod_as_default(self):
self.assert_pickles(Happening.objects.filter(number2=1))
def test_filter_reverse_fk(self):
self.assert_pickles(Group.objects.filter(event=1))
def test_doesnotexist_exception(self):
# Ticket #17776
original = Event.DoesNotExist("Doesn't exist")
unpickled = pickle.loads(pickle.dumps(original))
# Exceptions are not equal to equivalent instances of themselves, so
# can't just use assertEqual(original, unpickled)
self.assertEqual(original.__class__, unpickled.__class__)
self.assertEqual(original.args, unpickled.args)
def test_manager_pickle(self):
pickle.loads(pickle.dumps(Happening.objects))
def test_model_pickle(self):
"""
Test that a model not defined on module level is pickleable.
"""
original = Container.SomeModel(pk=1)
dumped = pickle.dumps(original)
reloaded = pickle.loads(dumped)
self.assertEqual(original, reloaded)
# Also, deferred dynamic model works
Container.SomeModel.objects.create(somefield=1)
original = Container.SomeModel.objects.defer('somefield')[0]
dumped = pickle.dumps(original)
reloaded = pickle.loads(dumped)
self.assertEqual(original, reloaded)
self.assertEqual(original.somefield, reloaded.somefield)
def test_model_pickle_m2m(self):
"""
Test intentionally the automatically created through model.
"""
m1 = M2MModel.objects.create()
g1 = Group.objects.create(name='foof')
m1.groups.add(g1)
m2m_through = M2MModel._meta.get_field('groups').remote_field.through
original = m2m_through.objects.get()
dumped = pickle.dumps(original)
reloaded = pickle.loads(dumped)
self.assertEqual(original, reloaded)
def test_model_pickle_dynamic(self):
class Meta:
proxy = True
dynclass = type(str("DynamicEventSubclass"), (Event, ),
{'Meta': Meta, '__module__': Event.__module__})
original = dynclass(pk=1)
dumped = pickle.dumps(original)
reloaded = pickle.loads(dumped)
self.assertEqual(original, reloaded)
self.assertIs(reloaded.__class__, dynclass)
def test_specialized_queryset(self):
self.assert_pickles(Happening.objects.values('name'))
self.assert_pickles(Happening.objects.values('name').dates('when', 'year'))
# With related field (#14515)
self.assert_pickles(
Event.objects.select_related('group').order_by('title').values_list('title', 'group__name')
)
def test_pickle_prefetch_related_idempotence(self):
g = Group.objects.create(name='foo')
groups = Group.objects.prefetch_related('event_set')
# First pickling
groups = pickle.loads(pickle.dumps(groups))
self.assertQuerysetEqual(groups, [g], lambda x: x)
# Second pickling
groups = pickle.loads(pickle.dumps(groups))
self.assertQuerysetEqual(groups, [g], lambda x: x)
def test_pickle_prefetch_related_with_m2m_and_objects_deletion(self):
"""
#24831 -- Cached properties on ManyToOneRel created in QuerySet.delete()
caused subsequent QuerySet pickling to fail.
"""
g = Group.objects.create(name='foo')
m2m = M2MModel.objects.create()
m2m.groups.add(g)
Group.objects.all().delete()
m2ms = M2MModel.objects.prefetch_related('groups')
m2ms = pickle.loads(pickle.dumps(m2ms))
self.assertQuerysetEqual(m2ms, [m2m], lambda x: x)
def test_missing_django_version_unpickling(self):
"""
#21430 -- Verifies a warning is raised for querysets that are
unpickled without a Django version
"""
qs = Group.missing_django_version_objects.all()
with warnings.catch_warnings(record=True) as recorded:
pickle.loads(pickle.dumps(qs))
msg = force_text(recorded.pop().message)
self.assertEqual(msg,
"Pickled queryset instance's Django version is not specified.")
def test_unsupported_unpickle(self):
"""
#21430 -- Verifies a warning is raised for querysets that are
unpickled with a different Django version than the current
"""
qs = Group.previous_django_version_objects.all()
with warnings.catch_warnings(record=True) as recorded:
pickle.loads(pickle.dumps(qs))
msg = force_text(recorded.pop().message)
self.assertEqual(
msg,
"Pickled queryset instance's Django version 1.0 does not "
"match the current version %s." % get_version()
)
|
rooshilp/CMPUT410Lab5
|
refs/heads/master
|
env-lab5/lib/python2.7/site-packages/jinja2/testsuite/doctests.py
|
532
|
# -*- coding: utf-8 -*-
"""
jinja2.testsuite.doctests
~~~~~~~~~~~~~~~~~~~~~~~~~
The doctests. Collects all tests we want to test from
the Jinja modules.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import unittest
import doctest
def suite():
from jinja2 import utils, sandbox, runtime, meta, loaders, \
ext, environment, bccache, nodes
suite = unittest.TestSuite()
suite.addTest(doctest.DocTestSuite(utils))
suite.addTest(doctest.DocTestSuite(sandbox))
suite.addTest(doctest.DocTestSuite(runtime))
suite.addTest(doctest.DocTestSuite(meta))
suite.addTest(doctest.DocTestSuite(loaders))
suite.addTest(doctest.DocTestSuite(ext))
suite.addTest(doctest.DocTestSuite(environment))
suite.addTest(doctest.DocTestSuite(bccache))
suite.addTest(doctest.DocTestSuite(nodes))
return suite
|
gchp/django
|
refs/heads/master
|
tests/template_tests/filter_tests/test_stringformat.py
|
345
|
from django.template.defaultfilters import stringformat
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class StringformatTests(SimpleTestCase):
"""
Notice that escaping is applied *after* any filters, so the string
formatting here only needs to deal with pre-escaped characters.
"""
@setup({'stringformat01':
'{% autoescape off %}.{{ a|stringformat:"5s" }}. .{{ b|stringformat:"5s" }}.{% endautoescape %}'})
def test_stringformat01(self):
output = self.engine.render_to_string('stringformat01', {'a': 'a<b', 'b': mark_safe('a<b')})
self.assertEqual(output, '. a<b. . a<b.')
@setup({'stringformat02': '.{{ a|stringformat:"5s" }}. .{{ b|stringformat:"5s" }}.'})
def test_stringformat02(self):
output = self.engine.render_to_string('stringformat02', {'a': 'a<b', 'b': mark_safe('a<b')})
self.assertEqual(output, '. a<b. . a<b.')
class FunctionTests(SimpleTestCase):
def test_format(self):
self.assertEqual(stringformat(1, '03d'), '001')
def test_invalid(self):
self.assertEqual(stringformat(1, 'z'), '')
|
fosfataza/protwis
|
refs/heads/master
|
build_gpcr/management/commands/build_human_residues.py
|
3
|
from build.management.commands.build_human_residues import Command as BuildHumanResidues
class Command(BuildHumanResidues):
pass
|
feilongfl/micropython
|
refs/heads/master
|
tests/basics/class_contains.py
|
117
|
# A contains everything
class A:
def __contains__(self, key):
return True
a = A()
print(True in a)
print(1 in a)
print(() in a)
# B contains given things
class B:
def __init__(self, items):
self.items = items
def __contains__(self, key):
return key in self.items
b = B([])
print(1 in b)
b = B([1, 2])
print(1 in b)
print(2 in b)
print(3 in b)
|
jbq/uwsgi
|
refs/heads/master
|
plugins/mongodb/uwsgiplugin.py
|
13
|
NAME='mongodb'
CFLAGS = []
LDFLAGS = []
LIBS = ['-Wl,-whole-archive', '-lmongoclient', '-Wl,-no-whole-archive', '-lboost_thread', '-lboost_system', '-lboost_filesystem']
GCC_LIST = ['plugin']
|
petteyg/intellij-community
|
refs/heads/master
|
python/testData/mover/oneStatementInClass.py
|
83
|
class B(Exception):
pas<caret>s #<---here
class C(B):
pass
|
bgammill/namebench
|
refs/heads/master
|
cocoa/controller.py
|
176
|
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cocoa frontend implementation for namebench."""
__author__ = '[email protected] (Thomas Stromberg)'
import os
import sys
import traceback
from Foundation import *
from AppKit import *
from objc import IBAction, IBOutlet
from libnamebench import addr_util
from libnamebench import base_ui
from libnamebench import config
from libnamebench import conn_quality
from libnamebench import nameserver_list
from libnamebench import util
from libnamebench import version
# How much room do we have in the UI for status messages?
MAX_STATUS_LENGTH = 68
class controller(NSWindowController, base_ui.BaseUI):
"""Controller class associated with the main window."""
nameserver_form = IBOutlet()
include_global = IBOutlet()
include_regional = IBOutlet()
include_censorship_checks = IBOutlet()
data_source = IBOutlet()
health_performance = IBOutlet()
enable_sharing = IBOutlet()
location = IBOutlet()
query_count = IBOutlet()
run_count = IBOutlet()
status = IBOutlet()
spinner = IBOutlet()
button = IBOutlet()
def awakeFromNib(self):
"""Initializes our class, called automatically by Cocoa."""
self.SetupDataStructures()
self.resource_dir = os.path.join(os.getcwd(), 'namebench.app', 'Contents', 'Resources')
conf_file = util.FindDataFile('config/namebench.cfg')
(self.options, self.supplied_ns, self.global_ns, self.regional_ns) = config.GetConfiguration(filename=conf_file)
# TODO(tstromberg): Consider moving this into a thread for faster loading.
self.UpdateStatus('Discovering sources')
self.LoadDataSources()
self.UpdateStatus('Discovering location')
self.DiscoverLocation()
self.UpdateStatus('Populating Form...')
self.setFormDefaults()
self.UpdateStatus('namebench %s is ready!' % version.VERSION)
@IBAction
def startJob_(self, sender):
"""Trigger for the 'Start Benchmark' button, starts benchmark thread."""
self.ProcessForm()
self.UpdateStatus('Starting benchmark thread')
t = NSThread.alloc().initWithTarget_selector_object_(self, self.benchmarkThread, None)
t.start()
def UpdateStatus(self, message, count=None, total=None, error=False, debug=False):
"""Update the status message at the bottom of the window."""
if error:
return self.displayError(message, error)
if total:
state = '%s [%s/%s]' % (message, count, total)
elif count:
state = '%s%s' % (message, '.' * count)
else:
state = message
state = state.replace('%', '%%')
print state
NSLog(state)
self.status.setStringValue_(state[0:MAX_STATUS_LENGTH])
def ProcessForm(self):
"""Parse the form fields and populate class variables."""
self.UpdateStatus('Processing form inputs')
self.preferred = self.supplied_ns
self.include_internal = False
if not int(self.include_global.stringValue()):
self.UpdateStatus('Not using global')
self.global_ns = []
else:
self.preferred.extend(self.global_ns)
if not int(self.include_regional.stringValue()):
self.UpdateStatus('Not using regional')
self.regional_ns = []
if int(self.enable_sharing.stringValue()):
self.options.upload_results = True
if int(self.include_censorship_checks.stringValue()):
self.options.enable_censorship_checks = True
print self.health_performance.titleOfSelectedItem()
if 'Slow' in self.health_performance.titleOfSelectedItem():
self.options.health_thread_count = 10
self.options.input_source = self.data_src.ConvertSourceTitleToType(self.data_source.titleOfSelectedItem())
self.UpdateStatus('Supplied servers: %s' % self.nameserver_form.stringValue())
self.preferred.extend(addr_util.ExtractIPTuplesFromString(self.nameserver_form.stringValue()))
self.options.query_count = int(self.query_count.stringValue())
def benchmarkThread(self):
"""Run the benchmarks, designed to be run in a thread."""
pool = NSAutoreleasePool.alloc().init()
self.spinner.startAnimation_(self)
self.button.setEnabled_(False)
self.UpdateStatus('Preparing benchmark')
try:
self.PrepareTestRecords()
self.PrepareNameServers()
self.PrepareBenchmark()
self.RunAndOpenReports()
except nameserver_list.OutgoingUdpInterception:
(exc_type, exception, tb) = sys.exc_info()
self.UpdateStatus('Outgoing requests were intercepted!',
error=str(exception))
except nameserver_list.TooFewNameservers:
(exc_type, exception, tb) = sys.exc_info()
self.UpdateStatus('Too few nameservers to test', error=str(exception))
except conn_quality.OfflineConnection:
(exc_type, exception, tb) = sys.exc_info()
self.UpdateStatus('The connection appears to be offline!', error=str(exception))
except:
(exc_type, exception, tb) = sys.exc_info()
traceback.print_exc(tb)
error_msg = '\n'.join(traceback.format_tb(tb)[-4:])
self.UpdateStatus('FAIL: %s' % exception, error=error_msg)
self.spinner.stopAnimation_(self)
self.button.setEnabled_(True)
# This seems weird, but recommended by http://pyobjc.sourceforge.net/documentation/pyobjc-core/intro.html
del pool
def displayError(self, msg, details):
"""Display an alert drop-down message."""
NSLog('ERROR: %s - %s' % (msg, details))
alert = NSAlert.alloc().init()
alert.setMessageText_(msg)
alert.setInformativeText_(details)
buttonPressed = alert.runModal()
def setFormDefaults(self):
"""Set up the form with sane initial values."""
nameservers_string = ', '.join(nameserver_list.InternalNameServers())
self.nameserver_form.setStringValue_(nameservers_string)
    self.query_count.setStringValue_(self.options.query_count)
self.location.removeAllItems()
if self.country:
self.location.addItemWithTitle_(self.country)
self.location.addItemWithTitle_('(Other)')
else:
self.location.addItemWithTitle_('(automatic)')
self.health_performance.removeAllItems()
self.health_performance.addItemWithTitle_('Fast')
self.health_performance.addItemWithTitle_('Slow (unstable network)')
self.data_source.removeAllItems()
self.data_source.addItemsWithTitles_(self.data_src.ListSourceTitles())
|
saturnism/kubernetes
|
refs/heads/master
|
build/json-extractor.py
|
413
|
#!/usr/bin/env python
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a very simple utility that reads a JSON document from stdin, parses it
# and returns the specified value. The value is described using a simple dot
# notation. If any errors are encountered along the way, an error is output and
# a failure value is returned.
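# Illustrative usage (the document and query are hypothetical):
#   echo '{"items": [{"name": "foo"}]}' | json-extractor.py items.0
# would print the first element of "items" as indented JSON.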
from __future__ import print_function
import json
import sys
def PrintError(*err):
print(*err, file=sys.stderr)
def main():
try:
obj = json.load(sys.stdin)
except Exception, e:
PrintError("Error loading JSON: {0}".format(str(e)))
if len(sys.argv) == 1:
# if we don't have a query string, return success
return 0
elif len(sys.argv) > 2:
PrintError("Usage: {0} <json query>".format(sys.args[0]))
return 1
query_list = sys.argv[1].split('.')
for q in query_list:
if isinstance(obj, dict):
if q not in obj:
PrintError("Couldn't find '{0}' in dict".format(q))
return 1
obj = obj[q]
elif isinstance(obj, list):
try:
index = int(q)
except:
PrintError("Can't use '{0}' to index into array".format(q))
return 1
if index >= len(obj):
PrintError("Index ({0}) is greater than length of list ({1})".format(q, len(obj)))
return 1
obj = obj[index]
else:
PrintError("Trying to query non-queryable object: {0}".format(q))
return 1
if isinstance(obj, str):
print(obj)
else:
print(json.dumps(obj, indent=2))
if __name__ == "__main__":
sys.exit(main())
|
dovydas/mezzanine
|
refs/heads/master
|
mezzanine/galleries/translation.py
|
48
|
from modeltranslation.translator import translator, TranslationOptions
from mezzanine.core.translation import TranslatedRichText
from mezzanine.galleries.models import GalleryImage, Gallery
class TranslatedGallery(TranslatedRichText):
fields = ()
class TranslatedGalleryImage(TranslationOptions):
fields = ('description',)
translator.register(Gallery, TranslatedGallery)
translator.register(GalleryImage, TranslatedGalleryImage)
|
xfournet/intellij-community
|
refs/heads/master
|
python/helpers/py2only/docutils/parsers/rst/languages/zh_tw.py
|
128
|
# -*- coding: utf-8 -*-
# $Id: zh_tw.py 7119 2011-09-02 13:00:23Z milde $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Traditional Chinese language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
# language-dependent: fixed
'attention (translation required)': 'attention',
'caution (translation required)': 'caution',
'code (translation required)': 'code',
'danger (translation required)': 'danger',
'error (translation required)': 'error',
'hint (translation required)': 'hint',
'important (translation required)': 'important',
'note (translation required)': 'note',
'tip (translation required)': 'tip',
'warning (translation required)': 'warning',
'admonition (translation required)': 'admonition',
'sidebar (translation required)': 'sidebar',
'topic (translation required)': 'topic',
'line-block (translation required)': 'line-block',
'parsed-literal (translation required)': 'parsed-literal',
'rubric (translation required)': 'rubric',
'epigraph (translation required)': 'epigraph',
'highlights (translation required)': 'highlights',
'pull-quote (translation required)': 'pull-quote',
'compound (translation required)': 'compound',
u'container (translation required)': 'container',
#'questions (translation required)': 'questions',
'table (translation required)': 'table',
'csv-table (translation required)': 'csv-table',
'list-table (translation required)': 'list-table',
#'qa (translation required)': 'questions',
#'faq (translation required)': 'questions',
'meta (translation required)': 'meta',
'math (translation required)': 'math',
#'imagemap (translation required)': 'imagemap',
'image (translation required)': 'image',
'figure (translation required)': 'figure',
'include (translation required)': 'include',
'raw (translation required)': 'raw',
'replace (translation required)': 'replace',
'unicode (translation required)': 'unicode',
u'日期': 'date',
'class (translation required)': 'class',
'role (translation required)': 'role',
u'default-role (translation required)': 'default-role',
u'title (translation required)': 'title',
'contents (translation required)': 'contents',
'sectnum (translation required)': 'sectnum',
'section-numbering (translation required)': 'sectnum',
u'header (translation required)': 'header',
u'footer (translation required)': 'footer',
#'footnotes (translation required)': 'footnotes',
#'citations (translation required)': 'citations',
'target-notes (translation required)': 'target-notes',
'restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""Traditional Chinese name to registered (in directives/__init__.py)
directive name mapping."""
roles = {
# language-dependent: fixed
'abbreviation (translation required)': 'abbreviation',
'ab (translation required)': 'abbreviation',
'acronym (translation required)': 'acronym',
'ac (translation required)': 'acronym',
u'code (translation required)': 'code',
'index (translation required)': 'index',
'i (translation required)': 'index',
'subscript (translation required)': 'subscript',
'sub (translation required)': 'subscript',
'superscript (translation required)': 'superscript',
'sup (translation required)': 'superscript',
'title-reference (translation required)': 'title-reference',
'title (translation required)': 'title-reference',
't (translation required)': 'title-reference',
'pep-reference (translation required)': 'pep-reference',
'pep (translation required)': 'pep-reference',
'rfc-reference (translation required)': 'rfc-reference',
'rfc (translation required)': 'rfc-reference',
'emphasis (translation required)': 'emphasis',
'strong (translation required)': 'strong',
'literal (translation required)': 'literal',
'math (translation required)': 'math',
'named-reference (translation required)': 'named-reference',
'anonymous-reference (translation required)': 'anonymous-reference',
'footnote-reference (translation required)': 'footnote-reference',
'citation-reference (translation required)': 'citation-reference',
'substitution-reference (translation required)': 'substitution-reference',
'target (translation required)': 'target',
'uri-reference (translation required)': 'uri-reference',
'uri (translation required)': 'uri-reference',
'url (translation required)': 'uri-reference',
'raw (translation required)': 'raw',}
"""Mapping of Traditional Chinese role names to canonical role names for
interpreted text."""
|
GenericStudent/home-assistant
|
refs/heads/dev
|
tests/components/switch/test_reproduce_state.py
|
21
|
"""Test reproduce state for Switch."""
from homeassistant.core import State
from tests.common import async_mock_service
async def test_reproducing_states(hass, caplog):
"""Test reproducing Switch states."""
hass.states.async_set("switch.entity_off", "off", {})
hass.states.async_set("switch.entity_on", "on", {})
turn_on_calls = async_mock_service(hass, "switch", "turn_on")
turn_off_calls = async_mock_service(hass, "switch", "turn_off")
# These calls should do nothing as entities already in desired state
await hass.helpers.state.async_reproduce_state(
[State("switch.entity_off", "off"), State("switch.entity_on", "on", {})],
)
assert len(turn_on_calls) == 0
assert len(turn_off_calls) == 0
# Test invalid state is handled
await hass.helpers.state.async_reproduce_state(
[State("switch.entity_off", "not_supported")]
)
assert "not_supported" in caplog.text
assert len(turn_on_calls) == 0
assert len(turn_off_calls) == 0
# Make sure correct services are called
await hass.helpers.state.async_reproduce_state(
[
State("switch.entity_on", "off"),
State("switch.entity_off", "on", {}),
# Should not raise
State("switch.non_existing", "on"),
]
)
assert len(turn_on_calls) == 1
assert turn_on_calls[0].domain == "switch"
assert turn_on_calls[0].data == {"entity_id": "switch.entity_off"}
assert len(turn_off_calls) == 1
assert turn_off_calls[0].domain == "switch"
assert turn_off_calls[0].data == {"entity_id": "switch.entity_on"}
|
odoo-brazil/l10n-brazil-wip
|
refs/heads/10.0-develop
|
l10n_br_sale/__manifest__.py
|
3
|
# -*- coding: utf-8 -*-
# Copyright (C) 2009 Renato Lima - Akretion
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
{
'name': 'Brazilian Localization Sale',
'category': 'Localisation',
'license': 'AGPL-3',
'author': 'Akretion, Odoo Community Association (OCA)',
'website': 'http://odoo-brasil.org',
'version': '8.0.1.0.0',
'depends': [
'l10n_br_account',
'account_fiscal_position_rule_sale',
],
'data': [
'data/l10n_br_sale_data.xml',
'views/sale_view.xml',
'views/res_config_view.xml',
'security/ir.model.access.csv',
'security/l10n_br_sale_security.xml',
'report/sale_report_view.xml',
],
'test': [],
'demo': [],
'installable': False,
'auto_install': True,
}
|
IndraVikas/scikit-learn
|
refs/heads/master
|
examples/linear_model/plot_sgd_weighted_samples.py
|
344
|
"""
=====================
SGD: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight = 100 * np.abs(np.random.randn(20))
# and assign a bigger weight to the last 10 samples
sample_weight[:10] *= 10
# plot the weighted data points
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, s=sample_weight, alpha=0.9,
cmap=plt.cm.bone)
## fit the unweighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
no_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['solid'])
## fit the weighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y, sample_weight=sample_weight)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
samples_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['dashed'])
plt.legend([no_weights.collections[0], samples_weights.collections[0]],
["no weights", "with weights"], loc="lower left")
plt.xticks(())
plt.yticks(())
plt.show()
|
angad/libjingle-mac
|
refs/heads/master
|
scons-2.2.0/engine/SCons/Tool/JavaCommon.py
|
14
|
"""SCons.Tool.JavaCommon
Stuff for processing Java.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/JavaCommon.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import os
import os.path
import re
java_parsing = 1
default_java_version = '1.4'
if java_parsing:
# Parse Java files for class names.
#
# This is a really cool parser from Charles Crain
# that finds appropriate class names in Java source.
# A regular expression that will find, in a java file:
# newlines;
# double-backslashes;
# a single-line comment "//";
    # single or double quotes preceded by a backslash;
# single quotes, double quotes, open or close braces, semi-colons,
# periods, open or close parentheses;
# floating-point numbers;
# any alphanumeric token (keyword, class name, specifier);
# any alphanumeric token surrounded by angle brackets (generics);
# the multi-line comment begin and end tokens /* and */;
# array declarations "[]".
_reToken = re.compile(r'(\n|\\\\|//|\\[\'"]|[\'"\{\}\;\.\(\)]|' +
r'\d*\.\d*|[A-Za-z_][\w\$\.]*|<[A-Za-z_]\w+>|' +
r'/\*|\*/|\[\])')
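    # For example (illustrative), the source text 'class Foo { }' tokenizes to
    # 'class', 'Foo', '{' and '}' (plus any newlines in the input).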
class OuterState(object):
"""The initial state for parsing a Java file for classes,
interfaces, and anonymous inner classes."""
def __init__(self, version=default_java_version):
if not version in ('1.1', '1.2', '1.3','1.4', '1.5', '1.6',
'5', '6'):
msg = "Java version %s not supported" % version
raise NotImplementedError(msg)
self.version = version
self.listClasses = []
self.listOutputs = []
self.stackBrackets = []
self.brackets = 0
self.nextAnon = 1
self.localClasses = []
self.stackAnonClassBrackets = []
self.anonStacksStack = [[0]]
self.package = None
def trace(self):
pass
def __getClassState(self):
try:
return self.classState
except AttributeError:
ret = ClassState(self)
self.classState = ret
return ret
def __getPackageState(self):
try:
return self.packageState
except AttributeError:
ret = PackageState(self)
self.packageState = ret
return ret
def __getAnonClassState(self):
try:
return self.anonState
except AttributeError:
self.outer_state = self
ret = SkipState(1, AnonClassState(self))
self.anonState = ret
return ret
def __getSkipState(self):
try:
return self.skipState
except AttributeError:
ret = SkipState(1, self)
self.skipState = ret
return ret
def __getAnonStack(self):
return self.anonStacksStack[-1]
def openBracket(self):
self.brackets = self.brackets + 1
def closeBracket(self):
self.brackets = self.brackets - 1
if len(self.stackBrackets) and \
self.brackets == self.stackBrackets[-1]:
self.listOutputs.append('$'.join(self.listClasses))
self.localClasses.pop()
self.listClasses.pop()
self.anonStacksStack.pop()
self.stackBrackets.pop()
if len(self.stackAnonClassBrackets) and \
self.brackets == self.stackAnonClassBrackets[-1]:
self.__getAnonStack().pop()
self.stackAnonClassBrackets.pop()
def parseToken(self, token):
if token[:2] == '//':
return IgnoreState('\n', self)
elif token == '/*':
return IgnoreState('*/', self)
elif token == '{':
self.openBracket()
elif token == '}':
self.closeBracket()
elif token in [ '"', "'" ]:
return IgnoreState(token, self)
elif token == "new":
# anonymous inner class
if len(self.listClasses) > 0:
return self.__getAnonClassState()
return self.__getSkipState() # Skip the class name
elif token in ['class', 'interface', 'enum']:
if len(self.listClasses) == 0:
self.nextAnon = 1
self.stackBrackets.append(self.brackets)
return self.__getClassState()
elif token == 'package':
return self.__getPackageState()
elif token == '.':
# Skip the attribute, it might be named "class", in which
# case we don't want to treat the following token as
# an inner class name...
return self.__getSkipState()
return self
def addAnonClass(self):
"""Add an anonymous inner class"""
if self.version in ('1.1', '1.2', '1.3', '1.4'):
clazz = self.listClasses[0]
self.listOutputs.append('%s$%d' % (clazz, self.nextAnon))
elif self.version in ('1.5', '1.6', '5', '6'):
self.stackAnonClassBrackets.append(self.brackets)
className = []
className.extend(self.listClasses)
self.__getAnonStack()[-1] = self.__getAnonStack()[-1] + 1
for anon in self.__getAnonStack():
className.append(str(anon))
self.listOutputs.append('$'.join(className))
self.nextAnon = self.nextAnon + 1
self.__getAnonStack().append(0)
def setPackage(self, package):
self.package = package
class AnonClassState(object):
"""A state that looks for anonymous inner classes."""
def __init__(self, old_state):
# outer_state is always an instance of OuterState
self.outer_state = old_state.outer_state
self.old_state = old_state
self.brace_level = 0
def parseToken(self, token):
# This is an anonymous class if and only if the next
# non-whitespace token is a bracket. Everything between
# braces should be parsed as normal java code.
if token[:2] == '//':
return IgnoreState('\n', self)
elif token == '/*':
return IgnoreState('*/', self)
elif token == '\n':
return self
elif token[0] == '<' and token[-1] == '>':
return self
elif token == '(':
self.brace_level = self.brace_level + 1
return self
if self.brace_level > 0:
if token == 'new':
# look further for anonymous inner class
return SkipState(1, AnonClassState(self))
elif token in [ '"', "'" ]:
return IgnoreState(token, self)
elif token == ')':
self.brace_level = self.brace_level - 1
return self
if token == '{':
self.outer_state.addAnonClass()
return self.old_state.parseToken(token)
class SkipState(object):
"""A state that will skip a specified number of tokens before
reverting to the previous state."""
def __init__(self, tokens_to_skip, old_state):
self.tokens_to_skip = tokens_to_skip
self.old_state = old_state
def parseToken(self, token):
self.tokens_to_skip = self.tokens_to_skip - 1
if self.tokens_to_skip < 1:
return self.old_state
return self
class ClassState(object):
"""A state we go into when we hit a class or interface keyword."""
def __init__(self, outer_state):
# outer_state is always an instance of OuterState
self.outer_state = outer_state
def parseToken(self, token):
# the next non-whitespace token should be the name of the class
if token == '\n':
return self
# If that's an inner class which is declared in a method, it
# requires an index prepended to the class-name, e.g.
# 'Foo$1Inner' (Tigris Issue 2087)
if self.outer_state.localClasses and \
self.outer_state.stackBrackets[-1] > \
self.outer_state.stackBrackets[-2]+1:
locals = self.outer_state.localClasses[-1]
try:
idx = locals[token]
locals[token] = locals[token]+1
except KeyError:
locals[token] = 1
token = str(locals[token]) + token
self.outer_state.localClasses.append({})
self.outer_state.listClasses.append(token)
self.outer_state.anonStacksStack.append([0])
return self.outer_state
class IgnoreState(object):
"""A state that will ignore all tokens until it gets to a
specified token."""
def __init__(self, ignore_until, old_state):
self.ignore_until = ignore_until
self.old_state = old_state
def parseToken(self, token):
if self.ignore_until == token:
return self.old_state
return self
class PackageState(object):
"""The state we enter when we encounter the package keyword.
We assume the next token will be the package name."""
def __init__(self, outer_state):
# outer_state is always an instance of OuterState
self.outer_state = outer_state
def parseToken(self, token):
self.outer_state.setPackage(token)
return self.outer_state
def parse_java_file(fn, version=default_java_version):
return parse_java(open(fn, 'r').read(), version)
def parse_java(contents, version=default_java_version, trace=None):
"""Parse a .java file and return a double of package directory,
plus a list of .class files that compiling that .java file will
produce"""
package = None
initial = OuterState(version)
currstate = initial
for token in _reToken.findall(contents):
# The regex produces a bunch of groups, but only one will
# have anything in it.
currstate = currstate.parseToken(token)
if trace: trace(token, currstate)
if initial.package:
package = initial.package.replace('.', os.sep)
return (package, initial.listOutputs)
else:
# Don't actually parse Java files for class names.
#
# We might make this a configurable option in the future if
# Java-file parsing takes too long (although it shouldn't relative
# to how long the Java compiler itself seems to take...).
def parse_java_file(fn):
""" "Parse" a .java file.
This actually just splits the file name, so the assumption here
is that the file name matches the public class name, and that
the path to the file is the same as the package name.
"""
        return os.path.split(fn)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
deandunbar/html2bwml
|
refs/heads/master
|
venv/lib/python2.7/site-packages/django/contrib/formtools/wizard/storage/session.py
|
426
|
from django.contrib.formtools.wizard import storage
class SessionStorage(storage.BaseStorage):
def __init__(self, *args, **kwargs):
super(SessionStorage, self).__init__(*args, **kwargs)
if self.prefix not in self.request.session:
self.init_data()
def _get_data(self):
self.request.session.modified = True
return self.request.session[self.prefix]
def _set_data(self, value):
self.request.session[self.prefix] = value
self.request.session.modified = True
data = property(_get_data, _set_data)
|
lmregus/Portfolio
|
refs/heads/master
|
python/design_patterns/env/lib/python3.7/site-packages/setuptools/wheel.py
|
24
|
"""Wheels support."""
from distutils.util import get_platform
import email
import itertools
import os
import posixpath
import re
import zipfile
import pkg_resources
import setuptools
from pkg_resources import parse_version
from setuptools.extern.packaging.utils import canonicalize_name
from setuptools.extern.six import PY3
from setuptools import pep425tags
from setuptools.command.egg_info import write_requirements
__metaclass__ = type
WHEEL_NAME = re.compile(
r"""^(?P<project_name>.+?)-(?P<version>\d.*?)
((-(?P<build>\d.*?))?-(?P<py_version>.+?)-(?P<abi>.+?)-(?P<platform>.+?)
)\.whl$""",
re.VERBOSE).match
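# For example (illustrative), 'pip-18.0-py2.py3-none-any.whl' parses into
# project_name='pip', version='18.0', py_version='py2.py3', abi='none',
# platform='any'.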
NAMESPACE_PACKAGE_INIT = '''\
try:
__import__('pkg_resources').declare_namespace(__name__)
except ImportError:
__path__ = __import__('pkgutil').extend_path(__path__, __name__)
'''
def unpack(src_dir, dst_dir):
'''Move everything under `src_dir` to `dst_dir`, and delete the former.'''
for dirpath, dirnames, filenames in os.walk(src_dir):
subdir = os.path.relpath(dirpath, src_dir)
for f in filenames:
src = os.path.join(dirpath, f)
dst = os.path.join(dst_dir, subdir, f)
os.renames(src, dst)
for n, d in reversed(list(enumerate(dirnames))):
src = os.path.join(dirpath, d)
dst = os.path.join(dst_dir, subdir, d)
if not os.path.exists(dst):
# Directory does not exist in destination,
# rename it and prune it from os.walk list.
os.renames(src, dst)
del dirnames[n]
# Cleanup.
for dirpath, dirnames, filenames in os.walk(src_dir, topdown=True):
assert not filenames
os.rmdir(dirpath)
class Wheel:
def __init__(self, filename):
match = WHEEL_NAME(os.path.basename(filename))
if match is None:
raise ValueError('invalid wheel name: %r' % filename)
self.filename = filename
for k, v in match.groupdict().items():
setattr(self, k, v)
def tags(self):
'''List tags (py_version, abi, platform) supported by this wheel.'''
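        # For a wheel tagged 'py2.py3-none-any' (illustrative), this yields
        # ('py2', 'none', 'any') and ('py3', 'none', 'any').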
return itertools.product(
self.py_version.split('.'),
self.abi.split('.'),
self.platform.split('.'),
)
def is_compatible(self):
        '''Is the wheel compatible with the current platform?'''
supported_tags = pep425tags.get_supported()
return next((True for t in self.tags() if t in supported_tags), False)
def egg_name(self):
return pkg_resources.Distribution(
project_name=self.project_name, version=self.version,
platform=(None if self.platform == 'any' else get_platform()),
).egg_name() + '.egg'
def get_dist_info(self, zf):
# find the correct name of the .dist-info dir in the wheel file
for member in zf.namelist():
dirname = posixpath.dirname(member)
if (dirname.endswith('.dist-info') and
canonicalize_name(dirname).startswith(
canonicalize_name(self.project_name))):
return dirname
raise ValueError("unsupported wheel format. .dist-info not found")
def install_as_egg(self, destination_eggdir):
'''Install wheel as an egg directory.'''
with zipfile.ZipFile(self.filename) as zf:
self._install_as_egg(destination_eggdir, zf)
def _install_as_egg(self, destination_eggdir, zf):
dist_basename = '%s-%s' % (self.project_name, self.version)
dist_info = self.get_dist_info(zf)
dist_data = '%s.data' % dist_basename
egg_info = os.path.join(destination_eggdir, 'EGG-INFO')
self._convert_metadata(zf, destination_eggdir, dist_info, egg_info)
self._move_data_entries(destination_eggdir, dist_data)
self._fix_namespace_packages(egg_info, destination_eggdir)
@staticmethod
def _convert_metadata(zf, destination_eggdir, dist_info, egg_info):
def get_metadata(name):
with zf.open(posixpath.join(dist_info, name)) as fp:
value = fp.read().decode('utf-8') if PY3 else fp.read()
return email.parser.Parser().parsestr(value)
wheel_metadata = get_metadata('WHEEL')
# Check wheel format version is supported.
wheel_version = parse_version(wheel_metadata.get('Wheel-Version'))
wheel_v1 = (
parse_version('1.0') <= wheel_version < parse_version('2.0dev0')
)
if not wheel_v1:
raise ValueError(
'unsupported wheel format version: %s' % wheel_version)
# Extract to target directory.
os.mkdir(destination_eggdir)
zf.extractall(destination_eggdir)
# Convert metadata.
dist_info = os.path.join(destination_eggdir, dist_info)
dist = pkg_resources.Distribution.from_location(
destination_eggdir, dist_info,
metadata=pkg_resources.PathMetadata(destination_eggdir, dist_info),
)
# Note: Evaluate and strip markers now,
# as it's difficult to convert back from the syntax:
# foobar; "linux" in sys_platform and extra == 'test'
def raw_req(req):
req.marker = None
return str(req)
install_requires = list(sorted(map(raw_req, dist.requires())))
extras_require = {
extra: sorted(
req
for req in map(raw_req, dist.requires((extra,)))
if req not in install_requires
)
for extra in dist.extras
}
os.rename(dist_info, egg_info)
os.rename(
os.path.join(egg_info, 'METADATA'),
os.path.join(egg_info, 'PKG-INFO'),
)
setup_dist = setuptools.Distribution(
attrs=dict(
install_requires=install_requires,
extras_require=extras_require,
),
)
write_requirements(
setup_dist.get_command_obj('egg_info'),
None,
os.path.join(egg_info, 'requires.txt'),
)
@staticmethod
def _move_data_entries(destination_eggdir, dist_data):
"""Move data entries to their correct location."""
dist_data = os.path.join(destination_eggdir, dist_data)
dist_data_scripts = os.path.join(dist_data, 'scripts')
if os.path.exists(dist_data_scripts):
egg_info_scripts = os.path.join(
destination_eggdir, 'EGG-INFO', 'scripts')
os.mkdir(egg_info_scripts)
for entry in os.listdir(dist_data_scripts):
# Remove bytecode, as it's not properly handled
# during easy_install scripts install phase.
if entry.endswith('.pyc'):
os.unlink(os.path.join(dist_data_scripts, entry))
else:
os.rename(
os.path.join(dist_data_scripts, entry),
os.path.join(egg_info_scripts, entry),
)
os.rmdir(dist_data_scripts)
for subdir in filter(os.path.exists, (
os.path.join(dist_data, d)
for d in ('data', 'headers', 'purelib', 'platlib')
)):
unpack(subdir, destination_eggdir)
if os.path.exists(dist_data):
os.rmdir(dist_data)
@staticmethod
def _fix_namespace_packages(egg_info, destination_eggdir):
namespace_packages = os.path.join(
egg_info, 'namespace_packages.txt')
if os.path.exists(namespace_packages):
with open(namespace_packages) as fp:
namespace_packages = fp.read().split()
for mod in namespace_packages:
mod_dir = os.path.join(destination_eggdir, *mod.split('.'))
mod_init = os.path.join(mod_dir, '__init__.py')
if os.path.exists(mod_dir) and not os.path.exists(mod_init):
with open(mod_init, 'w') as fp:
fp.write(NAMESPACE_PACKAGE_INIT)
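# Illustrative usage sketch, not part of the module above: converting a wheel
# into an egg with the wheel class these methods belong to (assumed here to be
# named ``Wheel``). The filename and destination directory are hypothetical.
#
#   w = Wheel('example_pkg-1.0-py2.py3-none-any.whl')
#   if w.is_compatible():
#       w.install_as_egg(os.path.join(destination, w.egg_name()))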
|
kjw0106/GCM_app_server
|
refs/heads/master
|
venv/lib/python2.7/site-packages/pip/wheel.py
|
145
|
"""
Support for installing and building the "wheel" binary package format.
"""
from __future__ import with_statement
import compileall
import csv
import functools
import hashlib
import os
import re
import shutil
import sys
from base64 import urlsafe_b64encode
from email.parser import Parser
from pip.backwardcompat import ConfigParser, StringIO
from pip.exceptions import InvalidWheelFilename, UnsupportedWheel
from pip.locations import distutils_scheme
from pip.log import logger
from pip import pep425tags
from pip.util import call_subprocess, normalize_path, make_path_relative
from pip._vendor import pkg_resources
from pip._vendor.distlib.scripts import ScriptMaker
wheel_ext = '.whl'
VERSION_COMPATIBLE = (1, 0)
def rehash(path, algo='sha256', blocksize=1<<20):
"""Return (hash, length) for path using hashlib.new(algo)"""
h = hashlib.new(algo)
length = 0
with open(path, 'rb') as f:
block = f.read(blocksize)
while block:
length += len(block)
h.update(block)
block = f.read(blocksize)
    digest = algo + '=' + urlsafe_b64encode(h.digest()).decode('latin1').rstrip('=')
return (digest, length)
try:
unicode
def binary(s):
if isinstance(s, unicode):
return s.encode('ascii')
return s
except NameError:
def binary(s):
if isinstance(s, str):
            return s.encode('ascii')
        return s
def open_for_csv(name, mode):
if sys.version_info[0] < 3:
nl = {}
bin = 'b'
else:
nl = { 'newline': '' }
bin = ''
return open(name, mode + bin, **nl)
def fix_script(path):
"""Replace #!python with #!/path/to/python
Return True if file was changed."""
# XXX RECORD hashes will need to be updated
if os.path.isfile(path):
script = open(path, 'rb')
try:
firstline = script.readline()
if not firstline.startswith(binary('#!python')):
return False
exename = sys.executable.encode(sys.getfilesystemencoding())
firstline = binary('#!') + exename + binary(os.linesep)
rest = script.read()
finally:
script.close()
script = open(path, 'wb')
try:
script.write(firstline)
script.write(rest)
finally:
script.close()
return True
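# Illustrative example of the rewrite performed by fix_script (the resulting
# path depends on the running interpreter; the value below is hypothetical):
#
#   before:  #!python
#   after:   #!/usr/bin/python3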
dist_info_re = re.compile(r"""^(?P<namever>(?P<name>.+?)(-(?P<ver>\d.+?))?)
\.dist-info$""", re.VERBOSE)
def root_is_purelib(name, wheeldir):
"""
Return True if the extracted wheel in wheeldir should go into purelib.
"""
name_folded = name.replace("-", "_")
for item in os.listdir(wheeldir):
match = dist_info_re.match(item)
if match and match.group('name') == name_folded:
with open(os.path.join(wheeldir, item, 'WHEEL')) as wheel:
for line in wheel:
line = line.lower().rstrip()
if line == "root-is-purelib: true":
return True
return False
def get_entrypoints(filename):
if not os.path.exists(filename):
return {}, {}
# This is done because you can pass a string to entry_points wrappers which
# means that they may or may not be valid INI files. The attempt here is to
# strip leading and trailing whitespace in order to make them valid INI
# files.
with open(filename) as fp:
data = StringIO()
for line in fp:
data.write(line.strip())
data.write("\n")
data.seek(0)
cp = ConfigParser.RawConfigParser()
cp.readfp(data)
console = {}
gui = {}
if cp.has_section('console_scripts'):
console = dict(cp.items('console_scripts'))
if cp.has_section('gui_scripts'):
gui = dict(cp.items('gui_scripts'))
return console, gui
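# Illustrative sketch (file contents hypothetical): for an entry_points.txt of
#
#   [console_scripts]
#   example = example.cli:main
#
# get_entrypoints() returns ({'example': 'example.cli:main'}, {}).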
def move_wheel_files(name, req, wheeldir, user=False, home=None, root=None,
pycompile=True):
"""Install a wheel"""
scheme = distutils_scheme(name, user=user, home=home, root=root)
if root_is_purelib(name, wheeldir):
lib_dir = scheme['purelib']
else:
lib_dir = scheme['platlib']
info_dir = []
data_dirs = []
source = wheeldir.rstrip(os.path.sep) + os.path.sep
# Record details of the files moved
# installed = files copied from the wheel to the destination
# changed = files changed while installing (scripts #! line typically)
# generated = files newly generated during the install (script wrappers)
installed = {}
changed = set()
generated = []
# Compile all of the pyc files that we're going to be installing
if pycompile:
compileall.compile_dir(source, force=True, quiet=True)
def normpath(src, p):
return make_path_relative(src, p).replace(os.path.sep, '/')
def record_installed(srcfile, destfile, modified=False):
"""Map archive RECORD paths to installation RECORD paths."""
oldpath = normpath(srcfile, wheeldir)
newpath = normpath(destfile, lib_dir)
installed[oldpath] = newpath
if modified:
changed.add(destfile)
def clobber(source, dest, is_base, fixer=None, filter=None):
if not os.path.exists(dest): # common for the 'include' path
os.makedirs(dest)
for dir, subdirs, files in os.walk(source):
basedir = dir[len(source):].lstrip(os.path.sep)
if is_base and basedir.split(os.path.sep, 1)[0].endswith('.data'):
continue
for s in subdirs:
destsubdir = os.path.join(dest, basedir, s)
if is_base and basedir == '' and destsubdir.endswith('.data'):
data_dirs.append(s)
continue
elif (is_base
and s.endswith('.dist-info')
# is self.req.project_name case preserving?
and s.lower().startswith(req.project_name.replace('-', '_').lower())):
assert not info_dir, 'Multiple .dist-info directories'
info_dir.append(destsubdir)
if not os.path.exists(destsubdir):
os.makedirs(destsubdir)
for f in files:
# Skip unwanted files
if filter and filter(f):
continue
srcfile = os.path.join(dir, f)
destfile = os.path.join(dest, basedir, f)
shutil.move(srcfile, destfile)
changed = False
if fixer:
changed = fixer(destfile)
record_installed(srcfile, destfile, changed)
clobber(source, lib_dir, True)
assert info_dir, "%s .dist-info directory not found" % req
# Get the defined entry points
ep_file = os.path.join(info_dir[0], 'entry_points.txt')
console, gui = get_entrypoints(ep_file)
def is_entrypoint_wrapper(name):
# EP, EP.exe and EP-script.py are scripts generated for
# entry point EP by setuptools
if name.lower().endswith('.exe'):
matchname = name[:-4]
elif name.lower().endswith('-script.py'):
matchname = name[:-10]
elif name.lower().endswith(".pya"):
matchname = name[:-4]
else:
matchname = name
# Ignore setuptools-generated scripts
return (matchname in console or matchname in gui)
for datadir in data_dirs:
fixer = None
filter = None
for subdir in os.listdir(os.path.join(wheeldir, datadir)):
fixer = None
if subdir == 'scripts':
fixer = fix_script
filter = is_entrypoint_wrapper
source = os.path.join(wheeldir, datadir, subdir)
dest = scheme[subdir]
clobber(source, dest, False, fixer=fixer, filter=filter)
maker = ScriptMaker(None, scheme['scripts'])
# Ensure we don't generate any variants for scripts because this is almost
# never what somebody wants.
# See https://bitbucket.org/pypa/distlib/issue/35/
maker.variants = set(('', ))
# This is required because otherwise distlib creates scripts that are not
# executable.
# See https://bitbucket.org/pypa/distlib/issue/32/
maker.set_mode = True
# Simplify the script and fix the fact that the default script swallows
# every single stack trace.
# See https://bitbucket.org/pypa/distlib/issue/34/
# See https://bitbucket.org/pypa/distlib/issue/33/
def _get_script_text(entry):
return maker.script_template % {
"module": entry.prefix,
"import_name": entry.suffix.split(".")[0],
"func": entry.suffix,
}
maker._get_script_text = _get_script_text
maker.script_template = """# -*- coding: utf-8 -*-
import re
import sys
from %(module)s import %(import_name)s
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(%(func)s())
"""
# Special case pip and setuptools to generate versioned wrappers
#
# The issue is that some projects (specifically, pip and setuptools) use
# code in setup.py to create "versioned" entry points - pip2.7 on Python
# 2.7, pip3.3 on Python 3.3, etc. But these entry points are baked into
# the wheel metadata at build time, and so if the wheel is installed with
# a *different* version of Python the entry points will be wrong. The
# correct fix for this is to enhance the metadata to be able to describe
# such versioned entry points, but that won't happen till Metadata 2.0 is
# available.
# In the meantime, projects using versioned entry points will either have
# incorrect versioned entry points, or they will not be able to distribute
# "universal" wheels (i.e., they will need a wheel per Python version).
#
# Because setuptools and pip are bundled with _ensurepip and virtualenv,
# we need to use universal wheels. So, as a stopgap until Metadata 2.0, we
# override the versioned entry points in the wheel and generate the
    # correct ones. This code is purely a short-term measure until Metadata 2.0
# is available.
#
    # To add to the level of hack in this section of code, in order to support
    # ensurepip this code will look for an ``ENSUREPIP_OPTIONS`` environment
    # variable which controls which versioned scripts get installed.
#
# ENSUREPIP_OPTIONS=altinstall
# - Only pipX.Y and easy_install-X.Y will be generated and installed
# ENSUREPIP_OPTIONS=install
# - pipX.Y, pipX, easy_install-X.Y will be generated and installed. Note
    #      that this behavior applies whenever ENSUREPIP_OPTIONS is set to any
    #      value other than altinstall
# DEFAULT
# - The default behavior is to install pip, pipX, pipX.Y, easy_install
# and easy_install-X.Y.
pip_script = console.pop('pip', None)
if pip_script:
if "ENSUREPIP_OPTIONS" not in os.environ:
spec = 'pip = ' + pip_script
generated.extend(maker.make(spec))
if os.environ.get("ENSUREPIP_OPTIONS", "") != "altinstall":
spec = 'pip%s = %s' % (sys.version[:1], pip_script)
generated.extend(maker.make(spec))
spec = 'pip%s = %s' % (sys.version[:3], pip_script)
generated.extend(maker.make(spec))
# Delete any other versioned pip entry points
pip_ep = [k for k in console if re.match(r'pip(\d(\.\d)?)?$', k)]
for k in pip_ep:
del console[k]
easy_install_script = console.pop('easy_install', None)
if easy_install_script:
if "ENSUREPIP_OPTIONS" not in os.environ:
spec = 'easy_install = ' + easy_install_script
generated.extend(maker.make(spec))
spec = 'easy_install-%s = %s' % (sys.version[:3], easy_install_script)
generated.extend(maker.make(spec))
# Delete any other versioned easy_install entry points
easy_install_ep = [k for k in console
if re.match(r'easy_install(-\d\.\d)?$', k)]
for k in easy_install_ep:
del console[k]
# Generate the console and GUI entry points specified in the wheel
if len(console) > 0:
generated.extend(maker.make_multiple(['%s = %s' % kv for kv in console.items()]))
if len(gui) > 0:
generated.extend(maker.make_multiple(['%s = %s' % kv for kv in gui.items()], {'gui': True}))
record = os.path.join(info_dir[0], 'RECORD')
temp_record = os.path.join(info_dir[0], 'RECORD.pip')
with open_for_csv(record, 'r') as record_in:
with open_for_csv(temp_record, 'w+') as record_out:
reader = csv.reader(record_in)
writer = csv.writer(record_out)
for row in reader:
row[0] = installed.pop(row[0], row[0])
if row[0] in changed:
row[1], row[2] = rehash(row[0])
writer.writerow(row)
for f in generated:
h, l = rehash(f)
writer.writerow((f, h, l))
for f in installed:
writer.writerow((installed[f], '', ''))
shutil.move(temp_record, record)
def _unique(fn):
@functools.wraps(fn)
def unique(*args, **kw):
seen = set()
for item in fn(*args, **kw):
if item not in seen:
seen.add(item)
yield item
return unique
# TODO: this goes somewhere besides the wheel module
@_unique
def uninstallation_paths(dist):
"""
Yield all the uninstallation paths for dist based on RECORD-without-.pyc
Yield paths to all the files in RECORD. For each .py file in RECORD, add
the .pyc in the same directory.
UninstallPathSet.add() takes care of the __pycache__ .pyc.
"""
from pip.req import FakeFile # circular import
r = csv.reader(FakeFile(dist.get_metadata_lines('RECORD')))
for row in r:
path = os.path.join(dist.location, row[0])
yield path
if path.endswith('.py'):
dn, fn = os.path.split(path)
base = fn[:-3]
path = os.path.join(dn, base+'.pyc')
yield path
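# Illustrative sketch (paths hypothetical): a RECORD row naming 'pkg/mod.py'
# yields both '<dist.location>/pkg/mod.py' and '<dist.location>/pkg/mod.pyc'.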
def wheel_version(source_dir):
"""
Return the Wheel-Version of an extracted wheel, if possible.
Otherwise, return False if we couldn't parse / extract it.
"""
try:
dist = [d for d in pkg_resources.find_on_path(None, source_dir)][0]
wheel_data = dist.get_metadata('WHEEL')
wheel_data = Parser().parsestr(wheel_data)
version = wheel_data['Wheel-Version'].strip()
version = tuple(map(int, version.split('.')))
return version
except:
return False
def check_compatibility(version, name):
"""
Raises errors or warns if called with an incompatible Wheel-Version.
Pip should refuse to install a Wheel-Version that's a major series
    ahead of what it's compatible with (e.g. 2.0 > 1.1); and warn when
    installing a version that is only a minor version ahead (e.g. 1.2 > 1.1).
version: a 2-tuple representing a Wheel-Version (Major, Minor)
name: name of wheel or package to raise exception about
:raises UnsupportedWheel: when an incompatible Wheel-Version is given
"""
if not version:
raise UnsupportedWheel(
"%s is in an unsupported or invalid wheel" % name
)
if version[0] > VERSION_COMPATIBLE[0]:
raise UnsupportedWheel(
"%s's Wheel-Version (%s) is not compatible with this version "
"of pip" % (name, '.'.join(map(str, version)))
)
elif version > VERSION_COMPATIBLE:
logger.warn('Installing from a newer Wheel-Version (%s)'
% '.'.join(map(str, version)))
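# Illustrative sketch of the compatibility rules above (names hypothetical):
#
#   check_compatibility((1, 0), 'example')  # ok: matches VERSION_COMPATIBLE
#   check_compatibility((1, 1), 'example')  # warns: newer minor version
#   check_compatibility((2, 0), 'example')  # raises UnsupportedWheel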
class Wheel(object):
"""A wheel file"""
# TODO: maybe move the install code into this class
wheel_file_re = re.compile(
r"""^(?P<namever>(?P<name>.+?)-(?P<ver>\d.*?))
((-(?P<build>\d.*?))?-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?)
\.whl|\.dist-info)$""",
re.VERBOSE)
def __init__(self, filename):
"""
:raises InvalidWheelFilename: when the filename is invalid for a wheel
"""
wheel_info = self.wheel_file_re.match(filename)
if not wheel_info:
raise InvalidWheelFilename("%s is not a valid wheel filename." % filename)
self.filename = filename
self.name = wheel_info.group('name').replace('_', '-')
# we'll assume "_" means "-" due to wheel naming scheme
# (https://github.com/pypa/pip/issues/1150)
self.version = wheel_info.group('ver').replace('_', '-')
self.pyversions = wheel_info.group('pyver').split('.')
self.abis = wheel_info.group('abi').split('.')
self.plats = wheel_info.group('plat').split('.')
# All the tag combinations from this file
self.file_tags = set((x, y, z) for x in self.pyversions for y
in self.abis for z in self.plats)
def support_index_min(self, tags=None):
"""
Return the lowest index that one of the wheel's file_tag combinations
achieves in the supported_tags list e.g. if there are 8 supported tags,
and one of the file tags is first in the list, then return 0. Returns
        None if the wheel is not supported.
"""
if tags is None: # for mock
tags = pep425tags.supported_tags
indexes = [tags.index(c) for c in self.file_tags if c in tags]
return min(indexes) if indexes else None
def supported(self, tags=None):
"""Is this wheel supported on this system?"""
if tags is None: # for mock
tags = pep425tags.supported_tags
return bool(set(tags).intersection(self.file_tags))
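# Illustrative sketch of the filename parsing above (wheel name hypothetical):
#
#   w = Wheel('example_pkg-1.0-py2.py3-none-any.whl')
#   w.name       == 'example-pkg'   # underscores normalised to dashes
#   w.version    == '1.0'
#   w.file_tags  == {('py2', 'none', 'any'), ('py3', 'none', 'any')}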
class WheelBuilder(object):
"""Build wheels from a RequirementSet."""
def __init__(self, requirement_set, finder, wheel_dir, build_options=[], global_options=[]):
self.requirement_set = requirement_set
self.finder = finder
self.wheel_dir = normalize_path(wheel_dir)
self.build_options = build_options
self.global_options = global_options
def _build_one(self, req):
"""Build one wheel."""
base_args = [
sys.executable, '-c',
"import setuptools;__file__=%r;"\
"exec(compile(open(__file__).read().replace('\\r\\n', '\\n'), __file__, 'exec'))" % req.setup_py] + \
list(self.global_options)
logger.notify('Running setup.py bdist_wheel for %s' % req.name)
logger.notify('Destination directory: %s' % self.wheel_dir)
wheel_args = base_args + ['bdist_wheel', '-d', self.wheel_dir] + self.build_options
try:
call_subprocess(wheel_args, cwd=req.source_dir, show_stdout=False)
return True
except:
logger.error('Failed building wheel for %s' % req.name)
return False
def build(self):
"""Build wheels."""
        # unpack and construct req set
self.requirement_set.prepare_files(self.finder)
reqset = self.requirement_set.requirements.values()
buildset = [req for req in reqset if not req.is_wheel]
if not buildset:
return
#build the wheels
logger.notify(
'Building wheels for collected packages: %s' %
','.join([req.name for req in buildset])
)
logger.indent += 2
build_success, build_failure = [], []
for req in buildset:
if self._build_one(req):
build_success.append(req)
else:
build_failure.append(req)
logger.indent -= 2
        # notify success/failure
if build_success:
logger.notify('Successfully built %s' % ' '.join([req.name for req in build_success]))
if build_failure:
logger.notify('Failed to build %s' % ' '.join([req.name for req in build_failure]))
|
clemensv/qpid-proton
|
refs/heads/master
|
examples/python/abstract_server.py
|
9
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import print_function
from proton_server import Server
class Application(Server):
def __init__(self, host, address):
super(Application, self).__init__(host, address)
def on_request(self, request, reply_to):
response = request.upper()
self.send(response, reply_to)
print("Request from: %s" % reply_to)
try:
Application("localhost:5672", "examples").run()
except KeyboardInterrupt: pass
|
UXE/local-edx
|
refs/heads/master
|
lms/djangoapps/instructor/views/instructor_task_helpers.py
|
133
|
"""
A collection of helper utility functions for working with instructor
tasks.
"""
import json
import logging
from util.date_utils import get_default_time_display
from bulk_email.models import CourseEmail
from django.utils.translation import ugettext as _
from django.utils.translation import ungettext
from instructor_task.views import get_task_completion_info
log = logging.getLogger(__name__)
def email_error_information():
"""
    Returns email information marked as None, used in the event the email
    cannot be loaded.
"""
expected_info = [
'created',
'sent_to',
'email',
'number_sent',
'requester',
]
return {info: None for info in expected_info}
def extract_email_features(email_task):
"""
From the given task, extract email content information
Expects that the given task has the following attributes:
* task_input (dict containing email_id and to_option)
* task_output (optional, dict containing total emails sent)
* requester, the user who executed the task
With this information, gets the corresponding email object from the
bulk emails table, and loads up a dict containing the following:
* created, the time the email was sent displayed in default time display
* sent_to, the group the email was delivered to
* email, dict containing the subject, id, and html_message of an email
* number_sent, int number of emails sent
* requester, the user who sent the emails
If task_input cannot be loaded, then the email cannot be loaded
and None is returned for these fields.
"""
# Load the task input info to get email id
try:
task_input_information = json.loads(email_task.task_input)
except ValueError:
log.error("Could not parse task input as valid json; task input: %s", email_task.task_input)
return email_error_information()
email = CourseEmail.objects.get(id=task_input_information['email_id'])
email_feature_dict = {
'created': get_default_time_display(email.created),
'sent_to': task_input_information['to_option'],
'requester': str(getattr(email_task, 'requester')),
}
features = ['subject', 'html_message', 'id']
email_info = {feature: unicode(getattr(email, feature)) for feature in features}
# Pass along email as an object with the information we desire
email_feature_dict['email'] = email_info
# Translators: number sent refers to the number of emails sent
number_sent = _('0 sent')
if hasattr(email_task, 'task_output') and email_task.task_output is not None:
try:
task_output = json.loads(email_task.task_output)
except ValueError:
log.error("Could not parse task output as valid json; task output: %s", email_task.task_output)
else:
if 'succeeded' in task_output and task_output['succeeded'] > 0:
num_emails = task_output['succeeded']
number_sent = ungettext(
"{num_emails} sent",
"{num_emails} sent",
num_emails
).format(num_emails=num_emails)
if 'failed' in task_output and task_output['failed'] > 0:
num_emails = task_output['failed']
number_sent += ", "
number_sent += ungettext(
"{num_emails} failed",
"{num_emails} failed",
num_emails
).format(num_emails=num_emails)
email_feature_dict['number_sent'] = number_sent
return email_feature_dict
def extract_task_features(task):
"""
Convert task to dict for json rendering.
Expects tasks have the following features:
* task_type (str, type of task)
* task_input (dict, input(s) to the task)
* task_id (str, celery id of the task)
* requester (str, username who submitted the task)
* task_state (str, state of task eg PROGRESS, COMPLETED)
    * created (datetime, when the task was created)
* task_output (optional)
"""
# Pull out information from the task
features = ['task_type', 'task_input', 'task_id', 'requester', 'task_state']
task_feature_dict = {feature: str(getattr(task, feature)) for feature in features}
# Some information (created, duration, status, task message) require additional formatting
task_feature_dict['created'] = task.created.isoformat()
# Get duration info, if known
duration_sec = 'unknown'
if hasattr(task, 'task_output') and task.task_output is not None:
try:
task_output = json.loads(task.task_output)
except ValueError:
log.error("Could not parse task output as valid json; task output: %s", task.task_output)
else:
if 'duration_ms' in task_output:
duration_sec = int(task_output['duration_ms'] / 1000.0)
task_feature_dict['duration_sec'] = duration_sec
# Get progress status message & success information
success, task_message = get_task_completion_info(task)
status = _("Complete") if success else _("Incomplete")
task_feature_dict['status'] = status
task_feature_dict['task_message'] = task_message
return task_feature_dict
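# Illustrative sketch (values hypothetical): for a task whose task_output is
# '{"duration_ms": 4500}', extract_task_features() reports
# task_feature_dict['duration_sec'] == 4, with 'status' set to "Complete" or
# "Incomplete" depending on get_task_completion_info(task).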
|
aljim/deploymentmanager-samples
|
refs/heads/master
|
community/cloud-foundation/templates/stackdriver_metric_descriptor/stackdriver_metric_descriptor.py
|
1
|
# Copyright 2018 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" This template creates a Stackdriver Metric Descriptor. """
def generate_config(context):
""" Entry point for the deployment resources. """
resources = []
outputs = []
properties = context.properties
name = properties.get('name', context.env['name'])
metric_descriptor = {
'name': name,
'type': 'gcp-types/monitoring-v3:projects.metricDescriptors',
'properties': {}
}
required_properties = [
'type',
'metricKind',
'valueType',
'unit'
]
for prop in required_properties:
if prop in properties:
metric_descriptor['properties'][prop] = properties[prop]
# Optional properties:
optional_properties = ['displayName', 'labels', 'description', 'metadata']
for prop in optional_properties:
if prop in properties:
metric_descriptor['properties'][prop] = properties[prop]
resources.append(metric_descriptor)
# Output variables:
output_props = [
'name',
'type',
'labels',
'metricKind',
'valueType',
'unit',
'description',
'displayName',
'metadata'
]
for outprop in output_props:
output = {}
if outprop in properties:
output['name'] = outprop
output['value'] = '$(ref.{}.{})'.format(name, outprop)
outputs.append(output)
return {'resources': resources, 'outputs': outputs}
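# Illustrative sketch (property values hypothetical): given context.properties
# such as
#
#   {'type': 'custom.googleapis.com/my_metric', 'metricKind': 'GAUGE',
#    'valueType': 'DOUBLE', 'unit': '1', 'description': 'Example metric'}
#
# generate_config() emits a single resource of type
# 'gcp-types/monitoring-v3:projects.metricDescriptors' carrying those
# properties, plus one '$(ref.<name>.<prop>)' output per property present.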
|
Junky-Josqu/BleyBley_py
|
refs/heads/master
|
game/lib/module.py
|
1
|
# -*- coding: iso-8859-1 -*-
#For different Modules
from os.path import expanduser
home = expanduser("~")
x = '%s/.mbwarband/last_module_warband' % (home)
file = open(x,'r+')
def native():
module = 'Native'
return module
def viking_conquest():
module = 'Viking Conquest'
return module
def napoleonic_wars():
module = 'Napoleonic Wars'
return module
|
lukasklein/paymill-python
|
refs/heads/master
|
samples/clients/list_clients.py
|
2
|
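# Note: this sample assumes a Paymill context created beforehand, e.g.
# (hypothetical placeholder key):
#   import paymill
#   paymill_context = paymill.PaymillContext('<YOUR_PRIVATE_API_TEST_KEY>')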
client_service = paymill_context.get_client_service()
clients_list = client_service.list()
|
attente/snapcraft
|
refs/heads/master
|
snaps_tests/demos_tests/test_ros.py
|
2
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015, 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import snaps_tests
import os
import subprocess
class ROSTestCase(snaps_tests.SnapsTestCase):
snap_content_dir = 'ros'
def test_ros(self):
snap_path = self.build_snap(self.snap_content_dir)
self.install_snap(snap_path, 'ros-example', '1.0')
# check that the hardcoded /usr/bin/python in rosversion
# is changed to using /usr/bin/env python
expected = b'#!/usr/bin/env python\n'
output = subprocess.check_output(
"sed -n '/env/p;1q' prime/usr/bin/rosversion",
cwd=os.path.join(self.path, self.snap_content_dir), shell=True)
self.assertEqual(output, expected)
|
EmadMokhtar/Django
|
refs/heads/master
|
django/contrib/sites/__init__.py
|
808
|
default_app_config = 'django.contrib.sites.apps.SitesConfig'
|
selimcr/pilargamboa
|
refs/heads/master
|
vendor/doctrine/orm/docs/en/conf.py
|
2448
|
# -*- coding: utf-8 -*-
#
# Doctrine 2 ORM documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 3 18:10:24 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('_exts'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['configurationblock']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Doctrine 2 ORM'
copyright = u'2010-12, Doctrine Project Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2'
# The full version, including alpha/beta/rc tags.
release = '2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'doctrine'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_theme']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Doctrine2ORMdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Doctrine2ORM.tex', u'Doctrine 2 ORM Documentation',
u'Doctrine Project Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
primary_domain = "dcorm"
def linkcode_resolve(domain, info):
if domain == 'dcorm':
return 'http://'
return None
|
AlvinPH/StockTool
|
refs/heads/master
|
setup.py
|
1
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
with open('README.rst') as f:
readme = f.read()
with open('LICENSE') as f:
license = f.read()
setup(
name='StockTool',
version='0.1.0',
description='Stock Tool by Alvin Lai',
long_description=readme,
author='Alvin Lai',
author_email='[email protected]',
url='https://github.com/AlvinPH/StockTool',
license=license,
packages=find_packages(exclude=('tests', 'docs'))
)
|
Theer108/invenio
|
refs/heads/master
|
invenio/modules/records/testsuite/functions/sync_meeting_names.py
|
33
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from invenio.modules.jsonalchemy.jsonext.functions.util_merge_fields_info_list \
import util_merge_fields_info_list
def sync_meeting_names(self, field_name, connected_field, action): # pylint: disable=W0613
"""
    Sync corporate names content only when `__setitem__` or similar is used
"""
if action == 'set':
if field_name == 'corporate_names' and self.get('corporate_names'):
self.__setitem__('_first_corporate_name',
self['corporate_names'][0],
exclude=['connect'])
if self['corporate_names'][1:]:
self.__setitem__('_additional_corporate_names',
self['corporate_names'][1:],
exclude=['connect'])
elif field_name in ('_first_author', '_additional_authors'):
self.__setitem__(
'corporate_names',
util_merge_fields_info_list(self, ['_first_corporate_name',
'_additional_corporate_names']),
exclude=['connect'])
|
hlin117/statsmodels
|
refs/heads/master
|
statsmodels/graphics/regressionplots.py
|
20
|
'''Partial Regression plot and residual plots to find misspecification
Author: Josef Perktold
License: BSD-3
Created: 2011-01-23
update
2011-06-05 : start to convert example to usable functions
2011-10-27 : docstrings
'''
from statsmodels.compat.python import lrange, string_types, lzip, range
import numpy as np
import pandas as pd
from patsy import dmatrix
from statsmodels.regression.linear_model import OLS, GLS, WLS
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod.generalized_estimating_equations import GEE
from statsmodels.sandbox.regression.predstd import wls_prediction_std
from statsmodels.graphics import utils
from statsmodels.nonparametric.smoothers_lowess import lowess
from statsmodels.tools.tools import maybe_unwrap_results
from statsmodels.base import model
from ._regressionplots_doc import (
_plot_added_variable_doc,
_plot_partial_residuals_doc,
_plot_ceres_residuals_doc)
__all__ = ['plot_fit', 'plot_regress_exog', 'plot_partregress', 'plot_ccpr',
'plot_regress_exog', 'plot_partregress_grid', 'plot_ccpr_grid',
'add_lowess', 'abline_plot', 'influence_plot',
'plot_leverage_resid2', 'added_variable_resids',
'partial_resids', 'ceres_resids', 'plot_added_variable',
'plot_partial_residuals', 'plot_ceres_residuals']
#TODO: consider moving to influence module
def _high_leverage(results):
#TODO: replace 1 with k_constant
return 2. * (results.df_model + 1)/results.nobs
def add_lowess(ax, lines_idx=0, frac=.2, **lowess_kwargs):
"""
Add Lowess line to a plot.
Parameters
----------
ax : matplotlib Axes instance
The Axes to which to add the plot
lines_idx : int
This is the line on the existing plot to which you want to add
a smoothed lowess line.
frac : float
The fraction of the points to use when doing the lowess fit.
lowess_kwargs
        Additional keyword arguments are passed to lowess.
Returns
-------
fig : matplotlib Figure instance
The figure that holds the instance.
"""
y0 = ax.get_lines()[lines_idx]._y
x0 = ax.get_lines()[lines_idx]._x
lres = lowess(y0, x0, frac=frac, **lowess_kwargs)
ax.plot(lres[:, 0], lres[:, 1], 'r', lw=1.5)
return ax.figure
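# Illustrative usage sketch (an assumption, not taken from the docstring
# above): add a lowess smooth to the first line of an existing axes, e.g. one
# produced by plot_fit defined below.
#
#   fig = plot_fit(results, 0)
#   add_lowess(fig.axes[0], lines_idx=0, frac=0.3)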
def plot_fit(results, exog_idx, y_true=None, ax=None, **kwargs):
"""Plot fit against one regressor.
This creates one graph with the scatterplot of observed values compared to
fitted values.
Parameters
----------
results : result instance
result instance with resid, model.endog and model.exog as attributes
    exog_idx : int or str
Name or index of regressor in exog matrix.
y_true : array_like
(optional) If this is not None, then the array is added to the plot
ax : Matplotlib AxesSubplot instance, optional
If given, this subplot is used to plot in instead of a new figure being
created.
kwargs
The keyword arguments are passed to the plot command for the fitted
values points.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
Examples
--------
Load the Statewide Crime data set and perform linear regression with
`poverty` and `hs_grad` as variables and `murder` as the response
>>> import statsmodels.api as sm
>>> import matplotlib.pyplot as plt
>>> data = sm.datasets.statecrime.load_pandas().data
>>> murder = data['murder']
>>> X = data[['poverty', 'hs_grad']]
>>> X["constant"] = 1
>>> y = murder
>>> model = sm.OLS(y, X)
>>> results = model.fit()
Create a plot just for the variable 'Poverty':
>>> fig, ax = plt.subplots()
>>> fig = sm.graphics.plot_fit(results, 0, ax=ax)
>>> ax.set_ylabel("Murder Rate")
>>> ax.set_xlabel("Poverty Level")
>>> ax.set_title("Linear Regression")
>>> plt.show()
.. plot:: plots/graphics_plot_fit_ex.py
"""
fig, ax = utils.create_mpl_ax(ax)
exog_name, exog_idx = utils.maybe_name_or_idx(exog_idx, results.model)
results = maybe_unwrap_results(results)
#maybe add option for wendog, wexog
y = results.model.endog
x1 = results.model.exog[:, exog_idx]
x1_argsort = np.argsort(x1)
y = y[x1_argsort]
x1 = x1[x1_argsort]
ax.plot(x1, y, 'bo', label=results.model.endog_names)
    if y_true is not None:
ax.plot(x1, y_true[x1_argsort], 'b-', label='True values')
title = 'Fitted values versus %s' % exog_name
prstd, iv_l, iv_u = wls_prediction_std(results)
ax.plot(x1, results.fittedvalues[x1_argsort], 'D', color='r',
label='fitted', **kwargs)
ax.vlines(x1, iv_l[x1_argsort], iv_u[x1_argsort], linewidth=1, color='k',
alpha=.7)
#ax.fill_between(x1, iv_l[x1_argsort], iv_u[x1_argsort], alpha=0.1,
# color='k')
ax.set_title(title)
ax.set_xlabel(exog_name)
ax.set_ylabel(results.model.endog_names)
ax.legend(loc='best', numpoints=1)
return fig
def plot_regress_exog(results, exog_idx, fig=None):
"""Plot regression results against one regressor.
This plots four graphs in a 2 by 2 figure: 'endog versus exog',
'residuals versus exog', 'fitted versus exog' and
'fitted plus residual versus exog'
Parameters
----------
results : result instance
result instance with resid, model.endog and model.exog as attributes
exog_idx : int
index of regressor in exog matrix
fig : Matplotlib figure instance, optional
If given, this figure is simply returned. Otherwise a new figure is
created.
Returns
-------
fig : matplotlib figure instance
"""
fig = utils.create_mpl_fig(fig)
exog_name, exog_idx = utils.maybe_name_or_idx(exog_idx, results.model)
results = maybe_unwrap_results(results)
#maybe add option for wendog, wexog
y_name = results.model.endog_names
x1 = results.model.exog[:, exog_idx]
prstd, iv_l, iv_u = wls_prediction_std(results)
ax = fig.add_subplot(2, 2, 1)
ax.plot(x1, results.model.endog, 'o', color='b', alpha=0.9, label=y_name)
ax.plot(x1, results.fittedvalues, 'D', color='r', label='fitted',
alpha=.5)
ax.vlines(x1, iv_l, iv_u, linewidth=1, color='k', alpha=.7)
ax.set_title('Y and Fitted vs. X', fontsize='large')
ax.set_xlabel(exog_name)
ax.set_ylabel(y_name)
ax.legend(loc='best')
ax = fig.add_subplot(2, 2, 2)
ax.plot(x1, results.resid, 'o')
ax.axhline(y=0, color='black')
ax.set_title('Residuals versus %s' % exog_name, fontsize='large')
ax.set_xlabel(exog_name)
ax.set_ylabel("resid")
ax = fig.add_subplot(2, 2, 3)
exog_noti = np.ones(results.model.exog.shape[1], bool)
exog_noti[exog_idx] = False
exog_others = results.model.exog[:, exog_noti]
from pandas import Series
fig = plot_partregress(results.model.data.orig_endog,
Series(x1, name=exog_name,
index=results.model.data.row_labels),
exog_others, obs_labels=False, ax=ax)
ax.set_title('Partial regression plot', fontsize='large')
#ax.set_ylabel("Fitted values")
#ax.set_xlabel(exog_name)
ax = fig.add_subplot(2, 2, 4)
fig = plot_ccpr(results, exog_idx, ax=ax)
ax.set_title('CCPR Plot', fontsize='large')
#ax.set_xlabel(exog_name)
#ax.set_ylabel("Fitted values + resids")
fig.suptitle('Regression Plots for %s' % exog_name, fontsize="large")
fig.tight_layout()
fig.subplots_adjust(top=.90)
return fig
def _partial_regression(endog, exog_i, exog_others):
"""Partial regression.
regress endog on exog_i conditional on exog_others
uses OLS
Parameters
----------
endog : array_like
    exog_i : array_like
exog_others : array_like
Returns
-------
res1c : OLS results instance
(res1a, res1b) : tuple of OLS results instances
results from regression of endog on exog_others and of exog_i on
exog_others
"""
#FIXME: This function doesn't appear to be used.
res1a = OLS(endog, exog_others).fit()
res1b = OLS(exog_i, exog_others).fit()
res1c = OLS(res1a.resid, res1b.resid).fit()
return res1c, (res1a, res1b)
def plot_partregress(endog, exog_i, exog_others, data=None,
title_kwargs={}, obs_labels=True, label_kwargs={},
ax=None, ret_coords=False, **kwargs):
"""Plot partial regression for a single regressor.
Parameters
----------
endog : ndarray or string
        endogenous or response variable. If a string is given, you can use
        arbitrary transformations as with a formula.
exog_i : ndarray or string
        exogenous, explanatory variable. If a string is given, you can use
        arbitrary transformations as with a formula.
exog_others : ndarray or list of strings
other exogenous, explanatory variables. If a list of strings is given,
        each item is a term in the formula. You can use arbitrary
        transformations as with a formula. The effect of these variables
        will be removed by OLS regression.
data : DataFrame, dict, or recarray
Some kind of data structure with names if the other variables are
given as strings.
title_kwargs : dict
Keyword arguments to pass on for the title. The key to control the
fonts is fontdict.
obs_labels : bool or array-like
Whether or not to annotate the plot points with their observation
labels. If obs_labels is a boolean, the point labels will try to do
the right thing. First it will try to use the index of data, then
fall back to the index of exog_i. Alternatively, you may give an
        array-like object corresponding to the observation numbers.
    label_kwargs : dict
Keyword arguments that control annotate for the observation labels.
ax : Matplotlib AxesSubplot instance, optional
If given, this subplot is used to plot in instead of a new figure being
created.
ret_coords : bool
If True will return the coordinates of the points in the plot. You
can use this to add your own annotations.
kwargs
The keyword arguments passed to plot for the points.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
coords : list, optional
If ret_coords is True, return a tuple of arrays (x_coords, y_coords).
Notes
-----
    The slope of the fitted line is that of `exog_i` in the full
multiple regression. The individual points can be used to assess the
influence of points on the estimated coefficient.
See Also
--------
plot_partregress_grid : Plot partial regression for a set of regressors.
"""
#NOTE: there is no interaction between possible missing data and
#obs_labels yet, so this will need to be tweaked a bit for this case
fig, ax = utils.create_mpl_ax(ax)
# strings, use patsy to transform to data
if isinstance(endog, string_types):
endog = dmatrix(endog + "-1", data)
if isinstance(exog_others, string_types):
RHS = dmatrix(exog_others, data)
elif isinstance(exog_others, list):
RHS = "+".join(exog_others)
RHS = dmatrix(RHS, data)
else:
RHS = exog_others
    RHS_isempty = False
    if isinstance(RHS, np.ndarray) and RHS.size == 0:
        RHS_isempty = True
    elif isinstance(RHS, pd.DataFrame) and RHS.empty:
        RHS_isempty = True
if isinstance(exog_i, string_types):
exog_i = dmatrix(exog_i + "-1", data)
# all arrays or pandas-like
    if RHS_isempty:
ax.plot(endog, exog_i, 'o', **kwargs)
fitted_line = OLS(endog, exog_i).fit()
x_axis_endog_name = 'x' if isinstance(exog_i, np.ndarray) else exog_i.name
y_axis_endog_name = 'y' if isinstance(endog, np.ndarray) else endog.design_info.column_names[0]
else:
res_yaxis = OLS(endog, RHS).fit()
res_xaxis = OLS(exog_i, RHS).fit()
xaxis_resid = res_xaxis.resid
yaxis_resid = res_yaxis.resid
x_axis_endog_name = res_xaxis.model.endog_names
y_axis_endog_name = res_yaxis.model.endog_names
ax.plot(xaxis_resid, yaxis_resid, 'o', **kwargs)
fitted_line = OLS(yaxis_resid, xaxis_resid).fit()
fig = abline_plot(0, fitted_line.params[0], color='k', ax=ax)
if x_axis_endog_name == 'y': # for no names regression will just get a y
x_axis_endog_name = 'x' # this is misleading, so use x
ax.set_xlabel("e(%s | X)" % x_axis_endog_name)
ax.set_ylabel("e(%s | X)" % y_axis_endog_name)
ax.set_title('Partial Regression Plot', **title_kwargs)
#NOTE: if we want to get super fancy, we could annotate if a point is
#clicked using this widget
#http://stackoverflow.com/questions/4652439/
#is-there-a-matplotlib-equivalent-of-matlabs-datacursormode/
#4674445#4674445
if obs_labels is True:
if data is not None:
obs_labels = data.index
elif hasattr(exog_i, "index"):
obs_labels = exog_i.index
else:
obs_labels = res_xaxis.model.data.row_labels
#NOTE: row_labels can be None.
#Maybe we should fix this to never be the case.
if obs_labels is None:
obs_labels = lrange(len(exog_i))
if obs_labels is not False: # could be array-like
if len(obs_labels) != len(exog_i):
raise ValueError("obs_labels does not match length of exog_i")
label_kwargs.update(dict(ha="center", va="bottom"))
ax = utils.annotate_axes(lrange(len(obs_labels)), obs_labels,
lzip(res_xaxis.resid, res_yaxis.resid),
[(0, 5)] * len(obs_labels), "x-large", ax=ax,
**label_kwargs)
if ret_coords:
return fig, (res_xaxis.resid, res_yaxis.resid)
else:
return fig
def plot_partregress_grid(results, exog_idx=None, grid=None, fig=None):
"""Plot partial regression for a set of regressors.
Parameters
----------
results : results instance
A regression model results instance
exog_idx : None, list of ints, list of strings
(column) indices of the exog used in the plot, default is all.
grid : None or tuple of int (nrows, ncols)
If grid is given, then it is used for the arrangement of the subplots.
        If grid is None, a single column is used if there are at most 2
        subplots, and two columns are used otherwise.
fig : Matplotlib figure instance, optional
If given, this figure is simply returned. Otherwise a new figure is
created.
Returns
-------
fig : Matplotlib figure instance
If `fig` is None, the created figure. Otherwise `fig` itself.
Notes
-----
A subplot is created for each explanatory variable given by exog_idx.
The partial regression plot shows the relationship between the response
and the given explanatory variable after removing the effect of all other
explanatory variables in exog.
See Also
--------
plot_partregress : Plot partial regression for a single regressor.
plot_ccpr
References
----------
See http://www.itl.nist.gov/div898/software/dataplot/refman1/auxillar/partregr.htm
"""
import pandas
fig = utils.create_mpl_fig(fig)
exog_name, exog_idx = utils.maybe_name_or_idx(exog_idx, results.model)
#maybe add option for using wendog, wexog instead
y = pandas.Series(results.model.endog, name=results.model.endog_names)
exog = results.model.exog
k_vars = exog.shape[1]
#this function doesn't make sense if k_vars=1
    if grid is not None:
nrows, ncols = grid
else:
if len(exog_idx) > 2:
nrows = int(np.ceil(len(exog_idx)/2.))
ncols = 2
title_kwargs = {"fontdict" : {"fontsize" : 'small'}}
else:
nrows = len(exog_idx)
ncols = 1
title_kwargs = {}
# for indexing purposes
other_names = np.array(results.model.exog_names)
for i, idx in enumerate(exog_idx):
others = lrange(k_vars)
others.pop(idx)
exog_others = pandas.DataFrame(exog[:, others],
columns=other_names[others])
ax = fig.add_subplot(nrows, ncols, i+1)
plot_partregress(y, pandas.Series(exog[:, idx],
name=other_names[idx]),
exog_others, ax=ax, title_kwargs=title_kwargs,
obs_labels=False)
ax.set_title("")
fig.suptitle("Partial Regression Plot", fontsize="large")
fig.tight_layout()
fig.subplots_adjust(top=.95)
return fig
def plot_ccpr(results, exog_idx, ax=None):
"""Plot CCPR against one regressor.
Generates a CCPR (component and component-plus-residual) plot.
Parameters
----------
results : result instance
A regression results instance.
exog_idx : int or string
Exogenous, explanatory variable. If string is given, it should
be the variable name that you want to use, and you can use arbitrary
translations as with a formula.
ax : Matplotlib AxesSubplot instance, optional
If given, it is used to plot in instead of a new figure being
created.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
See Also
--------
plot_ccpr_grid : Creates CCPR plot for multiple regressors in a plot grid.
Notes
-----
The CCPR plot provides a way to judge the effect of one regressor on the
response variable by taking into account the effects of the other
independent variables. The partial residuals plot is defined as
Residuals + B_i*X_i versus X_i. The component adds the B_i*X_i versus
X_i to show where the fitted line would lie. Care should be taken if X_i
is highly correlated with any of the other independent variables. If this
is the case, the variance evident in the plot will be an underestimate of
the true variance.
References
----------
http://www.itl.nist.gov/div898/software/dataplot/refman1/auxillar/ccpr.htm
"""
fig, ax = utils.create_mpl_ax(ax)
exog_name, exog_idx = utils.maybe_name_or_idx(exog_idx, results.model)
results = maybe_unwrap_results(results)
x1 = results.model.exog[:, exog_idx]
#namestr = ' for %s' % self.name if self.name else ''
x1beta = x1*results.params[exog_idx]
ax.plot(x1, x1beta + results.resid, 'o')
from statsmodels.tools.tools import add_constant
mod = OLS(x1beta, add_constant(x1)).fit()
params = mod.params
fig = abline_plot(*params, **dict(ax=ax))
#ax.plot(x1, x1beta, '-')
ax.set_title('Component and component plus residual plot')
ax.set_ylabel("Residual + %s*beta_%d" % (exog_name, exog_idx))
ax.set_xlabel("%s" % exog_name)
return fig
def plot_ccpr_grid(results, exog_idx=None, grid=None, fig=None):
"""Generate CCPR plots against a set of regressors, plot in a grid.
Generates a grid of CCPR (component and component-plus-residual) plots.
Parameters
----------
results : result instance
uses exog and params of the result instance
exog_idx : None or list of int
(column) indices of the exog used in the plot
grid : None or tuple of int (nrows, ncols)
If grid is given, then it is used for the arrangement of the subplots.
        If grid is None, a single column is used if there are at most 2
        subplots, and two columns are used otherwise.
fig : Matplotlib figure instance, optional
If given, this figure is simply returned. Otherwise a new figure is
created.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
Notes
-----
Partial residual plots are formed as::
Res + Betahat(i)*Xi versus Xi
and CCPR adds::
Betahat(i)*Xi versus Xi
See Also
--------
plot_ccpr : Creates CCPR plot for a single regressor.
References
----------
See http://www.itl.nist.gov/div898/software/dataplot/refman1/auxillar/ccpr.htm
"""
fig = utils.create_mpl_fig(fig)
exog_name, exog_idx = utils.maybe_name_or_idx(exog_idx, results.model)
if grid is not None:
nrows, ncols = grid
else:
if len(exog_idx) > 2:
nrows = int(np.ceil(len(exog_idx)/2.))
ncols = 2
else:
nrows = len(exog_idx)
ncols = 1
seen_constant = 0
for i, idx in enumerate(exog_idx):
if results.model.exog[:, idx].var() == 0:
seen_constant = 1
continue
ax = fig.add_subplot(nrows, ncols, i+1-seen_constant)
fig = plot_ccpr(results, exog_idx=idx, ax=ax)
ax.set_title("")
fig.suptitle("Component-Component Plus Residual Plot", fontsize="large")
fig.tight_layout()
fig.subplots_adjust(top=.95)
return fig
def abline_plot(intercept=None, slope=None, horiz=None, vert=None,
model_results=None, ax=None, **kwargs):
"""
Plots a line given an intercept and slope.
    Parameters
    ----------
    intercept : float
The intercept of the line
slope : float
The slope of the line
horiz : float or array-like
Data for horizontal lines on the y-axis
vert : array-like
        Data for vertical lines on the x-axis
model_results : statsmodels results instance
Any object that has a two-value `params` attribute. Assumed that it
is (intercept, slope)
ax : axes, optional
Matplotlib axes instance
kwargs
        Options passed to matplotlib.pyplot.plot
Returns
-------
fig : Figure
The figure given by `ax.figure` or a new instance.
Examples
--------
>>> import numpy as np
>>> import statsmodels.api as sm
>>> np.random.seed(12345)
>>> X = sm.add_constant(np.random.normal(0, 20, size=30))
>>> y = np.dot(X, [25, 3.5]) + np.random.normal(0, 30, size=30)
>>> mod = sm.OLS(y,X).fit()
>>> fig = sm.graphics.abline_plot(model_results=mod)
>>> ax = fig.axes[0]
>>> ax.scatter(X[:,1], y)
>>> ax.margins(.1)
>>> import matplotlib.pyplot as plt
>>> plt.show()
"""
if ax is not None: # get axis limits first thing, don't change these
x = ax.get_xlim()
else:
x = None
fig, ax = utils.create_mpl_ax(ax)
if model_results:
intercept, slope = model_results.params
if x is None:
x = [model_results.model.exog[:, 1].min(),
model_results.model.exog[:, 1].max()]
else:
if not (intercept is not None and slope is not None):
raise ValueError("specify slope and intercepty or model_results")
if x is None:
x = ax.get_xlim()
data_y = [x[0]*slope+intercept, x[1]*slope+intercept]
ax.set_xlim(x)
#ax.set_ylim(y)
from matplotlib.lines import Line2D
class ABLine2D(Line2D):
def update_datalim(self, ax):
ax.set_autoscale_on(False)
children = ax.get_children()
abline = [children[i] for i in range(len(children))
if isinstance(children[i], ABLine2D)][0]
x = ax.get_xlim()
y = [x[0]*slope+intercept, x[1]*slope+intercept]
abline.set_data(x, y)
ax.figure.canvas.draw()
#TODO: how to intercept something like a margins call and adjust?
line = ABLine2D(x, data_y, **kwargs)
ax.add_line(line)
ax.callbacks.connect('xlim_changed', line.update_datalim)
ax.callbacks.connect('ylim_changed', line.update_datalim)
    if horiz is not None:
        for h in np.atleast_1d(horiz):
            ax.axhline(h)
    if vert is not None:
        for v in np.atleast_1d(vert):
            ax.axvline(v)
return fig
def influence_plot(results, external=True, alpha=.05, criterion="cooks",
size=48, plot_alpha=.75, ax=None, **kwargs):
"""
Plot of influence in regression. Plots studentized resids vs. leverage.
Parameters
----------
results : results instance
A fitted model.
external : bool
Whether to use externally or internally studentized residuals. It is
recommended to leave external as True.
alpha : float
The alpha value to identify large studentized residuals. Large means
        abs(resid_studentized) > t.ppf(1-alpha/2, results.df_resid)
criterion : str {'DFFITS', 'Cooks'}
Which criterion to base the size of the points on. Options are
DFFITS or Cook's D.
size : float
        The range of `criterion` is mapped to marker areas between 8**2 and
        size**2 points**2.
plot_alpha : float
The `alpha` of the plotted points.
ax : matplotlib Axes instance
An instance of a matplotlib Axes.
Returns
-------
fig : matplotlib figure
The matplotlib figure that contains the Axes.
Notes
-----
    Row labels are annotated for observations in which the leverage, measured
    by the diagonal of the hat matrix, is high or the studentized residuals
    are large, since the combination of a large residual and high leverage
    marks an influential point. The cut-off for large residuals can be
    controlled with the `alpha` parameter. Large leverage points are
    identified as hat_i > 2 * (df_model + 1)/nobs.
"""
fig, ax = utils.create_mpl_ax(ax)
infl = results.get_influence()
    if criterion.lower().startswith('coo'):
        psize = infl.cooks_distance[0]
    elif criterion.lower().startswith('dff'):
        psize = np.abs(infl.dffits[0])
else:
raise ValueError("Criterion %s not understood" % criterion)
# scale the variables
#TODO: what is the correct scaling and the assumption here?
#we want plots to be comparable across different plots
#so we would need to use the expected distribution of criterion probably
old_range = np.ptp(psize)
new_range = size**2 - 8**2
psize = (psize - psize.min()) * new_range/old_range + 8**2
leverage = infl.hat_matrix_diag
if external:
resids = infl.resid_studentized_external
else:
resids = infl.resid_studentized_internal
from scipy import stats
cutoff = stats.t.ppf(1.-alpha/2, results.df_resid)
large_resid = np.abs(resids) > cutoff
large_leverage = leverage > _high_leverage(results)
large_points = np.logical_or(large_resid, large_leverage)
ax.scatter(leverage, resids, s=psize, alpha=plot_alpha)
# add point labels
labels = results.model.data.row_labels
if labels is None:
labels = lrange(len(resids))
ax = utils.annotate_axes(np.where(large_points)[0], labels,
lzip(leverage, resids),
lzip(-(psize/2)**.5, (psize/2)**.5), "x-large",
ax)
#TODO: make configurable or let people do it ex-post?
font = {"fontsize" : 16, "color" : "black"}
ax.set_ylabel("Studentized Residuals", **font)
ax.set_xlabel("H Leverage", **font)
ax.set_title("Influence Plot", **font)
return fig
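# Illustrative usage sketch (an assumption, not from the original source):
# influence_plot on a small synthetic OLS fit, sizing points by Cook's D.
def _example_influence_plot():
    """Minimal sketch of influence_plot; the data below are made up."""
    import numpy as np
    import statsmodels.api as sm
    np.random.seed(1)
    x = sm.add_constant(np.random.normal(size=(50, 1)))
    y = np.dot(x, [0.5, 2.0]) + np.random.normal(size=50)
    res = sm.OLS(y, x).fit()
    # Observations with large studentized residuals or with leverage above
    # 2 * (df_model + 1) / nobs are annotated with their row labels.
    return influence_plot(res, criterion="cooks")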
def plot_leverage_resid2(results, alpha=.05, label_kwargs={}, ax=None,
**kwargs):
"""
Plots leverage statistics vs. normalized residuals squared
Parameters
----------
results : results instance
A regression results instance
alpha : float
        Specifies the significance cut-off used to flag large standardized
        residuals, which are assumed to be distributed N(0, 1).
label_kwargs : dict
The keywords to pass to annotate for the labels.
ax : Axes instance
Matplotlib Axes instance
Returns
-------
fig : matplotlib Figure
A matplotlib figure instance.
"""
from scipy.stats import zscore, norm
fig, ax = utils.create_mpl_ax(ax)
infl = results.get_influence()
leverage = infl.hat_matrix_diag
resid = zscore(results.resid)
ax.plot(resid**2, leverage, 'o', **kwargs)
ax.set_xlabel("Normalized residuals**2")
ax.set_ylabel("Leverage")
ax.set_title("Leverage vs. Normalized residuals squared")
large_leverage = leverage > _high_leverage(results)
#norm or t here if standardized?
cutoff = norm.ppf(1.-alpha/2)
large_resid = np.abs(resid) > cutoff
labels = results.model.data.row_labels
if labels is None:
labels = lrange(results.nobs)
index = np.where(np.logical_or(large_leverage, large_resid))[0]
ax = utils.annotate_axes(index, labels, lzip(resid**2, leverage),
[(0, 5)]*int(results.nobs), "large",
ax=ax, ha="center", va="bottom")
ax.margins(.075, .075)
return fig
def plot_added_variable(results, focus_exog, resid_type=None,
use_glm_weights=True, fit_kwargs=None, ax=None):
# Docstring attached below
model = results.model
fig, ax = utils.create_mpl_ax(ax)
endog_resid, focus_exog_resid =\
added_variable_resids(results, focus_exog,
resid_type=resid_type,
use_glm_weights=use_glm_weights,
fit_kwargs=fit_kwargs)
ax.plot(focus_exog_resid, endog_resid, 'o', alpha=0.6)
ax.set_title('Added variable plot', fontsize='large')
if type(focus_exog) is str:
xname = focus_exog
else:
xname = model.exog_names[focus_exog]
ax.set_xlabel(xname, size=15)
ax.set_ylabel(model.endog_names + " residuals", size=15)
return fig
plot_added_variable.__doc__ = _plot_added_variable_doc % {
'extra_params_doc' : "results: object\n\tResults for a fitted regression model"}
def plot_partial_residuals(results, focus_exog, ax=None):
# Docstring attached below
model = results.model
focus_exog, focus_col = utils.maybe_name_or_idx(focus_exog, model)
pr = partial_resids(results, focus_exog)
focus_exog_vals = results.model.exog[:, focus_col]
fig, ax = utils.create_mpl_ax(ax)
ax.plot(focus_exog_vals, pr, 'o', alpha=0.6)
ax.set_title('Partial residuals plot', fontsize='large')
if type(focus_exog) is str:
xname = focus_exog
else:
xname = model.exog_names[focus_exog]
ax.set_xlabel(xname, size=15)
ax.set_ylabel("Component plus residual", size=15)
return fig
plot_partial_residuals.__doc__ = _plot_partial_residuals_doc % {
'extra_params_doc' : "results: object\n\tResults for a fitted regression model"}
def plot_ceres_residuals(results, focus_exog, frac=0.66, cond_means=None,
ax=None):
# Docstring attached below
model = results.model
focus_exog, focus_col = utils.maybe_name_or_idx(focus_exog, model)
presid = ceres_resids(results, focus_exog, frac=frac,
cond_means=cond_means)
focus_exog_vals = model.exog[:, focus_col]
fig, ax = utils.create_mpl_ax(ax)
ax.plot(focus_exog_vals, presid, 'o', alpha=0.6)
ax.set_title('CERES residuals plot', fontsize='large')
ax.set_xlabel(focus_exog, size=15)
ax.set_ylabel("Component plus residual", size=15)
return fig
plot_ceres_residuals.__doc__ = _plot_ceres_residuals_doc % {
'extra_params_doc' : "results: object\n\tResults for a fitted regression model"}
def ceres_resids(results, focus_exog, frac=0.66, cond_means=None):
"""
Calculate the CERES residuals (Conditional Expectation Partial
Residuals) for a fitted model.
Parameters
----------
results : model results instance
The fitted model for which the CERES residuals are calculated.
    focus_exog : int or str
        The column index or name of results.model.exog used as the
        'focus variable'.
frac : float, optional
Lowess smoothing parameter for estimating the conditional
means. Not used if `cond_means` is provided.
cond_means : array-like, optional
If provided, the columns of this array are the conditional
means E[exog | focus exog], where exog ranges over some
or all of the columns of exog other than focus exog. If
this is an empty nx0 array, the conditional means are
treated as being zero. If None, the conditional means are
estimated.
Returns
-------
An array containing the CERES residuals.
Notes
-----
If `cond_means` is not provided, it is obtained by smoothing each
column of exog (except the focus column) against the focus column.
Currently only supports GLM, GEE, and OLS models.
"""
model = results.model
if not isinstance(model, (GLM, GEE, OLS)):
raise ValueError("ceres residuals not available for %s" %
model.__class__.__name__)
focus_exog, focus_col = utils.maybe_name_or_idx(focus_exog, model)
# Indices of non-focus columns
ix_nf = range(len(results.params))
ix_nf = list(ix_nf)
ix_nf.pop(focus_col)
nnf = len(ix_nf)
# Estimate the conditional means if not provided.
if cond_means is None:
# Below we calculate E[x | focus] where x is each column other
# than the focus column. We don't want the intercept when we do
# this so we remove it here.
pexog = model.exog[:, ix_nf]
pexog -= pexog.mean(0)
u, s, vt = np.linalg.svd(pexog, 0)
ii = np.flatnonzero(s > 1e-6)
pexog = u[:, ii]
fcol = model.exog[:, focus_col]
cond_means = np.empty((len(fcol), pexog.shape[1]))
for j in range(pexog.shape[1]):
# Get the fitted values for column i given the other
# columns (skip the intercept).
y0 = pexog[:, j]
cf = lowess(y0, fcol, frac=frac, return_sorted=False)
cond_means[:, j] = cf
new_exog = np.concatenate((model.exog[:, ix_nf], cond_means), axis=1)
# Refit the model using the adjusted exog values
klass = model.__class__
init_kwargs = model._get_init_kwds()
new_model = klass(model.endog, new_exog, **init_kwargs)
new_result = new_model.fit()
# The partial residual, with respect to l(x2) (notation of Cook 1998)
presid = model.endog - new_result.fittedvalues
if isinstance(model, (GLM, GEE)):
presid *= model.family.link.deriv(new_result.fittedvalues)
if new_exog.shape[1] > nnf:
presid += np.dot(new_exog[:, nnf:], new_result.params[nnf:])
return presid
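# Illustrative usage sketch (an assumption, not from the original source):
# compute CERES residuals for one regressor of a synthetic OLS fit. These are
# the residuals that plot_ceres_residuals draws against the focus column.
def _example_ceres_resids():
    """Minimal sketch of ceres_resids on made-up data."""
    import numpy as np
    import statsmodels.api as sm
    np.random.seed(2)
    x = sm.add_constant(np.random.normal(size=(200, 3)))
    y = np.dot(x, [1.0, 0.5, -0.5, 2.0]) + np.random.normal(size=200)
    res = sm.OLS(y, x).fit()
    # The non-focus columns are smoothed against column 1 by lowess to form
    # the conditional means used in the CERES adjustment.
    return ceres_resids(res, focus_exog=1)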
def partial_resids(results, focus_exog):
"""
Returns partial residuals for a fitted model with respect to a
'focus predictor'.
Parameters
----------
results : results instance
A fitted regression model.
    focus_exog : int or str
        The column index or name of model.exog with respect to which the
        partial residuals are calculated.
Returns
-------
An array of partial residuals.
References
----------
RD Cook and R Croos-Dabrera (1998). Partial residual plots in
generalized linear models. Journal of the American Statistical
Association, 93:442.
"""
# TODO: could be a method of results
# TODO: see Cook et al (1998) for a more general definition
# The calculation follows equation (8) from Cook's paper.
model = results.model
resid = model.endog - results.predict()
if isinstance(model, (GLM, GEE)):
resid *= model.family.link.deriv(results.fittedvalues)
elif isinstance(model, (OLS, GLS, WLS)):
pass # No need to do anything
else:
raise ValueError("Partial residuals for '%s' not implemented."
% type(model))
if type(focus_exog) is str:
focus_col = model.exog_names.index(focus_exog)
else:
focus_col = focus_exog
focus_val = results.params[focus_col] * model.exog[:, focus_col]
return focus_val + resid
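# Worked sketch (an assumption, for illustration only): for OLS the partial
# residual for column i reduces to resid + params[i] * exog[:, i], so the
# helper above can be checked directly against that formula.
def _example_partial_resids():
    """Minimal sketch comparing partial_resids with the explicit OLS formula."""
    import numpy as np
    import statsmodels.api as sm
    np.random.seed(3)
    x = sm.add_constant(np.random.normal(size=(100, 2)))
    y = np.dot(x, [1.0, 2.0, -1.0]) + np.random.normal(size=100)
    res = sm.OLS(y, x).fit()
    pr = partial_resids(res, focus_exog=1)
    manual = res.resid + res.params[1] * x[:, 1]
    # The two should agree to numerical precision for a linear model.
    return np.allclose(pr, manual)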
def added_variable_resids(results, focus_exog, resid_type=None,
use_glm_weights=True, fit_kwargs=None):
"""
Residualize the endog variable and a 'focus' exog variable in a
regression model with respect to the other exog variables.
Parameters
----------
results : regression results instance
A fitted model including the focus exog and all other
predictors of interest.
focus_exog : integer or string
The column of results.model.exog or a variable name that is
to be residualized against the other predictors.
resid_type : string
The type of residuals to use for the dependent variable. If
None, uses `resid_deviance` for GLM/GEE and `resid` otherwise.
use_glm_weights : bool
Only used if the model is a GLM or GEE. If True, the
residuals for the focus predictor are computed using WLS, with
the weights obtained from the IRLS calculations for fitting
the GLM. If False, unweighted regression is used.
fit_kwargs : dict, optional
Keyword arguments to be passed to fit when refitting the
model.
Returns
-------
    endog_resid : array-like
        The residuals of the endog variable after regressing out the
        non-focus predictors.
    focus_exog_resid : array-like
        The residuals of the focus predictor after regressing out the
        non-focus predictors.
Notes
-----
The 'focus variable' residuals are always obtained using linear
regression.
Currently only GLM, GEE, and OLS models are supported.
"""
model = results.model
if not isinstance(model, (GEE, GLM, OLS)):
raise ValueError("model type %s not supported for added variable residuals" %
model.__class__.__name__)
exog = model.exog
endog = model.endog
focus_exog, focus_col = utils.maybe_name_or_idx(focus_exog, model)
focus_exog_vals = exog[:, focus_col]
# Default residuals
if resid_type is None:
if isinstance(model, (GEE, GLM)):
resid_type = "resid_deviance"
else:
resid_type = "resid"
ii = range(exog.shape[1])
ii = list(ii)
ii.pop(focus_col)
reduced_exog = exog[:, ii]
start_params = results.params[ii]
klass = model.__class__
kwargs = model._get_init_kwds()
new_model = klass(endog, reduced_exog, **kwargs)
args = {"start_params": start_params}
if fit_kwargs is not None:
args.update(fit_kwargs)
new_result = new_model.fit(**args)
if not new_result.converged:
raise ValueError("fit did not converge when calculating added variable residuals")
try:
endog_resid = getattr(new_result, resid_type)
except AttributeError:
raise ValueError("'%s' residual type not available" % resid_type)
import statsmodels.regression.linear_model as lm
if isinstance(model, (GLM, GEE)) and use_glm_weights:
weights = model.family.weights(results.fittedvalues)
if hasattr(model, "data_weights"):
weights = weights * model.data_weights
lm_results = lm.WLS(focus_exog_vals, reduced_exog, weights).fit()
else:
lm_results = lm.OLS(focus_exog_vals, reduced_exog).fit()
focus_exog_resid = lm_results.resid
return endog_resid, focus_exog_resid
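# Illustrative sketch (an assumption, not from the original source): for a
# Gaussian GLM the added-variable residuals reproduce the focus coefficient
# (the Frisch-Waugh-Lovell result) when the endog residuals are regressed on
# the focus-exog residuals. All data below are made up.
def _example_added_variable_resids():
    """Minimal sketch of the Frisch-Waugh-Lovell check on synthetic data."""
    import numpy as np
    import statsmodels.api as sm
    np.random.seed(4)
    x = sm.add_constant(np.random.normal(size=(150, 2)))
    y = np.dot(x, [1.0, 0.7, -0.3]) + np.random.normal(size=150)
    res = sm.GLM(y, x, family=sm.families.Gaussian()).fit()
    endog_resid, focus_resid = added_variable_resids(res, focus_exog=1)
    slope = np.dot(focus_resid, endog_resid) / np.dot(focus_resid, focus_resid)
    # slope should be close to the focus coefficient from the full fit.
    return slope, res.params[1]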
|
dagnir/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/XMLHttpRequest/resources/corsenabled.py
|
187
|
import time
def main(request, response):
headers = [("Access-Control-Allow-Origin", "*"),
("Access-Control-Allow-Credentials", "true"),
("Access-Control-Allow-Methods", "GET, POST, PUT, FOO"),
("Access-Control-Allow-Headers", "x-test, x-foo"),
("Access-Control-Expose-Headers", "x-request-method, x-request-content-type, x-request-query, x-request-content-length")]
if "delay" in request.GET:
delay = int(request.GET.first("delay"))
time.sleep(delay)
headers.append(("X-Request-Method", request.method))
headers.append(("X-Request-Query", request.url_parts.query if request.url_parts.query else "NO"))
headers.append(("X-Request-Content-Length", request.headers.get("Content-Length", "NO")))
headers.append(("X-Request-Content-Type", request.headers.get("Content-Type", "NO")))
return headers, "Test"
|
csulb-cecs424-2016sp/Lectures
|
refs/heads/master
|
Python/Functions/dictionaries.py
|
20
|
# A "dictionary" is a map from keys to values
d = {"key1" : 5, "key2" : "Hello", 7 : 100}
print(d["key1"])
print(d["key2"])
print(d[7])
emp = {"name": "Neal", "salary": "10000", "age": 93}
print(emp)
# Iterate through the keys of the dictionary
for i in emp:
print(emp[i])
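# A few more common dictionary operations (illustrative additions; the values
# below are made up and not part of the original lesson):
# .get() returns a default instead of raising KeyError when a key is missing
print(emp.get("salary"))
print(emp.get("title", "unknown"))
# .items() iterates over key/value pairs together
for key, value in emp.items():
    print(key, "->", value)
# Add or overwrite an entry, then remove one with del
emp["title"] = "Engineer"
del emp["age"]
print(emp)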
|
jmacmahon/invenio
|
refs/heads/elasticsearch_logging
|
modules/miscutil/lib/solrutils_config.py
|
16
|
# This file is part of Invenio.
# Copyright (C) 2010, 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# Syntax (lower, upper, replacement): replace char if in range(lower, upper + 1)
CFG_SOLR_INVALID_CHAR_RANGES = ((0, 8, ' '), (11, 12, ' '), (14, 31, ' '), (55296, 57343, ''), (65534, 65535, ''))
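# Illustrative helper (an assumption, not part of the original configuration):
# one way the ranges above could be applied to sanitize text before indexing.
# Each (lower, upper, replacement) tuple replaces code points in
# range(lower, upper + 1) with the given replacement character.
def _replace_invalid_solr_characters(text):
    """Return `text` with characters in CFG_SOLR_INVALID_CHAR_RANGES replaced."""
    cleaned = []
    for char in text:
        code = ord(char)
        for lower, upper, replacement in CFG_SOLR_INVALID_CHAR_RANGES:
            if lower <= code <= upper:
                cleaned.append(replacement)
                break
        else:
            cleaned.append(char)
    return ''.join(cleaned)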
|
perryjrandall/arsenalsuite
|
refs/heads/master
|
cpp/lib/PyQt4/examples/designer/plugins/python/polygonwidgetplugin.py
|
20
|
#!/usr/bin/env python
"""
polygonwidgetplugin.py
A polygon widget custom widget plugin for Qt Designer.
Copyright (C) 2006 David Boddie <[email protected]>
Copyright (C) 2005-2006 Trolltech ASA. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from PyQt4 import QtGui, QtDesigner
from polygonwidget import PolygonWidget
class PolygonWidgetPlugin(QtDesigner.QPyDesignerCustomWidgetPlugin):
"""PolygonWidgetPlugin(QtDesigner.QPyDesignerCustomWidgetPlugin)
Provides a Python custom plugin for Qt Designer by implementing the
QDesignerCustomWidgetPlugin via a PyQt-specific custom plugin class.
"""
# The __init__() method is only used to set up the plugin and define its
# initialized variable.
def __init__(self, parent=None):
super(PolygonWidgetPlugin, self).__init__(parent)
self.initialized = False
# The initialize() and isInitialized() methods allow the plugin to set up
# any required resources, ensuring that this can only happen once for each
# plugin.
def initialize(self, core):
if self.initialized:
return
self.initialized = True
def isInitialized(self):
return self.initialized
# This factory method creates new instances of our custom widget with the
# appropriate parent.
def createWidget(self, parent):
return PolygonWidget(parent)
# This method returns the name of the custom widget class that is provided
# by this plugin.
def name(self):
return "PolygonWidget"
# Returns the name of the group in Qt Designer's widget box that this
# widget belongs to.
def group(self):
return "PyQt Examples"
# Returns the icon used to represent the custom widget in Qt Designer's
# widget box.
def icon(self):
return QtGui.QIcon(_logo_pixmap)
# Returns a short description of the custom widget for use in a tool tip.
def toolTip(self):
return ""
# Returns a short description of the custom widget for use in a "What's
# This?" help message for the widget.
def whatsThis(self):
return ""
# Returns True if the custom widget acts as a container for other widgets;
# otherwise returns False. Note that plugins for custom containers also
# need to provide an implementation of the QDesignerContainerExtension
# interface if they need to add custom editing support to Qt Designer.
def isContainer(self):
return False
# Returns an XML description of a custom widget instance that describes
# default values for its properties. Each custom widget created by this
# plugin will be configured using this description.
def domXml(self):
return '<widget class="PolygonWidget" name="polygonWidget" />\n'
# Returns the module containing the custom widget class. It may include
# a module path.
def includeFile(self):
return "polygonwidget"
# Define the image used for the icon.
_logo_16x16_xpm = [
"16 16 46 1",
". c #a5a5dc",
"l c #a69fd6",
"k c #a7a5da",
"h c #a7a6dc",
"a c #a7a7de",
"Q c #a8a5da",
"s c #a9a7d7",
"R c #a9a9e0",
"z c #abaad4",
"E c #afafda",
"M c #afafdb",
"K c #b0a8e2",
"o c #b1afe4",
"p c #b2b2d7",
"# c #b2b2ed",
"i c #b39eb6",
"F c #b3b3e1",
"e c #b4b4ef",
"t c #b58bab",
"d c #b6b6f2",
"n c #b798b8",
"P c #b798b9",
"c c #b8b6f2",
"D c #b8b89c",
"m c #b9648d",
"J c #ba84b0",
"A c #bdbdfb",
"f c #bfbffe",
"g c #c06996",
"b c #c0c0ff",
"B c #cbb889",
"L c #cbb989",
"O c #cfcf87",
"I c #d09585",
"w c #d0cf86",
"x c #dede81",
"G c #e8e87c",
"q c #edde7b",
"N c #f1e07b",
"v c #f2e07b",
"H c #f6e57c",
"j c #fb917e",
"u c #ffb580",
"r c #ffda80",
"C c #fffe80",
"y c #ffff80",
".##############a",
"#bbbbbbbbcdbbbbe",
"#bbbbbbbfghbbbbe",
"#bbbbbbbijkbbbbe",
"#blmnobpqrsbbbbe",
"#bbtuvwxyyzbbbbe",
"#bbABCyyyyDEfbbe",
"#bbbFGyyyyyHIJKe",
"#bbbFGyyyyyHIJKe",
"#bbALCyyyyDMfbbe",
"#bbtuNOxyyzbbbbe",
"#blmPobpqrsbbbbe",
"#bbbbbbbijQbbbbe",
"#bbbbbbbfghbbbbe",
"#bbbbbbbbcdbbbbe",
"aeeeeeeeeeeeeeeR"]
_logo_pixmap = QtGui.QPixmap(_logo_16x16_xpm)
|
VanirAOSP/external_chromium_org
|
refs/heads/kk44
|
tools/telemetry/telemetry/core/backends/chrome/inspector_page_unittest.py
|
24
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core import util
from telemetry.unittest import tab_test_case
class InspectorPageTest(tab_test_case.TabTestCase):
def __init__(self, *args):
super(InspectorPageTest, self).__init__(*args)
def setUp(self):
super(InspectorPageTest, self).setUp()
self._browser.SetHTTPServerDirectories(util.GetUnittestDataDir())
def testPageNavigateToNormalUrl(self):
self._tab.Navigate(self._browser.http_server.UrlOf('blank.html'))
self._tab.WaitForDocumentReadyStateToBeComplete()
def testCustomActionToNavigate(self):
self._tab.Navigate(
self._browser.http_server.UrlOf('page_with_link.html'))
self._tab.WaitForDocumentReadyStateToBeComplete()
self.assertEquals(
self._tab.EvaluateJavaScript('document.location.pathname;'),
'/page_with_link.html')
custom_action_called = [False]
def CustomAction():
custom_action_called[0] = True
self._tab.ExecuteJavaScript('document.getElementById("clickme").click();')
self._tab.PerformActionAndWaitForNavigate(CustomAction)
self.assertTrue(custom_action_called[0])
self.assertEquals(
self._tab.EvaluateJavaScript('document.location.pathname;'),
'/blank.html')
def testGetCookieByName(self):
self._tab.Navigate(
self._browser.http_server.UrlOf('blank.html'))
self._tab.WaitForDocumentReadyStateToBeComplete()
self._tab.ExecuteJavaScript('document.cookie="foo=bar"')
self.assertEquals(self._tab.GetCookieByName('foo'), 'bar')
def testScriptToEvaluateOnCommit(self):
self._tab.Navigate(
self._browser.http_server.UrlOf('blank.html'),
script_to_evaluate_on_commit='var foo = "bar";')
self._tab.WaitForDocumentReadyStateToBeComplete()
self.assertEquals(self._tab.EvaluateJavaScript('foo'), 'bar')
|
schelleg/PYNQ
|
refs/heads/master
|
pynq/pl_server/server.py
|
2
|
# Copyright (c) 2016-2021, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from multiprocessing.connection import Listener
from multiprocessing.connection import Client
from copy import deepcopy
import os
import threading
from pynq.devicetree import DeviceTreeSegment
from pynq.devicetree import get_dtbo_base_name
from .hwh_parser import HWH, get_hwh_name
__author__ = "Yun Rock Qu, Peter Ogden"
__copyright__ = "Copyright 2019, Xilinx"
__email__ = "[email protected]"
# Overlay constants
PYNQ_PATH = os.path.dirname(os.path.realpath(__file__))
PL_SERVER_TEMPLATE = '/tmp/pynq.{}.socket'
def clear_state(dict_in):
"""Clear the state information for a given dictionary.
Parameters
----------
dict_in : dict
Input dictionary to be cleared.
"""
if type(dict_in) is dict:
for i in dict_in:
if 'state' in dict_in[i]:
dict_in[i]['state'] = None
return dict_in
class DeviceClient:
"""Class to access the PL server
The properties of the class access the most recent version
from the PL server and are read-only. All updating of the
PL server is performed by methods.
"""
@staticmethod
def accessible(tag):
try:
client = DeviceClient(tag)
client.client_request()
client.server_update()
return True
except (ConnectionError, PermissionError):
return False
def __init__(self, tag, key=b'xilinx'):
"""Create a new instance of the PL server
Parameters
----------
tag : string or path
The unique identifier of the device
key : bytes
The authentication key for the server
"""
self._ip_dict = {}
self._gpio_dict = {}
self._interrupt_controllers = {}
self._interrupt_pins = {}
self._hierarchy_dict = {}
self._devicetree_dict = {}
self._address = PL_SERVER_TEMPLATE.format(tag)
self._key = key
self._timestamp = None
self._bitfile_name = None
@property
def ip_dict(self):
"""The getter for the attribute `ip_dict`.
Returns
-------
dict
The dictionary storing addressable IP instances; can be empty.
"""
self.client_request()
self.server_update()
return self._ip_dict
@property
def gpio_dict(self):
"""The getter for the attribute `gpio_dict`.
Returns
-------
dict
The dictionary storing the PS GPIO pins.
"""
self.client_request()
self.server_update()
return self._gpio_dict
@property
def interrupt_pins(self):
"""The getter for the attribute `interrupt_pins`.
Returns
-------
dict
The dictionary storing the interrupt endpoint information.
"""
self.client_request()
self.server_update()
return self._interrupt_pins
@property
def interrupt_controllers(self):
"""The getter for the attribute `interrupt_controllers`.
Returns
-------
dict
The dictionary storing interrupt controller information.
"""
self.client_request()
self.server_update()
return self._interrupt_controllers
@property
def hierarchy_dict(self):
"""The getter for the attribute `hierarchy_dict`
Returns
-------
dict
The dictionary containing the hierarchies in the design
"""
self.client_request()
self.server_update()
return self._hierarchy_dict
@property
def devicetree_dict(self):
"""The getter for the attribute `devicetree_dict`
Returns
-------
dict
The dictionary containing the device tree blobs.
"""
self.client_request()
self.server_update()
return self._devicetree_dict
@property
def bitfile_name(self):
"""The getter for the attribute `bitfile_name`.
Returns
-------
str
The absolute path of the bitstream currently on PL.
"""
self.client_request()
self.server_update()
return self._bitfile_name
@property
def timestamp(self):
"""The getter for the attribute `timestamp`.
Returns
-------
str
Bitstream download timestamp.
"""
self.client_request()
self.server_update()
return self._timestamp
@property
def mem_dict(self):
"""The getter for the attribute `mem_dict`
Returns
-------
dict
The dictionary containing the memory spaces in the design
"""
self.client_request()
self.server_update()
return self._mem_dict
def reset(self, parser=None, timestamp=None, bitfile_name=None):
"""Reset all the dictionaries.
This method must be called after a bitstream download.
1. In case there is a `hwh` file, this method will reset
the states of the IP, GPIO, and interrupt dictionaries .
2. In case there is no `hwh` file, this method will simply
clear the state information stored for all dictionaries.
An existing parser given as the input can significantly reduce
the reset time, since the PL can reset based on the
information provided by the parser.
Parameters
----------
parser : HWH
A parser object to speed up the reset process.
"""
self.client_request()
if parser is not None:
self._ip_dict = parser.ip_dict
self._gpio_dict = parser.gpio_dict
self._interrupt_controllers = parser.interrupt_controllers
self._interrupt_pins = parser.interrupt_pins
self._hierarchy_dict = parser.hierarchy_dict
self._mem_dict = parser.mem_dict
else:
hwh_name = get_hwh_name(self._bitfile_name)
if os.path.isfile(hwh_name):
self._ip_dict = clear_state(self._ip_dict)
self._gpio_dict = clear_state(self._gpio_dict)
else:
self.clear_dict()
if timestamp is not None:
self._timestamp = timestamp
if bitfile_name is not None:
self._bitfile_name = bitfile_name
self.server_update()
def clear_dict(self):
"""Clear all the dictionaries stored in PL.
This method will clear all the related dictionaries, including IP
dictionary, GPIO dictionary, etc.
"""
self._ip_dict.clear()
self._gpio_dict.clear()
self._interrupt_controllers.clear()
self._interrupt_pins.clear()
self._hierarchy_dict.clear()
self._mem_dict.clear()
def load_ip_data(self, ip_name, data):
"""This method writes data to the addressable IP.
Note
----
The data is assumed to be in binary format (.bin). The data
name will be stored as a state information in the IP dictionary.
Parameters
----------
ip_name : str
The name of the addressable IP.
data : str
The absolute path of the data to be loaded.
zero : bool
Zero out the address of the IP not covered by data
Returns
-------
None
"""
self.client_request()
self._ip_dict[ip_name]['state'] = data
self.server_update()
def update_partial_region(self, hier, parser):
"""Merge the parser information from partial region.
Combine the currently PL information and the partial HWH file
parsing results.
Parameters
----------
hier : str
The name of the hierarchical block as the partial region.
parser : HWH
A parser object for the partial region.
"""
self.client_request()
self._update_pr_ip(parser, hier)
self._update_pr_gpio(parser)
self._update_pr_intr_pins(parser)
self._update_pr_hier(hier)
self.server_update()
def _update_pr_ip(self, parser, hier):
merged_ip_dict = deepcopy(self._ip_dict)
if type(parser) is HWH:
for k in merged_ip_dict.copy():
if k.startswith(hier) and 's_axi_control' not in k:
merged_ip_dict.pop(k)
for k, v in parser.ip_dict.items():
parent = k.split('/')[0] + '/' + v['mem_id']
if parent in self._ip_dict:
ip_name = v['fullpath']
merged_ip_dict[ip_name] = dict()
merged_ip_dict[ip_name]['fullpath'] = v['fullpath']
merged_ip_dict[ip_name]['parameters'] = v['parameters']
merged_ip_dict[ip_name]['phys_addr'] = \
self._ip_dict[parent]['phys_addr'] + v['phys_addr']
merged_ip_dict[ip_name]['addr_range'] = v['addr_range']
merged_ip_dict[ip_name]['registers'] = v['registers']
merged_ip_dict[ip_name]['state'] = None
merged_ip_dict[ip_name]['type'] = v['type']
merged_ip_dict[ip_name]['gpio'] = {}
merged_ip_dict[ip_name]['interrupts'] = {}
merged_ip_dict[ip_name]['mem_id'] = v['mem_id']
else:
raise ValueError("Cannot find HWH PR region parser.")
self._ip_dict = merged_ip_dict
def _update_pr_gpio(self, parser):
new_gpio_dict = dict()
for k, v in self._gpio_dict.items():
for pin in v['pins']:
if pin in parser.pins:
v |= parser.nets[parser.pins[pin]]
new_gpio_dict[k] = v
self._gpio_dict = new_gpio_dict
def _update_pr_intr_pins(self, parser):
new_interrupt_pins = dict()
for k, v in self._interrupt_pins.items():
if k in parser.pins:
net_set = parser.nets[parser.pins[k]]
hier_map = {i.count('/'): i for i in net_set}
hier_map = sorted(hier_map.items(), reverse=True)
fullpath = hier_map[0][-1]
new_interrupt_pins[fullpath] = deepcopy(v)
new_interrupt_pins[fullpath]['fullpath'] = fullpath
else:
new_interrupt_pins[k] = v
self._interrupt_pins = new_interrupt_pins
def _update_pr_hier(self, hier):
self._hierarchy_dict[hier] = {
'ip': dict(),
'hierarchies': dict(),
'interrupts': dict(),
'gpio': dict(),
'fullpath': hier,
'memories': dict()
}
for name, val in self._ip_dict.items():
hier, _, ip = name.rpartition('/')
if hier:
                self._hierarchy_dict[hier]['ip'][ip] = val
for name, val in self._hierarchy_dict.items():
hier, _, subhier = name.rpartition('/')
if hier:
self._hierarchy_dict[hier]['hierarchies'][subhier] = val
for interrupt, val in self._interrupt_pins.items():
block, _, pin = interrupt.rpartition('/')
if block in self._ip_dict:
self._ip_dict[block]['interrupts'][pin] = val
if block in self._hierarchy_dict:
self._hierarchy_dict[block]['interrupts'][pin] = val
for gpio in self._gpio_dict.values():
for connection in gpio['pins']:
ip, _, pin = connection.rpartition('/')
if ip in self._ip_dict:
self._ip_dict[ip]['gpio'][pin] = gpio
elif ip in self._hierarchy_dict:
self._hierarchy_dict[ip]['gpio'][pin] = gpio
def clear_devicetree(self):
"""Clear the device tree dictionary.
This should be used when downloading the full bitstream, where all the
dtbo are cleared from the system.
"""
for i in self._devicetree_dict:
self._devicetree_dict[i].remove()
def insert_device_tree(cls, abs_dtbo):
"""Insert device tree segment.
For device tree segments associated with full / partial bitstreams,
users can provide the relative or absolute paths of the dtbo files.
Parameters
----------
abs_dtbo : str
The absolute path to the device tree segment.
"""
cls.client_request()
dtbo_base_name = get_dtbo_base_name(abs_dtbo)
cls._devicetree_dict[dtbo_base_name] = DeviceTreeSegment(abs_dtbo)
cls._devicetree_dict[dtbo_base_name].remove()
cls._devicetree_dict[dtbo_base_name].insert()
cls.server_update()
def remove_device_tree(cls, abs_dtbo):
"""Remove device tree segment for the overlay.
Parameters
----------
abs_dtbo : str
The absolute path to the device tree segment.
"""
cls.client_request()
dtbo_base_name = get_dtbo_base_name(abs_dtbo)
cls._devicetree_dict[dtbo_base_name].remove()
del cls._devicetree_dict[dtbo_base_name]
cls.server_update()
def client_request(self):
"""Client connects to the PL server and receives the attributes.
This method should not be used by the users directly. To check open
pipes in the system, use `lsof | grep <address>` and
`kill -9 <pid>` to manually delete them.
Parameters
----------
address : str
The filename on the file system.
key : bytes
The authentication key of connection.
Returns
-------
None
"""
try:
self._remote = Client(self._address, family='AF_UNIX',
authkey=self._key)
except FileNotFoundError:
raise ConnectionError(
"Could not connect to PL server") from None
self._bitfile_name, self._timestamp, \
self._ip_dict, self._gpio_dict, \
self._interrupt_controllers, \
self._interrupt_pins, \
self._hierarchy_dict, \
self._devicetree_dict, \
self._mem_dict = self._remote.recv()
def server_update(self, continued=1):
self._remote.send([self._bitfile_name,
self._timestamp,
self._ip_dict,
self._gpio_dict,
self._interrupt_controllers,
self._interrupt_pins,
self._hierarchy_dict,
self._devicetree_dict,
self._mem_dict,
continued])
self._remote.close()
class DeviceServer:
"""Class to provide an instance of the PL server
"""
def __init__(self, tag, key=b'xilinx'):
self.tag = tag
self.socket_name = PL_SERVER_TEMPLATE.format(tag)
self.key = key
self.thread = threading.Thread(target=self.server_proc)
self._data = [
"", # Bitfile name
None, # Timestamp
dict(), # IP Dict
dict(), # GPIO Dict
dict(), # Interrupt Dict
dict(), # Interrupt Pin Dict
dict(), # Hierarchy Dict
dict(), # Devicetree dict
dict() # Memory Dict
]
self._started = threading.Event()
def start(self, daemonize=True):
self.thread.daemon = daemonize
self.thread.start()
self._started.wait()
def server_proc(self):
if os.path.exists(self.socket_name):
os.remove(self.socket_name)
server = Listener(self.socket_name, family='AF_UNIX', authkey=self.key)
self._started.set()
status = True
while status:
client = server.accept()
client.send(self._data)
new_data = client.recv()
self._data = new_data[0:-1]
status = new_data[-1]
client.close()
server.close()
if os.path.exists(self.socket_name):
os.remove(self.socket_name)
def stop(self, wait_for_thread=True):
client = DeviceClient(self.tag, self.key)
client.client_request()
client.server_update(0)
if wait_for_thread:
self.thread.join()
def _start_server():
from .device import Device
Device.start_global = True
servers = [
DeviceServer(d.tag) for d in Device.devices
]
for s in servers:
s.start(False)
for s in servers:
s.thread.join()
def _stop_server():
from .device import Device
Device.start_global = True
servers = [
DeviceServer(d.tag) for d in Device.devices
]
for s in servers:
# This is called from a separate process so the threads aren't started
s.stop(False)
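# Illustrative usage sketch (an assumption, not part of the original module):
# start a PL server for a made-up device tag and query it from a client over
# the AF_UNIX socket /tmp/pynq.<tag>.socket.
def _example_server_client_roundtrip():
    """Minimal sketch of the DeviceServer / DeviceClient handshake."""
    server = DeviceServer("example")
    server.start()
    client = DeviceClient("example")
    # Each property access does a client_request()/server_update() round trip.
    bitfile = client.bitfile_name
    server.stop()
    return bitfile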
|
adeepkit01/ns3-dhcp-new
|
refs/heads/master
|
bindings/python/ns3modulescan-modular.py
|
186
|
#! /usr/bin/env python
import sys
import os.path
import pybindgen.settings
from pybindgen.gccxmlparser import ModuleParser, PygenClassifier, PygenSection, WrapperWarning, find_declaration_from_name
from pybindgen.typehandlers.codesink import FileCodeSink
from pygccxml.declarations import templates
from pygccxml.declarations.enumeration import enumeration_t
from pygccxml.declarations.class_declaration import class_t
from pygccxml.declarations.calldef import free_function_t, member_function_t, constructor_t, calldef_t
## we need the smart pointer type transformation to be active even
## during gccxml scanning.
import ns3modulegen_core_customizations
## silence gccxmlparser errors; we only want error handling in the
## generated python script, not while scanning.
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, dummy_wrapper, dummy_exception, dummy_traceback_):
return True
pybindgen.settings.error_handler = ErrorHandler()
import warnings
warnings.filterwarnings(category=WrapperWarning, action='ignore')
import ns3modulescan
type_annotations = ns3modulescan.type_annotations
def get_ns3_relative_path(path):
l = []
head = path
while head:
new_head, tail = os.path.split(head)
if new_head == head:
raise ValueError
head = new_head
if tail == 'ns3':
return os.path.join(*l)
l.insert(0, tail)
raise AssertionError("is the path %r inside ns3?!" % path)
class PreScanHook:
def __init__(self, headers_map, module):
self.headers_map = headers_map
self.module = module
def __call__(self, module_parser,
pygccxml_definition,
global_annotations,
parameter_annotations):
try:
ns3_header = get_ns3_relative_path(pygccxml_definition.location.file_name)
except ValueError: # the header is not from ns3
return # ignore the definition, it's not ns-3 def.
definition_module = self.headers_map[ns3_header]
## Note: we don't include line numbers in the comments because
## those numbers are very likely to change frequently, which would
## cause needless changes, since the generated python files are
## kept under version control.
#global_annotations['pygen_comment'] = "%s:%i: %s" % \
# (ns3_header, pygccxml_definition.location.line, pygccxml_definition)
global_annotations['pygen_comment'] = "%s (module %r): %s" % \
(ns3_header, definition_module, pygccxml_definition)
## handle ns3::Object::GetObject (left to its own devices,
## pybindgen will generate a mangled name containing the template
## argument type name).
if isinstance(pygccxml_definition, member_function_t) \
and pygccxml_definition.parent.name == 'Object' \
and pygccxml_definition.name == 'GetObject':
template_args = templates.args(pygccxml_definition.demangled_name)
if template_args == ['ns3::Object']:
global_annotations['template_instance_names'] = 'ns3::Object=>GetObject'
## Don't wrap Simulator::Schedule* (manually wrapped)
if isinstance(pygccxml_definition, member_function_t) \
and pygccxml_definition.parent.name == 'Simulator' \
and pygccxml_definition.name.startswith('Schedule'):
global_annotations['ignore'] = None
# manually wrapped
if isinstance(pygccxml_definition, member_function_t) \
and pygccxml_definition.parent.name == 'Simulator' \
and pygccxml_definition.name == 'Run':
global_annotations['ignore'] = True
## http://www.gccxml.org/Bug/view.php?id=9915
if isinstance(pygccxml_definition, calldef_t):
for arg in pygccxml_definition.arguments:
if arg.default_value is None:
continue
elif arg.default_value == "ns3::MilliSeconds( )":
arg.default_value = "ns3::MilliSeconds(0)"
elif arg.default_value == "ns3::Seconds( )":
arg.default_value = "ns3::Seconds(0)"
## classes
if isinstance(pygccxml_definition, class_t):
print >> sys.stderr, pygccxml_definition
# no need for helper classes to allow subclassing in Python, I think...
#if pygccxml_definition.name.endswith('Helper'):
# global_annotations['allow_subclassing'] = 'false'
#
# If a class is template instantiation, even if the
# template was defined in some other module, if a template
# argument belongs to this module then the template
# instantiation will belong to this module.
#
if templates.is_instantiation(pygccxml_definition.decl_string):
cls_name, template_parameters = templates.split(pygccxml_definition.name)
template_parameters_decls = [find_declaration_from_name(module_parser.global_ns, templ_param)
for templ_param in template_parameters]
#print >> sys.stderr, "********************", cls_name, repr(template_parameters_decls)
template_parameters_modules = []
for templ in template_parameters_decls:
if not hasattr(templ, 'location'):
continue
try:
h = get_ns3_relative_path(templ.location.file_name)
except ValueError:
continue
template_parameters_modules.append(self.headers_map[h])
for templ_mod in template_parameters_modules:
if templ_mod == self.module:
definition_module = templ_mod
break
#print >> sys.stderr, "********************", cls_name, repr(template_parameters_modules)
if definition_module != self.module:
global_annotations['import_from_module'] = 'ns.%s' % (definition_module.replace('-', '_'),)
if pygccxml_definition.decl_string.startswith('::ns3::SimpleRefCount<'):
global_annotations['incref_method'] = 'Ref'
global_annotations['decref_method'] = 'Unref'
global_annotations['peekref_method'] = 'GetReferenceCount'
global_annotations['automatic_type_narrowing'] = 'true'
return
if pygccxml_definition.decl_string.startswith('::ns3::Callback<'):
# manually handled in ns3modulegen_core_customizations.py
global_annotations['ignore'] = None
return
if pygccxml_definition.decl_string.startswith('::ns3::TracedCallback<'):
global_annotations['ignore'] = None
return
if pygccxml_definition.decl_string.startswith('::ns3::Ptr<'):
# handled by pybindgen "type transformation"
global_annotations['ignore'] = None
return
# table driven class customization
try:
annotations = type_annotations[pygccxml_definition.decl_string]
except KeyError:
pass
else:
global_annotations.update(annotations)
## enums
if isinstance(pygccxml_definition, enumeration_t):
if definition_module != self.module:
global_annotations['import_from_module'] = 'ns.%s' % definition_module
## free functions
if isinstance(pygccxml_definition, free_function_t):
if definition_module != self.module:
global_annotations['ignore'] = None
return
if pygccxml_definition.name == 'PeekPointer':
global_annotations['ignore'] = None
return
## table driven methods/constructors/functions customization
if isinstance(pygccxml_definition, (free_function_t, member_function_t, constructor_t)):
try:
annotations = type_annotations[str(pygccxml_definition)]
except KeyError:
pass
else:
for key,value in annotations.items():
if key == 'params':
parameter_annotations.update (value)
del annotations['params']
global_annotations.update(annotations)
# def post_scan_hook(dummy_module_parser, dummy_pygccxml_definition, pybindgen_wrapper):
# ## classes
# if isinstance(pybindgen_wrapper, CppClass):
# if pybindgen_wrapper.name.endswith('Checker'):
# print >> sys.stderr, "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!", pybindgen_wrapper
# #pybindgen_wrapper.set_instance_creation_function(AttributeChecker_instance_creation_function)
def scan_callback_classes(module_parser, callback_classes_file):
callback_classes_file.write("callback_classes = [\n")
for cls in module_parser.module_namespace.classes(function=module_parser.location_filter,
recursive=False):
if not cls.name.startswith("Callback<"):
continue
assert templates.is_instantiation(cls.decl_string), "%s is not a template instantiation" % cls
dummy_cls_name, template_parameters = templates.split(cls.decl_string)
callback_classes_file.write(" %r,\n" % template_parameters)
callback_classes_file.write("]\n")
def ns3_module_scan(top_builddir, module_name, headers_map, output_file_name, cflags):
module_parser = ModuleParser('ns.%s' % module_name.replace('-', '_'), 'ns3')
module_parser.add_pre_scan_hook(PreScanHook(headers_map, module_name))
#module_parser.add_post_scan_hook(post_scan_hook)
gccxml_options = dict(
include_paths=[top_builddir],
define_symbols={
#'NS3_ASSERT_ENABLE': None,
#'NS3_LOG_ENABLE': None,
},
cflags=('--gccxml-cxxflags "%s -DPYTHON_SCAN"' % cflags)
)
try:
os.unlink(output_file_name)
except OSError:
pass
try:
os.makedirs(os.path.dirname(output_file_name))
except OSError:
pass
output_file = open(output_file_name, "wt")
output_sink = FileCodeSink(output_file)
# if there exists a scan-header.h file in src/<module>/bindings,
# scan it, otherwise scan ns3/xxxx-module.h.
scan_header = os.path.join(os.path.dirname(output_file_name), "scan-header.h")
if not os.path.exists(scan_header):
scan_header = os.path.join(top_builddir, "ns3", "%s-module.h" % module_name)
module_parser.parse_init([scan_header],
None, whitelist_paths=[top_builddir],
#includes=['"ns3/everything.h"'],
pygen_sink=output_sink,
gccxml_options=gccxml_options)
module_parser.scan_types()
callback_classes_file = open(os.path.join(os.path.dirname(output_file_name), "callbacks_list.py"), "wt")
scan_callback_classes(module_parser, callback_classes_file)
callback_classes_file.close()
module_parser.scan_methods()
module_parser.scan_functions()
module_parser.parse_finalize()
output_file.close()
os.chmod(output_file_name, 0400)
if __name__ == '__main__':
if len(sys.argv) != 6:
print "ns3modulescan-modular.py top_builddir module_path module_headers output_file_name cflags"
sys.exit(1)
ns3_module_scan(sys.argv[1], sys.argv[2], eval(sys.argv[3]), sys.argv[4], sys.argv[5])
sys.exit(0)
|
nickhdamico/py
|
refs/heads/master
|
lib/unidecode/x0a1.py
|
253
|
data = (
'dit', # 0x00
'dix', # 0x01
'di', # 0x02
'dip', # 0x03
'diex', # 0x04
'die', # 0x05
'diep', # 0x06
'dat', # 0x07
'dax', # 0x08
'da', # 0x09
'dap', # 0x0a
'duox', # 0x0b
'duo', # 0x0c
'dot', # 0x0d
'dox', # 0x0e
'do', # 0x0f
'dop', # 0x10
'dex', # 0x11
'de', # 0x12
'dep', # 0x13
'dut', # 0x14
'dux', # 0x15
'du', # 0x16
'dup', # 0x17
'durx', # 0x18
'dur', # 0x19
'tit', # 0x1a
'tix', # 0x1b
'ti', # 0x1c
'tip', # 0x1d
'tiex', # 0x1e
'tie', # 0x1f
'tiep', # 0x20
'tat', # 0x21
'tax', # 0x22
'ta', # 0x23
'tap', # 0x24
'tuot', # 0x25
'tuox', # 0x26
'tuo', # 0x27
'tuop', # 0x28
'tot', # 0x29
'tox', # 0x2a
'to', # 0x2b
'top', # 0x2c
'tex', # 0x2d
'te', # 0x2e
'tep', # 0x2f
'tut', # 0x30
'tux', # 0x31
'tu', # 0x32
'tup', # 0x33
'turx', # 0x34
'tur', # 0x35
'ddit', # 0x36
'ddix', # 0x37
'ddi', # 0x38
'ddip', # 0x39
'ddiex', # 0x3a
'ddie', # 0x3b
'ddiep', # 0x3c
'ddat', # 0x3d
'ddax', # 0x3e
'dda', # 0x3f
'ddap', # 0x40
'dduox', # 0x41
'dduo', # 0x42
'dduop', # 0x43
'ddot', # 0x44
'ddox', # 0x45
'ddo', # 0x46
'ddop', # 0x47
'ddex', # 0x48
'dde', # 0x49
'ddep', # 0x4a
'ddut', # 0x4b
'ddux', # 0x4c
'ddu', # 0x4d
'ddup', # 0x4e
'ddurx', # 0x4f
'ddur', # 0x50
'ndit', # 0x51
'ndix', # 0x52
'ndi', # 0x53
'ndip', # 0x54
'ndiex', # 0x55
'ndie', # 0x56
'ndat', # 0x57
'ndax', # 0x58
'nda', # 0x59
'ndap', # 0x5a
'ndot', # 0x5b
'ndox', # 0x5c
'ndo', # 0x5d
'ndop', # 0x5e
'ndex', # 0x5f
'nde', # 0x60
'ndep', # 0x61
'ndut', # 0x62
'ndux', # 0x63
'ndu', # 0x64
'ndup', # 0x65
'ndurx', # 0x66
'ndur', # 0x67
'hnit', # 0x68
'hnix', # 0x69
'hni', # 0x6a
'hnip', # 0x6b
'hniet', # 0x6c
'hniex', # 0x6d
'hnie', # 0x6e
'hniep', # 0x6f
'hnat', # 0x70
'hnax', # 0x71
'hna', # 0x72
'hnap', # 0x73
'hnuox', # 0x74
'hnuo', # 0x75
'hnot', # 0x76
'hnox', # 0x77
'hnop', # 0x78
'hnex', # 0x79
'hne', # 0x7a
'hnep', # 0x7b
'hnut', # 0x7c
'nit', # 0x7d
'nix', # 0x7e
'ni', # 0x7f
'nip', # 0x80
'niex', # 0x81
'nie', # 0x82
'niep', # 0x83
'nax', # 0x84
'na', # 0x85
'nap', # 0x86
'nuox', # 0x87
'nuo', # 0x88
'nuop', # 0x89
'not', # 0x8a
'nox', # 0x8b
'no', # 0x8c
'nop', # 0x8d
'nex', # 0x8e
'ne', # 0x8f
'nep', # 0x90
'nut', # 0x91
'nux', # 0x92
'nu', # 0x93
'nup', # 0x94
'nurx', # 0x95
'nur', # 0x96
'hlit', # 0x97
'hlix', # 0x98
'hli', # 0x99
'hlip', # 0x9a
'hliex', # 0x9b
'hlie', # 0x9c
'hliep', # 0x9d
'hlat', # 0x9e
'hlax', # 0x9f
'hla', # 0xa0
'hlap', # 0xa1
'hluox', # 0xa2
'hluo', # 0xa3
'hluop', # 0xa4
'hlox', # 0xa5
'hlo', # 0xa6
'hlop', # 0xa7
'hlex', # 0xa8
'hle', # 0xa9
'hlep', # 0xaa
'hlut', # 0xab
'hlux', # 0xac
'hlu', # 0xad
'hlup', # 0xae
'hlurx', # 0xaf
'hlur', # 0xb0
'hlyt', # 0xb1
'hlyx', # 0xb2
'hly', # 0xb3
'hlyp', # 0xb4
'hlyrx', # 0xb5
'hlyr', # 0xb6
'lit', # 0xb7
'lix', # 0xb8
'li', # 0xb9
'lip', # 0xba
'liet', # 0xbb
'liex', # 0xbc
'lie', # 0xbd
'liep', # 0xbe
'lat', # 0xbf
'lax', # 0xc0
'la', # 0xc1
'lap', # 0xc2
'luot', # 0xc3
'luox', # 0xc4
'luo', # 0xc5
'luop', # 0xc6
'lot', # 0xc7
'lox', # 0xc8
'lo', # 0xc9
'lop', # 0xca
'lex', # 0xcb
'le', # 0xcc
'lep', # 0xcd
'lut', # 0xce
'lux', # 0xcf
'lu', # 0xd0
'lup', # 0xd1
'lurx', # 0xd2
'lur', # 0xd3
'lyt', # 0xd4
'lyx', # 0xd5
'ly', # 0xd6
'lyp', # 0xd7
'lyrx', # 0xd8
'lyr', # 0xd9
'git', # 0xda
'gix', # 0xdb
'gi', # 0xdc
'gip', # 0xdd
'giet', # 0xde
'giex', # 0xdf
'gie', # 0xe0
'giep', # 0xe1
'gat', # 0xe2
'gax', # 0xe3
'ga', # 0xe4
'gap', # 0xe5
'guot', # 0xe6
'guox', # 0xe7
'guo', # 0xe8
'guop', # 0xe9
'got', # 0xea
'gox', # 0xeb
'go', # 0xec
'gop', # 0xed
'get', # 0xee
'gex', # 0xef
'ge', # 0xf0
'gep', # 0xf1
'gut', # 0xf2
'gux', # 0xf3
'gu', # 0xf4
'gup', # 0xf5
'gurx', # 0xf6
'gur', # 0xf7
'kit', # 0xf8
'kix', # 0xf9
'ki', # 0xfa
'kip', # 0xfb
'kiex', # 0xfc
'kie', # 0xfd
'kiep', # 0xfe
'kat', # 0xff
)
|
nervenXC/topical_word_embeddings
|
refs/heads/master
|
TWE-1/gensim/test/test_similarities.py
|
16
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for similarity algorithms (the similarities package).
"""
import logging
import unittest
import os
import tempfile
import numpy
from gensim.corpora import mmcorpus, Dictionary
from gensim import matutils, utils, similarities
module_path = os.path.dirname(__file__) # needed because sample data files are located in the same folder
datapath = lambda fname: os.path.join(module_path, 'test_data', fname)
# set up vars used in testing ("Deerwester" from the web tutorial)
texts = [['human', 'interface', 'computer'],
['survey', 'user', 'computer', 'system', 'response', 'time'],
['eps', 'user', 'interface', 'system'],
['system', 'human', 'system', 'eps'],
['user', 'response', 'time'],
['trees'],
['graph', 'trees'],
['graph', 'minors', 'trees'],
['graph', 'minors', 'survey']]
dictionary = Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
def testfile():
# temporary data will be stored to this file
return os.path.join(tempfile.gettempdir(), 'gensim_similarities.tst.pkl')
class _TestSimilarityABC(object):
"""
Base class for SparseMatrixSimilarity and MatrixSimilarity unit tests.
"""
def testFull(self, num_best=None, shardsize=100):
if self.cls == similarities.Similarity:
index = self.cls(None, corpus, num_features=len(dictionary), shardsize=shardsize)
else:
index = self.cls(corpus, num_features=len(dictionary))
if isinstance(index, similarities.MatrixSimilarity):
expected = numpy.array([
[ 0.57735026, 0.57735026, 0.57735026, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ],
[ 0.40824831, 0.0, 0.0, 0.40824831, 0.40824831, 0.40824831, 0.40824831, 0.40824831, 0.0, 0.0, 0.0, 0.0 ],
[ 0.0, 0.0, 0.5, 0.0, 0.0, 0.5, 0.0, 0.5, 0.5, 0.0, 0.0, 0.0 ],
[ 0.0, 0.40824831, 0.0, 0.0, 0.0, 0.81649661, 0.0, 0.0, 0.40824831, 0.0, 0.0, 0.0 ],
[ 0.0, 0.0, 0.0, 0.57735026, 0.0, 0.0, 0.57735026, 0.57735026, 0.0, 0.0, 0.0, 0.0 ],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0 ],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.70710677, 0.70710677, 0.0 ],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.57735026, 0.57735026, 0.57735026 ],
[ 0.0, 0.0, 0.0, 0.0, 0.57735026, 0.0, 0.0, 0.0, 0.0, 0.0, 0.57735026, 0.57735026 ]
], dtype=numpy.float32)
self.assertTrue(numpy.allclose(expected, index.index))
index.num_best = num_best
query = corpus[0]
sims = index[query]
expected = [(0, 0.99999994), (2, 0.28867513), (3, 0.23570226), (1, 0.23570226)][ : num_best]
# convert sims to full numpy arrays, so we can use allclose() and ignore
# ordering of items with the same similarity value
expected = matutils.sparse2full(expected, len(index))
if num_best is not None: # when num_best is None, sims is already a numpy array
sims = matutils.sparse2full(sims, len(index))
self.assertTrue(numpy.allclose(expected, sims))
if self.cls == similarities.Similarity:
index.destroy()
def testNumBest(self):
for num_best in [None, 0, 1, 9, 1000]:
self.testFull(num_best=num_best)
def testChunking(self):
if self.cls == similarities.Similarity:
index = self.cls(None, corpus, num_features=len(dictionary), shardsize=5)
else:
index = self.cls(corpus, num_features=len(dictionary))
query = corpus[:3]
sims = index[query]
expected = numpy.array([
[ 0.99999994, 0.23570226, 0.28867513, 0.23570226, 0.0, 0.0, 0.0, 0.0, 0.0 ],
[ 0.23570226, 1.0, 0.40824831, 0.33333334, 0.70710677, 0.0, 0.0, 0.0, 0.23570226 ],
[ 0.28867513, 0.40824831, 1.0, 0.61237246, 0.28867513, 0.0, 0.0, 0.0, 0.0 ]
], dtype=numpy.float32)
self.assertTrue(numpy.allclose(expected, sims))
# test the same thing but with num_best
index.num_best = 3
sims = index[query]
expected = [[(0, 0.99999994), (2, 0.28867513), (3, 0.23570226)],
[(1, 1.0), (4, 0.70710677), (2, 0.40824831)],
[(2, 1.0), (3, 0.61237246), (1, 0.40824831)]]
self.assertTrue(numpy.allclose(expected, sims))
if self.cls == similarities.Similarity:
index.destroy()
def testIter(self):
if self.cls == similarities.Similarity:
index = self.cls(None, corpus, num_features=len(dictionary), shardsize=5)
else:
index = self.cls(corpus, num_features=len(dictionary))
sims = [sim for sim in index]
expected = numpy.array([
[ 0.99999994, 0.23570226, 0.28867513, 0.23570226, 0.0, 0.0, 0.0, 0.0, 0.0 ],
[ 0.23570226, 1.0, 0.40824831, 0.33333334, 0.70710677, 0.0, 0.0, 0.0, 0.23570226 ],
[ 0.28867513, 0.40824831, 1.0, 0.61237246, 0.28867513, 0.0, 0.0, 0.0, 0.0 ],
[ 0.23570226, 0.33333334, 0.61237246, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0 ],
[ 0.0, 0.70710677, 0.28867513, 0.0, 0.99999994, 0.0, 0.0, 0.0, 0.0 ],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.70710677, 0.57735026, 0.0 ],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.70710677, 0.99999994, 0.81649655, 0.40824828 ],
[ 0.0, 0.0, 0.0, 0.0, 0.0, 0.57735026, 0.81649655, 0.99999994, 0.66666663 ],
[ 0.0, 0.23570226, 0.0, 0.0, 0.0, 0.0, 0.40824828, 0.66666663, 0.99999994 ]
], dtype=numpy.float32)
self.assertTrue(numpy.allclose(expected, sims))
if self.cls == similarities.Similarity:
index.destroy()
def testPersistency(self):
fname = testfile()
if self.cls == similarities.Similarity:
index = self.cls(None, corpus, num_features=len(dictionary), shardsize=5)
else:
index = self.cls(corpus, num_features=len(dictionary))
index.save(fname)
index2 = self.cls.load(fname)
if self.cls == similarities.Similarity:
# for Similarity, only do a basic check
self.assertTrue(len(index.shards) == len(index2.shards))
index.destroy()
else:
if isinstance(index, similarities.SparseMatrixSimilarity):
# hack SparseMatrixSim indexes so they're easy to compare
index.index = index.index.todense()
index2.index = index2.index.todense()
self.assertTrue(numpy.allclose(index.index, index2.index))
self.assertEqual(index.num_best, index2.num_best)
def testLarge(self):
fname = testfile()
if self.cls == similarities.Similarity:
index = self.cls(None, corpus, num_features=len(dictionary), shardsize=5)
else:
index = self.cls(corpus, num_features=len(dictionary))
# store all arrays separately
index.save(fname, sep_limit=0)
index2 = self.cls.load(fname)
if self.cls == similarities.Similarity:
# for Similarity, only do a basic check
self.assertTrue(len(index.shards) == len(index2.shards))
index.destroy()
else:
if isinstance(index, similarities.SparseMatrixSimilarity):
# hack SparseMatrixSim indexes so they're easy to compare
index.index = index.index.todense()
index2.index = index2.index.todense()
self.assertTrue(numpy.allclose(index.index, index2.index))
self.assertEqual(index.num_best, index2.num_best)
def testMmap(self):
fname = testfile()
if self.cls == similarities.Similarity:
index = self.cls(None, corpus, num_features=len(dictionary), shardsize=5)
else:
index = self.cls(corpus, num_features=len(dictionary))
# store all arrays separately
index.save(fname, sep_limit=0)
# same thing, but use mmap to load arrays
index2 = self.cls.load(fname, mmap='r')
if self.cls == similarities.Similarity:
# for Similarity, only do a basic check
self.assertTrue(len(index.shards) == len(index2.shards))
index.destroy()
else:
if isinstance(index, similarities.SparseMatrixSimilarity):
# hack SparseMatrixSim indexes so they're easy to compare
index.index = index.index.todense()
index2.index = index2.index.todense()
self.assertTrue(numpy.allclose(index.index, index2.index))
self.assertEqual(index.num_best, index2.num_best)
class TestMatrixSimilarity(unittest.TestCase, _TestSimilarityABC):
def setUp(self):
self.cls = similarities.MatrixSimilarity
class TestSparseMatrixSimilarity(unittest.TestCase, _TestSimilarityABC):
def setUp(self):
self.cls = similarities.SparseMatrixSimilarity
class TestSimilarity(unittest.TestCase, _TestSimilarityABC):
def setUp(self):
self.cls = similarities.Similarity
def testSharding(self):
for num_best in [None, 0, 1, 9, 1000]:
for shardsize in [1, 2, 9, 1000]:
self.testFull(num_best=num_best, shardsize=shardsize)
def testReopen(self):
"""test re-opening partially full shards"""
index = similarities.Similarity(None, corpus[:5], num_features=len(dictionary), shardsize=9)
_ = index[corpus[0]] # forces shard close
index.add_documents(corpus[5:])
query = corpus[0]
sims = index[query]
expected = [(0, 0.99999994), (2, 0.28867513), (3, 0.23570226), (1, 0.23570226)]
expected = matutils.sparse2full(expected, len(index))
self.assertTrue(numpy.allclose(expected, sims))
index.destroy()
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
|
trafi/djinni
|
refs/heads/master
|
test-suite/generated-src/python/record_with_derivings_helper.py
|
1
|
# AUTOGENERATED FILE - DO NOT MODIFY!
# This file generated by Djinni from derivings.djinni
from djinni.support import MultiSet # default imported in all files
from djinni.exception import CPyException # default imported in all files
from djinni.pycffi_marshal import CPyPrimitive, CPyRecord, CPyString
from PyCFFIlib_cffi import ffi, lib
from djinni import exception # this forces run of __init__.py which gives cpp option to call back into py to create exception
from record_with_derivings import RecordWithDerivings
class RecordWithDerivingsHelper:
@staticmethod
def release(c_ptr):
assert c_ptr in c_data_set
c_data_set.remove(ffi.cast("void*", c_ptr))
@ffi.callback("int32_t(struct DjinniRecordHandle *)")
def get_record_with_derivings_f1(cself):
try:
_ret = CPyPrimitive.fromPy(CPyRecord.toPy(None, cself).key1)
return _ret
except Exception as _djinni_py_e:
CPyException.setExceptionFromPy(_djinni_py_e)
return ffi.NULL
@ffi.callback("struct DjinniString *(struct DjinniRecordHandle *)")
def get_record_with_derivings_f2(cself):
try:
with CPyString.fromPy(CPyRecord.toPy(None, cself).key2) as py_obj:
_ret = py_obj.release_djinni_string()
assert _ret != ffi.NULL
return _ret
except Exception as _djinni_py_e:
CPyException.setExceptionFromPy(_djinni_py_e)
return ffi.NULL
@ffi.callback("struct DjinniRecordHandle *(int32_t,struct DjinniString *)")
def python_create_record_with_derivings(key1,key2):
py_rec = RecordWithDerivings(
CPyPrimitive.toPy(key1),
CPyString.toPy(key2))
return CPyRecord.fromPy(RecordWithDerivings.c_data_set, py_rec) #to do: can be optional?
@ffi.callback("void (struct DjinniRecordHandle *)")
def __delete(dh):
assert dh in RecordWithDerivings.c_data_set
RecordWithDerivings.c_data_set.remove(dh)
@staticmethod
def _add_callbacks():
lib.record_with_derivings_add_callback_get_record_with_derivings_f1(RecordWithDerivingsHelper.get_record_with_derivings_f1)
lib.record_with_derivings_add_callback_get_record_with_derivings_f2(RecordWithDerivingsHelper.get_record_with_derivings_f2)
lib.record_with_derivings_add_callback_python_create_record_with_derivings(RecordWithDerivingsHelper.python_create_record_with_derivings)
lib.record_with_derivings_add_callback___delete(RecordWithDerivingsHelper.__delete)
RecordWithDerivingsHelper._add_callbacks()
|
yyjiang/scikit-learn
|
refs/heads/master
|
examples/svm/plot_iris.py
|
225
|
"""
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on a 2D projection of the iris
dataset. We only consider the first 2 features of this dataset:
- Sepal length
- Sepal width
This example shows how to plot the decision surface for four SVM classifiers
with different kernels.
The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly
different decision boundaries. This can be a consequence of the following
differences:
- ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the
regular hinge loss.
- ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass
reduction while ``SVC`` uses the One-vs-One multiclass reduction.
Both linear models have linear decision boundaries (intersecting hyperplanes)
while the non-linear kernel models (polynomial or Gaussian RBF) have more
flexible non-linear decision boundaries with shapes that depend on the kind of
kernel and its parameters.
.. NOTE:: while plotting the decision function of classifiers for toy 2D
datasets can help get an intuitive understanding of their respective
expressive power, be aware that those intuitions don't always generalize to
more realistic high-dimensional problems.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
lin_svc = svm.LinearSVC(C=C).fit(X, y)
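# A hedged aside on the docstring's note about loss functions: LinearSVC
# minimizes the squared hinge loss by default, but it also accepts
# loss='hinge' (the loss SVC optimizes), which typically brings the two
# linear decision boundaries closer together. The extra model below is
# purely illustrative and is not plotted.
lin_svc_hinge = svm.LinearSVC(C=C, loss='hinge').fit(X, y)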
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['SVC with linear kernel',
'LinearSVC (linear kernel)',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel']
for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)):
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title(titles[i])
plt.show()
|
memo/tensorflow
|
refs/heads/master
|
tensorflow/python/ops/standard_ops.py
|
21
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unused-import
"""Import names of Tensor Flow standard Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys as _sys
# Imports the following modules so that the @RegisterGradient decorators get executed.
from tensorflow.python.ops import array_grad
from tensorflow.python.ops import data_flow_grad
from tensorflow.python.ops import math_grad
from tensorflow.python.ops import sparse_grad
from tensorflow.python.ops import spectral_grad
from tensorflow.python.ops import state_grad
from tensorflow.python.ops import tensor_array_grad
from tensorflow.python.util.all_util import remove_undocumented
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.array_ops import *
from tensorflow.python.ops.check_ops import *
from tensorflow.python.ops.clip_ops import *
from tensorflow.python.ops.special_math_ops import *
# TODO(vrv): Switch to import * once we're okay with exposing the module.
from tensorflow.python.ops.confusion_matrix import confusion_matrix
from tensorflow.python.ops.control_flow_ops import Assert
from tensorflow.python.ops.control_flow_ops import group
from tensorflow.python.ops.control_flow_ops import no_op
from tensorflow.python.ops.control_flow_ops import tuple
from tensorflow.python.ops.control_flow_ops import cond
from tensorflow.python.ops.control_flow_ops import case
from tensorflow.python.ops.control_flow_ops import while_loop
from tensorflow.python.ops.data_flow_ops import *
from tensorflow.python.ops.functional_ops import *
from tensorflow.python.ops.gradients import *
from tensorflow.python.ops.histogram_ops import *
from tensorflow.python.ops.init_ops import *
from tensorflow.python.ops.io_ops import *
from tensorflow.python.ops.linalg_ops import *
from tensorflow.python.ops.logging_ops import Print
from tensorflow.python.ops.logging_ops import get_summary_op
from tensorflow.python.ops.lookup_ops import initialize_all_tables
from tensorflow.python.ops.lookup_ops import tables_initializer
from tensorflow.python.ops.math_ops import *
from tensorflow.python.ops.numerics import *
from tensorflow.python.ops.parsing_ops import *
from tensorflow.python.ops.partitioned_variables import *
from tensorflow.python.ops.random_ops import *
from tensorflow.python.ops.script_ops import py_func
from tensorflow.python.ops.session_ops import *
from tensorflow.python.ops.sparse_ops import *
from tensorflow.python.ops.state_ops import assign
from tensorflow.python.ops.state_ops import assign_add
from tensorflow.python.ops.state_ops import assign_sub
from tensorflow.python.ops.state_ops import count_up_to
from tensorflow.python.ops.state_ops import scatter_add
from tensorflow.python.ops.state_ops import scatter_div
from tensorflow.python.ops.state_ops import scatter_mul
from tensorflow.python.ops.state_ops import scatter_sub
from tensorflow.python.ops.state_ops import scatter_update
from tensorflow.python.ops.state_ops import scatter_nd_add
from tensorflow.python.ops.state_ops import scatter_nd_sub
# TODO(simister): Re-enable once binary size increase due to scatter_nd
# ops is under control.
# from tensorflow.python.ops.state_ops import scatter_nd_mul
# from tensorflow.python.ops.state_ops import scatter_nd_div
from tensorflow.python.ops.state_ops import scatter_nd_update
from tensorflow.python.ops.string_ops import *
from tensorflow.python.ops.template import *
from tensorflow.python.ops.tensor_array_ops import *
from tensorflow.python.ops.variable_scope import *
from tensorflow.python.ops.variables import *
# pylint: enable=wildcard-import
#### For use in remove_undocumented below:
from tensorflow.python.framework import constant_op as _constant_op
from tensorflow.python.ops import array_ops as _array_ops
from tensorflow.python.ops import check_ops as _check_ops
from tensorflow.python.ops import clip_ops as _clip_ops
from tensorflow.python.ops import confusion_matrix as _confusion_matrix
from tensorflow.python.ops import control_flow_ops as _control_flow_ops
from tensorflow.python.ops import data_flow_ops as _data_flow_ops
from tensorflow.python.ops import functional_ops as _functional_ops
from tensorflow.python.ops import gradients as _gradients
from tensorflow.python.ops import histogram_ops as _histogram_ops
from tensorflow.python.ops import init_ops as _init_ops
from tensorflow.python.ops import io_ops as _io_ops
from tensorflow.python.ops import linalg_ops as _linalg_ops
from tensorflow.python.ops import logging_ops as _logging_ops
from tensorflow.python.ops import math_ops as _math_ops
from tensorflow.python.ops import numerics as _numerics
from tensorflow.python.ops import parsing_ops as _parsing_ops
from tensorflow.python.ops import partitioned_variables as _partitioned_variables
from tensorflow.python.ops import random_ops as _random_ops
from tensorflow.python.ops import script_ops as _script_ops
from tensorflow.python.ops import session_ops as _session_ops
from tensorflow.python.ops import sparse_ops as _sparse_ops
from tensorflow.python.ops import special_math_ops as _special_math_ops
from tensorflow.python.ops import state_ops as _state_ops
from tensorflow.python.ops import string_ops as _string_ops
from tensorflow.python.ops import template as _template
from tensorflow.python.ops import tensor_array_ops as _tensor_array_ops
from tensorflow.python.ops import variable_scope as _variable_scope
from tensorflow.python.ops import variables as _variables
_allowed_symbols_math_ops = [
# TODO(drpng): decide if we want to reference these in the documentation.
"reduced_shape",
"sparse_segment_mean_grad",
"sparse_segment_sqrt_n_grad",
# Legacy: will be removed.
"arg_max",
"arg_min",
"lin_space",
"sparse_matmul", # Use tf.matmul.
# Deprecated (see versions.h):
"batch_fft",
"batch_fft2d",
"batch_fft3d",
"batch_ifft",
"batch_ifft2d",
"batch_ifft3d",
"mul", # use tf.multiply instead.
"neg", # use tf.negative instead.
"sub", # use tf.subtract instead.
# These are documented in nn.
    # We are not importing nn because it would create a circular dependency.
"sigmoid",
"log_sigmoid",
"tanh",
]
_allowed_symbols_array_ops = [
# TODO(drpng): make sure they are documented.
# Scalars:
"NEW_AXIS",
"SHRINK_AXIS",
"newaxis",
# Documented in training.py.
# I do not import train, to avoid circular dependencies.
# TODO(drpng): this is defined in gen_array_ops, clearly not the right
# place.
"stop_gradient",
# See gen_docs_combined for tf.copy documentation.
"copy",
## TODO(drpng): make them inaccessible directly.
## TODO(drpng): Below, to-doc means that we need to find an appropriate
## documentation section to reference.
## For re-exporting to tf.*:
"constant",
"edit_distance", # to-doc
# From gen_array_ops:
"copy_host", # to-doc
"immutable_const", # to-doc
"invert_permutation", # to-doc
"quantize_and_dequantize", # to-doc
# TODO(drpng): legacy symbols to be removed.
"list_diff", # Use tf.listdiff instead.
"batch_matrix_diag",
"batch_matrix_band_part",
"batch_matrix_diag_part",
"batch_matrix_set_diag",
]
_allowed_symbols_partitioned_variables = [
"PartitionedVariable", # Requires doc link.
# Legacy.
"create_partitioned_variables",
"variable_axis_size_partitioner",
"min_max_variable_partitioner",
"fixed_size_partitioner",
]
_allowed_symbols_control_flow_ops = [
# TODO(drpng): Find a place in the documentation to reference these or
# remove.
"control_trigger",
"loop_cond",
"merge",
"switch",
]
_allowed_symbols_functional_ops = [
"nest", # Used by legacy code.
]
_allowed_symbols_gradients = [
# Documented in training.py:
# Not importing training.py to avoid complex graph dependencies.
"AggregationMethod",
"gradients", # tf.gradients = gradients.gradients
"hessians",
]
_allowed_symbols_clip_ops = [
# Documented in training.py:
# Not importing training.py to avoid complex graph dependencies.
"clip_by_average_norm",
"clip_by_global_norm",
"clip_by_norm",
"clip_by_value",
"global_norm",
]
_allowed_symbols_image_ops = [
# Documented in training.py.
# We are not importing training.py to avoid complex dependencies.
"audio_summary",
"histogram_summary",
"image_summary",
"merge_all_summaries",
"merge_summary",
"scalar_summary",
# TODO(drpng): link in training.py if it should be documented.
"get_summary_op",
]
_allowed_symbols_variable_scope_ops = [
"get_local_variable", # Documented in framework package.
]
_allowed_symbols_misc = [
"deserialize_many_sparse",
"parse_single_sequence_example",
"serialize_many_sparse",
"serialize_sparse",
"confusion_matrix",
]
_allowed_symbols = (_allowed_symbols_array_ops +
_allowed_symbols_clip_ops +
_allowed_symbols_control_flow_ops +
_allowed_symbols_functional_ops +
_allowed_symbols_image_ops +
_allowed_symbols_gradients +
_allowed_symbols_math_ops +
_allowed_symbols_variable_scope_ops +
_allowed_symbols_misc +
_allowed_symbols_partitioned_variables)
remove_undocumented(__name__, _allowed_symbols,
[_sys.modules[__name__],
_array_ops,
_check_ops,
_clip_ops,
_confusion_matrix,
_control_flow_ops,
_constant_op,
_data_flow_ops,
_functional_ops,
_gradients,
_histogram_ops,
_init_ops,
_io_ops,
_linalg_ops,
_logging_ops,
_math_ops,
_numerics,
_parsing_ops,
_partitioned_variables,
_random_ops,
_script_ops,
_session_ops,
_sparse_ops,
_special_math_ops,
_state_ops,
_string_ops,
_template,
_tensor_array_ops,
_variable_scope,
_variables,])
|
suncycheng/intellij-community
|
refs/heads/master
|
python/testData/refactoring/extractmethod/DuplicateMultiLine.before.py
|
75
|
def bar():
<selection>a = 1
print a</selection>
a = 1
print a
|
pombredanne/marisa-trie
|
refs/heads/master
|
tests/test_binary_trie.py
|
3
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import pickle
try:  # Python 3.3+ moved the ABCs to collections.abc
    from collections.abc import Mapping
except ImportError:  # Python 2 fallback
    from collections import Mapping
import pytest
import hypothesis.strategies as st
from hypothesis import given, assume
import marisa_trie
text = st.binary()
@given(st.sets(text), text)
def test_init(keys, missing_key):
assume(missing_key not in keys)
trie = marisa_trie.BinaryTrie(keys)
for key in keys:
assert key in trie
assert missing_key not in trie
@given(st.sets(text, min_size=1), text)
def test_key_id(keys, missing_key):
assume(missing_key not in keys)
trie = marisa_trie.BinaryTrie(keys)
for key in keys:
key_id = trie.key_id(key)
assert trie.restore_key(key_id) == key
key_ids = [trie.key_id(key) for key in keys]
non_existing_id = max(key_ids) + 1
with pytest.raises(KeyError):
trie.restore_key(non_existing_id)
with pytest.raises(KeyError):
trie.key_id(missing_key)
@given(st.sets(text, min_size=1), text)
def test_getitem(keys, missing_key):
assume(missing_key not in keys)
trie = marisa_trie.BinaryTrie(keys)
for key in keys:
key_id = trie[key]
assert trie.restore_key(key_id) == key
key_ids = [trie[key] for key in keys]
non_existing_id = max(key_ids) + 1
with pytest.raises(KeyError):
trie.restore_key(non_existing_id)
with pytest.raises(KeyError):
trie[missing_key]
@given(st.sets(text))
def test_get(keys):
trie = marisa_trie.BinaryTrie(keys)
for key in keys:
key_id = trie.get(key)
assert trie.restore_key(key_id) == key
key_id = trie.get(key, "default value")
assert trie.restore_key(key_id) == key
assert trie.get(b"non_existing_bytes_key") is None
assert trie.get(b"non_existing_bytes_key",
"default value") == "default value"
@given(st.sets(text))
def test_saveload(tmpdir, keys):
trie = marisa_trie.BinaryTrie(keys)
path = str(tmpdir.join("trie.bin"))
with open(path, "wb") as f:
trie.write(f)
with open(path, "rb") as f:
trie2 = marisa_trie.BinaryTrie()
trie2.read(f)
for key in keys:
assert key in trie2
@given(st.sets(text))
def test_mmap(tmpdir, keys):
trie = marisa_trie.BinaryTrie(keys)
path = str(tmpdir.join("trie.bin"))
with open(path, "wb") as f:
trie.write(f)
trie2 = marisa_trie.BinaryTrie()
trie2.mmap(path)
for key in keys:
assert key in trie2
@given(st.sets(text))
def test_tobytes_frombytes(keys):
trie = marisa_trie.BinaryTrie(keys)
data = trie.tobytes()
trie2 = marisa_trie.BinaryTrie().frombytes(data)
for key in keys:
assert key in trie2
assert trie2.key_id(key) == trie.key_id(key)
@given(st.sets(text))
def test_dumps_loads(keys):
trie = marisa_trie.BinaryTrie(keys)
data = pickle.dumps(trie)
trie2 = pickle.loads(data)
for key in keys:
assert key in trie2
assert trie2.key_id(key) == trie.key_id(key)
def test_contains_empty():
assert b"foo" not in marisa_trie.BinaryTrie()
def test_contains_singleton():
trie = marisa_trie.BinaryTrie([b"foo"])
assert b"foo" in trie
assert b"f" not in trie
def test_eq_self():
trie = marisa_trie.BinaryTrie()
assert trie == trie
assert trie == marisa_trie.BinaryTrie()
def test_eq_neq():
trie = marisa_trie.BinaryTrie([b"foo", b"bar"])
assert trie == marisa_trie.BinaryTrie([b"foo", b"bar"])
assert trie != marisa_trie.BinaryTrie([b"foo", b"boo"])
def test_neq_different_type():
assert marisa_trie.BinaryTrie([b"foo", b"bar"]) != {}
def test_eq_neq_different_order():
lo_trie = marisa_trie.BinaryTrie(order=marisa_trie.LABEL_ORDER)
wo_trie = marisa_trie.BinaryTrie(order=marisa_trie.WEIGHT_ORDER)
assert lo_trie == lo_trie and wo_trie == wo_trie
assert lo_trie != wo_trie
def test_gt_lt_exceptions():
with pytest.raises(TypeError):
marisa_trie.BinaryTrie() < marisa_trie.BinaryTrie()
with pytest.raises(TypeError):
marisa_trie.BinaryTrie() > marisa_trie.BinaryTrie()
def test_iter():
trie = marisa_trie.BinaryTrie([b"foo", b"bar"])
assert list(trie) == list(trie.iterkeys())
def test_len():
trie = marisa_trie.BinaryTrie()
assert len(trie) == 0
trie = marisa_trie.BinaryTrie([b"foo", b"f", b"bar"])
assert len(trie) == 3
def test_prefixes():
trie = marisa_trie.BinaryTrie([b"foo", b"f", b"foobar", b"bar"])
assert trie.prefixes(b"foobar") == [b"f", b"foo", b"foobar"]
assert trie.prefixes(b"foo") == [b"f", b"foo"]
assert trie.prefixes(b"bar") == [b"bar"]
assert trie.prefixes(b"b") == []
assert list(trie.iter_prefixes(b"foobar")) == [b"f", b"foo", b"foobar"]
def test_keys():
keys = [b"foo", b"f", b"foobar", b"bar"]
trie = marisa_trie.BinaryTrie(keys)
assert set(trie.keys()) == set(keys)
def test_keys_prefix():
keys = [b"foo", b"f", b"foobar", b"bar"]
trie = marisa_trie.BinaryTrie(keys)
assert set(trie.keys(b"fo")) == set([b"foo", b"foobar"])
assert trie.keys(b"foobarz") == []
@given(st.sets(text))
def test_iterkeys(keys):
trie = marisa_trie.BinaryTrie(keys)
assert trie.keys() == list(trie.iterkeys())
for key in keys:
prefix = key[:5]
assert trie.keys(prefix) == list(trie.iterkeys(prefix))
def test_items():
keys = [b"foo", b"f", b"foobar", b"bar"]
trie = marisa_trie.BinaryTrie(keys)
items = trie.items()
assert set(items) == set(zip(keys, (trie[k] for k in keys)))
def test_items_prefix():
keys = [b"foo", b"f", b"foobar", b"bar"]
trie = marisa_trie.BinaryTrie(keys)
assert set(trie.items(b"fo")) == set([
(b"foo", trie[b"foo"]),
(b"foobar", trie[b"foobar"]),
])
@given(st.sets(text))
def test_iteritems(keys):
trie = marisa_trie.BinaryTrie(keys)
assert trie.items() == list(trie.iteritems())
for key in keys:
prefix = key[:5]
assert trie.items(prefix) == list(trie.iteritems(prefix))
def test_has_keys_with_prefix_empty():
empty_trie = marisa_trie.BinaryTrie()
assert not empty_trie.has_keys_with_prefix(b'')
assert not empty_trie.has_keys_with_prefix(b'ab')
def test_invalid_file():
try:
marisa_trie.BinaryTrie().load(__file__)
except RuntimeError as e:
assert "MARISA_FORMAT_ERROR" in e.args[0]
else:
pytest.fail("Exception is not raised")
def test_mutable_mapping():
for method in Mapping.__abstractmethods__:
assert hasattr(marisa_trie.BinaryTrie, method)
|
Youwotma/portia
|
refs/heads/master
|
docs/conf.py
|
8
|
# -*- coding: utf-8 -*-
#
# Portia documentation build configuration file, created by
# sphinx-quickstart on Tue Aug 25 13:51:18 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
from datetime import datetime
from os import path
VERSION_FILE = path.abspath(path.join(path.dirname(__file__), '..', 'VERSION'))
YEAR = datetime.now().year
with open(VERSION_FILE, 'r') as f:
RELEASE = f.read().strip()
VERSION = RELEASE.rsplit('.', 1)[0]
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Portia'
copyright = u'{}, Scrapinghub'.format(YEAR)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = VERSION
# The full version, including alpha/beta/rc tags.
release = RELEASE
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Portiadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Portia.tex', u'Portia Documentation',
u'Scrapinghub', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'portia', u'Portia Documentation',
[u'Scrapinghub'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Portia', u'Portia Documentation',
u'Scrapinghub', 'Portia', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
### Following is taken from https://github.com/snide/sphinx_rtd_theme#using-this-theme-locally-then-building-on-read-the-docs
# on_rtd is whether we are building on readthedocs.org; this line of code was grabbed from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify it
### end
|
jvassev/dd-agent
|
refs/heads/master
|
checks.d/ntp.py
|
34
|
# 3p
import ntplib
# project
from checks import AgentCheck
from utils.ntp import get_ntp_args, set_user_ntp_settings
DEFAULT_OFFSET_THRESHOLD = 60 # in seconds
class NtpCheck(AgentCheck):
DEFAULT_MIN_COLLECTION_INTERVAL = 900 # in seconds
def check(self, instance):
service_check_msg = None
offset_threshold = instance.get('offset_threshold', DEFAULT_OFFSET_THRESHOLD)
try:
offset_threshold = int(offset_threshold)
except (TypeError, ValueError):
raise Exception('Must specify an integer value for offset_threshold. Configured value is %s' % repr(offset_threshold))
set_user_ntp_settings(dict(instance))
req_args = get_ntp_args()
self.log.debug("Using ntp host: {0}".format(req_args['host']))
try:
ntp_stats = ntplib.NTPClient().request(**req_args)
except ntplib.NTPException:
self.log.debug("Could not connect to NTP Server {0}".format(
req_args['host']))
status = AgentCheck.UNKNOWN
ntp_ts = None
else:
ntp_offset = ntp_stats.offset
# Use the ntp server's timestamp for the time of the result in
# case the agent host's clock is messed up.
ntp_ts = ntp_stats.recv_time
self.gauge('ntp.offset', ntp_offset, timestamp=ntp_ts)
if abs(ntp_offset) > offset_threshold:
status = AgentCheck.CRITICAL
service_check_msg = "Offset {0} secs higher than offset threshold ({1} secs)".format(ntp_offset, offset_threshold)
else:
status = AgentCheck.OK
self.service_check('ntp.in_sync', status, timestamp=ntp_ts, message=service_check_msg)
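# A hedged sketch of the instance configuration this check expects; only
# 'offset_threshold' is read directly above, and any other keys (their exact
# names are assumptions) would be consumed by utils.ntp.set_user_ntp_settings:
#
# init_config:
#
# instances:
#   - offset_threshold: 60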
|
Moulde/django-extensions
|
refs/heads/master
|
django_extensions/management/commands/sqlcreate.py
|
27
|
import socket
import sys
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django_extensions.management.utils import signalcommand
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('-R', '--router', action='store',
dest='router', default='default',
                    help='Use this database router other than the default defined in settings.py'),
make_option('-D', '--drop', action='store_true',
dest='drop', default=False,
help='If given, includes commands to drop any existing user and database.'),
)
help = """Generates the SQL to create your database for you, as specified in settings.py
The envisioned use case is something like this:
./manage.py sqlcreate [--router=<routername>] | mysql -u <db_administrator> -p
./manage.py sqlcreate [--router=<routername>] | psql -U <db_administrator> -W"""
requires_system_checks = False
can_import_settings = True
@signalcommand
def handle(self, *args, **options):
router = options.get('router')
dbinfo = settings.DATABASES.get(router)
if dbinfo is None:
raise CommandError("Unknown database router %s" % router)
engine = dbinfo.get('ENGINE').split('.')[-1]
dbuser = dbinfo.get('USER')
dbpass = dbinfo.get('PASSWORD')
dbname = dbinfo.get('NAME')
dbhost = dbinfo.get('HOST')
dbclient = socket.gethostname()
        # The Django settings convention is that localhost is selected by
        # leaving DATABASE_HOST blank.
if not dbhost:
dbhost = 'localhost'
if engine == 'mysql':
sys.stderr.write("""-- WARNING!: https://docs.djangoproject.com/en/dev/ref/databases/#collation-settings
-- Please read this carefully! Collation will be set to utf8_bin to have case-sensitive data.
""")
print("CREATE DATABASE %s CHARACTER SET utf8 COLLATE utf8_bin;" % dbname)
print("GRANT ALL PRIVILEGES ON %s.* to '%s'@'%s' identified by '%s';" % (
dbname, dbuser, dbclient, dbpass
))
elif engine == 'postgresql_psycopg2':
if options.get('drop'):
print("DROP DATABASE IF EXISTS %s;" % (dbname,))
print("DROP USER IF EXISTS %s;" % (dbuser,))
print("CREATE USER %s WITH ENCRYPTED PASSWORD '%s' CREATEDB;" % (dbuser, dbpass))
print("CREATE DATABASE %s WITH ENCODING 'UTF-8' OWNER \"%s\";" % (dbname, dbuser))
print("GRANT ALL PRIVILEGES ON DATABASE %s TO %s;" % (dbname, dbuser))
elif engine == 'sqlite3':
sys.stderr.write("-- manage.py syncdb will automatically create a sqlite3 database file.\n")
else:
# CREATE DATABASE is not SQL standard, but seems to be supported by most.
sys.stderr.write("-- Don't know how to handle '%s' falling back to SQL.\n" % engine)
print("CREATE DATABASE %s;" % dbname)
print("GRANT ALL PRIVILEGES ON DATABASE %s to %s" % (dbname, dbuser))
|
nv8h/PyRattus
|
refs/heads/master
|
base/modules/rat/registry.py
|
1
|
registry_data = {}
def setValue(key, value):
registry_data[key] = value
return
def getValue(key):
if (key in registry_data):
return registry_data[key]
# end if
return None
def testKey(key):
return (key in registry_data)
|
danstoner/python_experiments
|
refs/heads/master
|
playing_with_kivy/kivi-examples/canvas/lines_extended.py
|
17
|
from kivy.app import App
from kivy.uix.gridlayout import GridLayout
from kivy.uix.widget import Widget
from kivy.lang import Builder
Builder.load_string('''
<LineEllipse1>:
canvas:
Color:
rgba: 1, .1, .1, .9
Line:
width: 2.
ellipse: (self.x, self.y, self.width, self.height)
Label:
center: root.center
text: 'Ellipse'
<LineEllipse2>:
canvas:
Color:
rgba: 1, .1, .1, .9
Line:
width: 2.
ellipse: (self.x, self.y, self.width, self.height, 90, 180)
Label:
center: root.center
text: 'Ellipse from 90 to 180'
# fun result with low segments!
<LineEllipse3>:
canvas:
Color:
rgba: 1, .1, .1, .9
Line:
width: 2.
ellipse: (self.x, self.y, self.width, self.height, 90, 720, 10)
Label:
center: root.center
text: 'Ellipse from 90 to 720\\n10 segments'
halign: 'center'
<LineCircle1>:
canvas:
Color:
rgba: .1, 1, .1, .9
Line:
width: 2.
circle: (self.center_x, self.center_y, min(self.width, self.height) / 2)
Label:
center: root.center
text: 'Circle'
<LineCircle2>:
canvas:
Color:
rgba: .1, 1, .1, .9
Line:
width: 2.
circle: (self.center_x, self.center_y, min(self.width, self.height) / 2, 90, 180)
Label:
center: root.center
text: 'Circle from 90 to 180'
<LineCircle3>:
canvas:
Color:
rgba: .1, 1, .1, .9
Line:
width: 2.
circle: (self.center_x, self.center_y, min(self.width, self.height) / 2, 90, 180, 10)
Label:
center: root.center
text: 'Circle from 90 to 180\\n10 segments'
halign: 'center'
<LineCircle4>:
canvas:
Color:
rgba: .1, 1, .1, .9
Line:
width: 2.
circle: (self.center_x, self.center_y, min(self.width, self.height) / 2, 0, 360)
Label:
center: root.center
text: 'Circle from 0 to 360'
halign: 'center'
<LineRectangle>:
canvas:
Color:
rgba: .1, .1, 1, .9
Line:
width: 2.
rectangle: (self.x, self.y, self.width, self.height)
Label:
center: root.center
text: 'Rectangle'
<LineBezier>:
canvas:
Color:
rgba: .1, .1, 1, .9
Line:
width: 2.
bezier: (self.x, self.y, self.center_x - 40, self.y + 100, self.center_x + 40, self.y - 100, self.right, self.y)
Label:
center: root.center
text: 'Bezier'
''')
class LineEllipse1(Widget):
pass
class LineEllipse2(Widget):
pass
class LineEllipse3(Widget):
pass
class LineCircle1(Widget):
pass
class LineCircle2(Widget):
pass
class LineCircle3(Widget):
pass
class LineCircle4(Widget):
pass
class LineRectangle(Widget):
pass
class LineBezier(Widget):
pass
class LineExtendedApp(App):
def build(self):
root = GridLayout(cols=2, padding=50, spacing=50)
root.add_widget(LineEllipse1())
root.add_widget(LineEllipse2())
root.add_widget(LineEllipse3())
root.add_widget(LineCircle1())
root.add_widget(LineCircle2())
root.add_widget(LineCircle3())
root.add_widget(LineCircle4())
root.add_widget(LineRectangle())
root.add_widget(LineBezier())
return root
if __name__ == '__main__':
LineExtendedApp().run()
|
40223211/logo-toys
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/warnings.py
|
752
|
"""Python part of the warnings subsystem."""
# Note: function level imports should *not* be used
# in this module as it may cause import lock deadlock.
# See bug 683658.
import linecache
import sys
__all__ = ["warn", "showwarning", "formatwarning", "filterwarnings",
"resetwarnings", "catch_warnings"]
def showwarning(message, category, filename, lineno, file=None, line=None):
"""Hook to write a warning to a file; replace if you like."""
if file is None:
file = sys.stderr
try:
file.write(formatwarning(message, category, filename, lineno, line))
except IOError:
pass # the file (probably stderr) is invalid - this warning gets lost.
def formatwarning(message, category, filename, lineno, line=None):
"""Function to format a warning the standard way."""
s = "%s:%s: %s: %s\n" % (filename, lineno, category.__name__, message)
line = linecache.getline(filename, lineno) if line is None else line
if line:
line = line.strip()
s += " %s\n" % line
return s
def filterwarnings(action, message="", category=Warning, module="", lineno=0,
append=False):
"""Insert an entry into the list of warnings filters (at the front).
'action' -- one of "error", "ignore", "always", "default", "module",
or "once"
'message' -- a regex that the warning message must match
'category' -- a class that the warning must be a subclass of
'module' -- a regex that the module name must match
'lineno' -- an integer line number, 0 matches all warnings
'append' -- if true, append to the list of filters
"""
import re
assert action in ("error", "ignore", "always", "default", "module",
"once"), "invalid action: %r" % (action,)
assert isinstance(message, str), "message must be a string"
assert isinstance(category, type), "category must be a class"
assert issubclass(category, Warning), "category must be a Warning subclass"
assert isinstance(module, str), "module must be a string"
assert isinstance(lineno, int) and lineno >= 0, \
"lineno must be an int >= 0"
item = (action, re.compile(message, re.I), category,
re.compile(module), lineno)
if append:
filters.append(item)
else:
filters.insert(0, item)
def simplefilter(action, category=Warning, lineno=0, append=False):
"""Insert a simple entry into the list of warnings filters (at the front).
A simple filter matches all modules and messages.
'action' -- one of "error", "ignore", "always", "default", "module",
or "once"
'category' -- a class that the warning must be a subclass of
'lineno' -- an integer line number, 0 matches all warnings
'append' -- if true, append to the list of filters
"""
assert action in ("error", "ignore", "always", "default", "module",
"once"), "invalid action: %r" % (action,)
assert isinstance(lineno, int) and lineno >= 0, \
"lineno must be an int >= 0"
item = (action, None, category, None, lineno)
if append:
filters.append(item)
else:
filters.insert(0, item)
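# A hedged usage sketch for filterwarnings()/simplefilter() above ("mylib" is a
# placeholder module name), kept as a comment so that importing this module
# does not itself mutate the filter list:
#
#   import warnings
#   # escalate DeprecationWarnings coming from modules under "mylib" to errors
#   warnings.filterwarnings("error", category=DeprecationWarning, module="mylib")
#   # ignore every ResourceWarning, regardless of message or module
#   warnings.simplefilter("ignore", category=ResourceWarning)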
def resetwarnings():
"""Clear the list of warning filters, so that no filters are active."""
filters[:] = []
class _OptionError(Exception):
"""Exception used by option processing helpers."""
pass
# Helper to process -W options passed via sys.warnoptions
def _processoptions(args):
for arg in args:
try:
_setoption(arg)
except _OptionError as msg:
print("Invalid -W option ignored:", msg, file=sys.stderr)
# Helper for _processoptions()
def _setoption(arg):
import re
parts = arg.split(':')
if len(parts) > 5:
raise _OptionError("too many fields (max 5): %r" % (arg,))
while len(parts) < 5:
parts.append('')
action, message, category, module, lineno = [s.strip()
for s in parts]
action = _getaction(action)
message = re.escape(message)
category = _getcategory(category)
module = re.escape(module)
if module:
module = module + '$'
if lineno:
try:
lineno = int(lineno)
if lineno < 0:
raise ValueError
except (ValueError, OverflowError):
raise _OptionError("invalid lineno %r" % (lineno,))
else:
lineno = 0
filterwarnings(action, message, category, module, lineno)
# Helper for _setoption()
def _getaction(action):
if not action:
return "default"
if action == "all": return "always" # Alias
for a in ('default', 'always', 'ignore', 'module', 'once', 'error'):
if a.startswith(action):
return a
raise _OptionError("invalid action: %r" % (action,))
# Helper for _setoption()
def _getcategory(category):
import re
if not category:
return Warning
if re.match("^[a-zA-Z0-9_]+$", category):
try:
cat = eval(category)
except NameError:
raise _OptionError("unknown warning category: %r" % (category,))
else:
i = category.rfind(".")
module = category[:i]
klass = category[i+1:]
try:
m = __import__(module, None, None, [klass])
except ImportError:
raise _OptionError("invalid module name: %r" % (module,))
try:
cat = getattr(m, klass)
except AttributeError:
raise _OptionError("unknown warning category: %r" % (category,))
if not issubclass(cat, Warning):
raise _OptionError("invalid warning category: %r" % (category,))
return cat
# Code typically replaced by _warnings
def warn(message, category=None, stacklevel=1):
"""Issue a warning, or maybe ignore it or raise an exception."""
# Check if message is already a Warning object
if isinstance(message, Warning):
category = message.__class__
# Check category argument
if category is None:
category = UserWarning
assert issubclass(category, Warning)
# Get context information
try:
caller = sys._getframe(stacklevel)
except ValueError:
globals = sys.__dict__
lineno = 1
else:
globals = caller.f_globals
lineno = caller.f_lineno
if '__name__' in globals:
module = globals['__name__']
else:
module = "<string>"
filename = globals.get('__file__')
if filename:
fnl = filename.lower()
if fnl.endswith((".pyc", ".pyo")):
filename = filename[:-1]
else:
if module == "__main__":
try:
filename = sys.argv[0]
except AttributeError:
# embedded interpreters don't have sys.argv, see bug #839151
filename = '__main__'
if not filename:
filename = module
registry = globals.setdefault("__warningregistry__", {})
warn_explicit(message, category, filename, lineno, module, registry,
globals)
def warn_explicit(message, category, filename, lineno,
module=None, registry=None, module_globals=None):
lineno = int(lineno)
if module is None:
module = filename or "<unknown>"
if module[-3:].lower() == ".py":
module = module[:-3] # XXX What about leading pathname?
if registry is None:
registry = {}
if isinstance(message, Warning):
text = str(message)
category = message.__class__
else:
text = message
message = category(message)
key = (text, category, lineno)
# Quick test for common case
if registry.get(key):
return
# Search the filters
for item in filters:
action, msg, cat, mod, ln = item
if ((msg is None or msg.match(text)) and
issubclass(category, cat) and
(mod is None or mod.match(module)) and
(ln == 0 or lineno == ln)):
break
else:
action = defaultaction
# Early exit actions
if action == "ignore":
registry[key] = 1
return
# Prime the linecache for formatting, in case the
# "file" is actually in a zipfile or something.
linecache.getlines(filename, module_globals)
if action == "error":
raise message
# Other actions
if action == "once":
registry[key] = 1
oncekey = (text, category)
if onceregistry.get(oncekey):
return
onceregistry[oncekey] = 1
elif action == "always":
pass
elif action == "module":
registry[key] = 1
altkey = (text, category, 0)
if registry.get(altkey):
return
registry[altkey] = 1
elif action == "default":
registry[key] = 1
else:
# Unrecognized actions are errors
raise RuntimeError(
"Unrecognized action (%r) in warnings.filters:\n %s" %
(action, item))
if not callable(showwarning):
raise TypeError("warnings.showwarning() must be set to a "
"function or method")
# Print message and context
showwarning(message, category, filename, lineno)
class WarningMessage(object):
"""Holds the result of a single showwarning() call."""
_WARNING_DETAILS = ("message", "category", "filename", "lineno", "file",
"line")
def __init__(self, message, category, filename, lineno, file=None,
line=None):
local_values = locals()
for attr in self._WARNING_DETAILS:
setattr(self, attr, local_values[attr])
self._category_name = category.__name__ if category else None
def __str__(self):
return ("{message : %r, category : %r, filename : %r, lineno : %s, "
"line : %r}" % (self.message, self._category_name,
self.filename, self.lineno, self.line))
class catch_warnings(object):
"""A context manager that copies and restores the warnings filter upon
exiting the context.
The 'record' argument specifies whether warnings should be captured by a
custom implementation of warnings.showwarning() and be appended to a list
returned by the context manager. Otherwise None is returned by the context
manager. The objects appended to the list are arguments whose attributes
mirror the arguments to showwarning().
The 'module' argument is to specify an alternative module to the module
named 'warnings' and imported under that name. This argument is only useful
when testing the warnings module itself.
"""
def __init__(self, *, record=False, module=None):
"""Specify whether to record warnings and if an alternative module
should be used other than sys.modules['warnings'].
For compatibility with Python 3.0, please consider all arguments to be
keyword-only.
"""
self._record = record
self._module = sys.modules['warnings'] if module is None else module
self._entered = False
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
log = []
def showwarning(*args, **kwargs):
log.append(WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
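# A hedged usage sketch for catch_warnings above, kept as a comment so the
# module stays free of import-time side effects (the warning text is
# illustrative only):
#
#   import warnings
#   with warnings.catch_warnings(record=True) as caught:
#       warnings.simplefilter("always")
#       warnings.warn("this API is deprecated", DeprecationWarning)
#   assert caught and issubclass(caught[0].category, DeprecationWarning)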
# filters contains a sequence of filter 5-tuples
# The components of the 5-tuple are:
# - an action: error, ignore, always, default, module, or once
# - a compiled regex that must match the warning message
# - a class representing the warning category
# - a compiled regex that must match the module that is being warned
# - a line number for the line being warned about, or 0 to mean any line
# If either of the compiled regexes is None, it matches anything.
_warnings_defaults = False
try:
from _warnings import (filters, _defaultaction, _onceregistry,
warn, warn_explicit)
defaultaction = _defaultaction
onceregistry = _onceregistry
_warnings_defaults = True
except ImportError:
filters = []
defaultaction = "default"
onceregistry = {}
# Module initialization
_processoptions(sys.warnoptions)
if not _warnings_defaults:
silence = [ImportWarning, PendingDeprecationWarning]
silence.append(DeprecationWarning)
for cls in silence:
simplefilter("ignore", category=cls)
bytes_warning = sys.flags.bytes_warning
if bytes_warning > 1:
bytes_action = "error"
elif bytes_warning:
bytes_action = "default"
else:
bytes_action = "ignore"
simplefilter(bytes_action, category=BytesWarning, append=1)
# resource usage warnings are enabled by default in pydebug mode
if hasattr(sys, 'gettotalrefcount'):
resource_action = "always"
else:
resource_action = "ignore"
simplefilter(resource_action, category=ResourceWarning, append=1)
del _warnings_defaults
|
bllli/ingress-keys-management
|
refs/heads/django
|
backend/admin.py
|
1
|
from django.contrib import admin
from backend.models import Portal, Comment, Tag, TagType
class TagInline(admin.StackedInline):
model = Tag
class PortalInline(admin.StackedInline):
model = Portal
@admin.register(Portal)
class PortalModelAdmin(admin.ModelAdmin):
list_display = ['pk', 'title', 'nickname', 'link']
list_display_links = ['title']
list_filter = ['tags']
search_fields = ['title', 'nickname']
list_per_page = 20
readonly_fields = ['image', 'late6', 'lnge6']
fieldsets = [
('', {'fields': ['title', 'nickname', 'link', 'image']}),
('Location', {'fields': ['late6', 'lnge6']}),
('Rel', {'fields': ['author', 'tags', 'adder']})
]
@admin.register(Comment)
class CommentModelAdmin(admin.ModelAdmin):
pass
@admin.register(Tag)
class TagModelAdmin(admin.ModelAdmin):
pass
@admin.register(TagType)
class TagTypeModelAdmin(admin.ModelAdmin):
pass
|
FedericoCeratto/debian-pymongo
|
refs/heads/master
|
pymongo/response.py
|
17
|
# Copyright 2014-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Represent a response from the server."""
class Response(object):
__slots__ = ('_data', '_address')
def __init__(self, data, address):
"""Represent a response from the server.
:Parameters:
- `data`: Raw BSON bytes.
- `address`: (host, port) of the source server.
"""
self._data = data
self._address = address
@property
def data(self):
"""Server response's raw BSON bytes."""
return self._data
@property
def address(self):
"""(host, port) of the source server."""
return self._address
class ExhaustResponse(Response):
__slots__ = ('_socket_info', '_pool')
def __init__(self, data, address, socket_info, pool):
"""Represent a response to an exhaust cursor's initial query.
:Parameters:
- `data`: Raw BSON bytes.
- `address`: (host, port) of the source server.
- `socket_info`: The SocketInfo used for the initial query.
- `pool`: The Pool from which the SocketInfo came.
"""
super(ExhaustResponse, self).__init__(data, address)
self._socket_info = socket_info
self._pool = pool
@property
def socket_info(self):
"""The SocketInfo used for the initial query.
The server will send batches on this socket, without waiting for
getMores from the client, until the result set is exhausted or there
is an error.
"""
return self._socket_info
@property
def pool(self):
"""The Pool from which the SocketInfo came."""
return self._pool
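# A hedged usage sketch; in practice these objects are built by the driver's
# networking layer, so the literal values below are illustrative only:
#
#   r = Response(data=b"<raw bson reply>", address=("localhost", 27017))
#   r.data     # -> b"<raw bson reply>"
#   r.address  # -> ("localhost", 27017)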
|
huihoo/reader
|
refs/heads/master
|
vendor/paypal/standard/ipn/admin.py
|
2
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.contrib import admin
from vendor.paypal.standard.ipn.models import PayPalIPN
class PayPalIPNAdmin(admin.ModelAdmin):
date_hierarchy = 'payment_date'
fieldsets = (
(None, {
"fields": [
"flag", "txn_id", "txn_type", "payment_status", "payment_date",
"transaction_entity", "reason_code", "pending_reason",
"mc_gross", "mc_fee", "auth_status", "auth_amount", "auth_exp",
"auth_id"
]
}),
("Address", {
"description": "The address of the Buyer.",
'classes': ('collapse',),
"fields": [
"address_city", "address_country", "address_country_code",
"address_name", "address_state", "address_status",
"address_street", "address_zip"
]
}),
("Buyer", {
"description": "The information about the Buyer.",
'classes': ('collapse',),
"fields": [
"first_name", "last_name", "payer_business_name", "payer_email",
"payer_id", "payer_status", "contact_phone", "residence_country"
]
}),
("Seller", {
"description": "The information about the Seller.",
'classes': ('collapse',),
"fields": [
"business", "item_name", "item_number", "quantity",
"receiver_email", "receiver_id", "custom", "invoice", "memo"
]
}),
("Recurring", {
"description": "Information about recurring Payments.",
"classes": ("collapse",),
"fields": [
"profile_status", "initial_payment_amount", "amount_per_cycle",
"outstanding_balance", "period_type", "product_name",
"product_type", "recurring_payment_id", "receipt_id",
"next_payment_date"
]
}),
("Admin", {
"description": "Additional Info.",
"classes": ('collapse',),
"fields": [
"test_ipn", "ipaddress", "query", "response", "flag_code",
"flag_info"
]
}),
)
list_display = [
"__unicode__", "flag", "flag_info", "invoice", "custom",
"payment_status", "created_at"
]
search_fields = ["txn_id", "recurring_payment_id"]
admin.site.register(PayPalIPN, PayPalIPNAdmin)
|
metabrainz/picard
|
refs/heads/master
|
picard/ui/ui_options_general.py
|
2
|
# -*- coding: utf-8 -*-
# Automatically generated - don't edit.
# Use `python setup.py build_ui` to update it.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_GeneralOptionsPage(object):
def setupUi(self, GeneralOptionsPage):
GeneralOptionsPage.setObjectName("GeneralOptionsPage")
GeneralOptionsPage.resize(403, 599)
self.vboxlayout = QtWidgets.QVBoxLayout(GeneralOptionsPage)
self.vboxlayout.setObjectName("vboxlayout")
self.groupBox = QtWidgets.QGroupBox(GeneralOptionsPage)
self.groupBox.setObjectName("groupBox")
self.gridlayout = QtWidgets.QGridLayout(self.groupBox)
self.gridlayout.setSpacing(2)
self.gridlayout.setObjectName("gridlayout")
self.server_port = QtWidgets.QSpinBox(self.groupBox)
self.server_port.setMinimum(1)
self.server_port.setMaximum(65535)
self.server_port.setProperty("value", 80)
self.server_port.setObjectName("server_port")
self.gridlayout.addWidget(self.server_port, 1, 1, 1, 1)
self.server_host_primary_warning = QtWidgets.QFrame(self.groupBox)
self.server_host_primary_warning.setStyleSheet("QFrame { background-color: #ffc107; color: black }\n"
"QCheckBox { color: black }")
self.server_host_primary_warning.setFrameShape(QtWidgets.QFrame.NoFrame)
self.server_host_primary_warning.setObjectName("server_host_primary_warning")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.server_host_primary_warning)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.label_4 = QtWidgets.QLabel(self.server_host_primary_warning)
self.label_4.setWordWrap(True)
self.label_4.setObjectName("label_4")
self.verticalLayout_4.addWidget(self.label_4)
self.use_server_for_submission = QtWidgets.QCheckBox(self.server_host_primary_warning)
self.use_server_for_submission.setObjectName("use_server_for_submission")
self.verticalLayout_4.addWidget(self.use_server_for_submission)
self.gridlayout.addWidget(self.server_host_primary_warning, 3, 0, 1, 2)
self.server_host = QtWidgets.QComboBox(self.groupBox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.server_host.sizePolicy().hasHeightForWidth())
self.server_host.setSizePolicy(sizePolicy)
self.server_host.setEditable(True)
self.server_host.setObjectName("server_host")
self.gridlayout.addWidget(self.server_host, 1, 0, 1, 1)
self.label_7 = QtWidgets.QLabel(self.groupBox)
self.label_7.setObjectName("label_7")
self.gridlayout.addWidget(self.label_7, 0, 1, 1, 1)
self.label = QtWidgets.QLabel(self.groupBox)
self.label.setObjectName("label")
self.gridlayout.addWidget(self.label, 0, 0, 1, 1)
spacerItem = QtWidgets.QSpacerItem(1, 4, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
self.gridlayout.addItem(spacerItem, 2, 0, 1, 1)
self.vboxlayout.addWidget(self.groupBox)
self.rename_files_2 = QtWidgets.QGroupBox(GeneralOptionsPage)
self.rename_files_2.setObjectName("rename_files_2")
self.gridlayout1 = QtWidgets.QGridLayout(self.rename_files_2)
self.gridlayout1.setSpacing(2)
self.gridlayout1.setObjectName("gridlayout1")
self.login = QtWidgets.QPushButton(self.rename_files_2)
self.login.setObjectName("login")
self.gridlayout1.addWidget(self.login, 1, 0, 1, 1)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridlayout1.addItem(spacerItem1, 1, 2, 1, 1)
self.logout = QtWidgets.QPushButton(self.rename_files_2)
self.logout.setObjectName("logout")
self.gridlayout1.addWidget(self.logout, 1, 1, 1, 1)
self.logged_in = QtWidgets.QLabel(self.rename_files_2)
self.logged_in.setText("")
self.logged_in.setObjectName("logged_in")
self.gridlayout1.addWidget(self.logged_in, 0, 0, 1, 3)
self.vboxlayout.addWidget(self.rename_files_2)
self.groupBox_2 = QtWidgets.QGroupBox(GeneralOptionsPage)
self.groupBox_2.setObjectName("groupBox_2")
self.verticalLayout = QtWidgets.QVBoxLayout(self.groupBox_2)
self.verticalLayout.setObjectName("verticalLayout")
self.analyze_new_files = QtWidgets.QCheckBox(self.groupBox_2)
self.analyze_new_files.setObjectName("analyze_new_files")
self.verticalLayout.addWidget(self.analyze_new_files)
self.ignore_file_mbids = QtWidgets.QCheckBox(self.groupBox_2)
self.ignore_file_mbids.setObjectName("ignore_file_mbids")
self.verticalLayout.addWidget(self.ignore_file_mbids)
self.vboxlayout.addWidget(self.groupBox_2)
self.update_check_groupbox = QtWidgets.QGroupBox(GeneralOptionsPage)
self.update_check_groupbox.setEnabled(True)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.update_check_groupbox.sizePolicy().hasHeightForWidth())
self.update_check_groupbox.setSizePolicy(sizePolicy)
self.update_check_groupbox.setObjectName("update_check_groupbox")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.update_check_groupbox)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.check_for_updates = QtWidgets.QCheckBox(self.update_check_groupbox)
self.check_for_updates.setObjectName("check_for_updates")
self.verticalLayout_2.addWidget(self.check_for_updates)
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setContentsMargins(-1, -1, -1, 0)
self.gridLayout.setObjectName("gridLayout")
self.label_2 = QtWidgets.QLabel(self.update_check_groupbox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_2.sizePolicy().hasHeightForWidth())
self.label_2.setSizePolicy(sizePolicy)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 0, 0, 1, 1)
self.update_check_days = QtWidgets.QSpinBox(self.update_check_groupbox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.update_check_days.sizePolicy().hasHeightForWidth())
self.update_check_days.setSizePolicy(sizePolicy)
self.update_check_days.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.update_check_days.setMinimum(1)
self.update_check_days.setObjectName("update_check_days")
self.gridLayout.addWidget(self.update_check_days, 0, 1, 1, 1)
self.verticalLayout_2.addLayout(self.gridLayout)
self.gridLayout_2 = QtWidgets.QGridLayout()
self.gridLayout_2.setContentsMargins(-1, -1, -1, 0)
self.gridLayout_2.setObjectName("gridLayout_2")
self.label_3 = QtWidgets.QLabel(self.update_check_groupbox)
self.label_3.setObjectName("label_3")
self.gridLayout_2.addWidget(self.label_3, 0, 0, 1, 1)
self.update_level = QtWidgets.QComboBox(self.update_check_groupbox)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.update_level.sizePolicy().hasHeightForWidth())
self.update_level.setSizePolicy(sizePolicy)
self.update_level.setEditable(False)
self.update_level.setObjectName("update_level")
self.gridLayout_2.addWidget(self.update_level, 0, 1, 1, 1)
self.verticalLayout_2.addLayout(self.gridLayout_2)
self.vboxlayout.addWidget(self.update_check_groupbox)
spacerItem2 = QtWidgets.QSpacerItem(181, 21, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.vboxlayout.addItem(spacerItem2)
self.retranslateUi(GeneralOptionsPage)
QtCore.QMetaObject.connectSlotsByName(GeneralOptionsPage)
GeneralOptionsPage.setTabOrder(self.server_host, self.server_port)
GeneralOptionsPage.setTabOrder(self.server_port, self.use_server_for_submission)
GeneralOptionsPage.setTabOrder(self.use_server_for_submission, self.login)
GeneralOptionsPage.setTabOrder(self.login, self.logout)
GeneralOptionsPage.setTabOrder(self.logout, self.analyze_new_files)
GeneralOptionsPage.setTabOrder(self.analyze_new_files, self.ignore_file_mbids)
GeneralOptionsPage.setTabOrder(self.ignore_file_mbids, self.check_for_updates)
GeneralOptionsPage.setTabOrder(self.check_for_updates, self.update_check_days)
GeneralOptionsPage.setTabOrder(self.update_check_days, self.update_level)
def retranslateUi(self, GeneralOptionsPage):
_translate = QtCore.QCoreApplication.translate
self.groupBox.setTitle(_("MusicBrainz Server"))
self.label_4.setText(_("You have configured an unofficial MusicBrainz server. By default submissions of releases, recordings and disc IDs will go to the primary database on musicbrainz.org."))
self.use_server_for_submission.setText(_("Submit data to the configured server"))
self.label_7.setText(_("Port:"))
self.label.setText(_("Server address:"))
self.rename_files_2.setTitle(_("MusicBrainz Account"))
self.login.setText(_("Log in"))
self.logout.setText(_("Log out"))
self.groupBox_2.setTitle(_("General"))
self.analyze_new_files.setText(_("Automatically scan all new files"))
self.ignore_file_mbids.setText(_("Ignore MBIDs when loading new files"))
self.update_check_groupbox.setTitle(_("Update Checking"))
self.check_for_updates.setText(_("Check for updates during start-up"))
self.label_2.setText(_("Days between checks:"))
self.label_3.setText(_("Updates to check:"))
|
mbayon/TFG-MachineLearning
|
refs/heads/master
|
venv/lib/python3.6/site-packages/numpy/testing/nosetester.py
|
36
|
"""
Nose test running.
This module implements ``test()`` and ``bench()`` functions for NumPy modules.
"""
from __future__ import division, absolute_import, print_function
import os
import sys
import warnings
from numpy.compat import basestring
import numpy as np
from .utils import import_nose, suppress_warnings
def get_package_name(filepath):
"""
Given a path where a package is installed, determine its name.
Parameters
----------
filepath : str
Path to a file. If the determination fails, "numpy" is returned.
Examples
--------
>>> np.testing.nosetester.get_package_name('nonsense')
'numpy'
"""
fullpath = filepath[:]
pkg_name = []
while 'site-packages' in filepath or 'dist-packages' in filepath:
filepath, p2 = os.path.split(filepath)
if p2 in ('site-packages', 'dist-packages'):
break
pkg_name.append(p2)
# if package name determination failed, just default to numpy/scipy
if not pkg_name:
if 'scipy' in fullpath:
return 'scipy'
else:
return 'numpy'
# otherwise, reverse to get correct order and return
pkg_name.reverse()
# don't include the outer egg directory
if pkg_name[0].endswith('.egg'):
pkg_name.pop(0)
return '.'.join(pkg_name)
def run_module_suite(file_to_run=None, argv=None):
"""
Run a test module.
Equivalent to calling ``$ nosetests <argv> <file_to_run>`` from
the command line
Parameters
----------
file_to_run : str, optional
Path to test module, or None.
By default, run the module from which this function is called.
argv : list of strings
Arguments to be passed to the nose test runner. ``argv[0]`` is
ignored. All command line arguments accepted by ``nosetests``
will work. If it is the default value None, sys.argv is used.
.. versionadded:: 1.9.0
Examples
--------
Adding the following::
if __name__ == "__main__" :
run_module_suite(argv=sys.argv)
at the end of a test module will run the tests when that module is
called in the python interpreter.
Alternatively, calling::
>>> run_module_suite(file_to_run="numpy/tests/test_matlib.py")
from an interpreter will run all the test routine in 'test_matlib.py'.
"""
if file_to_run is None:
f = sys._getframe(1)
file_to_run = f.f_locals.get('__file__', None)
if file_to_run is None:
raise AssertionError
if argv is None:
argv = sys.argv + [file_to_run]
else:
argv = argv + [file_to_run]
nose = import_nose()
from .noseclasses import KnownFailurePlugin
nose.run(argv=argv, addplugins=[KnownFailurePlugin()])
class NoseTester(object):
"""
Nose test runner.
This class is made available as numpy.testing.Tester, and a test function
is typically added to a package's __init__.py like so::
from numpy.testing import Tester
test = Tester().test
Calling this test function finds and runs all tests associated with the
package and all its sub-packages.
Attributes
----------
package_path : str
Full path to the package to test.
package_name : str
Name of the package to test.
Parameters
----------
package : module, str or None, optional
The package to test. If a string, this should be the full path to
the package. If None (default), `package` is set to the module from
which `NoseTester` is initialized.
raise_warnings : None, str or sequence of warnings, optional
This specifies which warnings to configure as 'raise' instead
of being shown once during the test execution. Valid strings are:
- "develop" : equals ``(Warning,)``
- "release" : equals ``()``, don't raise on any warnings.
Default is "release".
depth : int, optional
If `package` is None, then this can be used to initialize from the
module of the caller of (the caller of (...)) the code that
initializes `NoseTester`. Default of 0 means the module of the
immediate caller; higher values are useful for utility routines that
want to initialize `NoseTester` objects on behalf of other code.
"""
def __init__(self, package=None, raise_warnings="release", depth=0):
# Back-compat: 'None' used to mean either "release" or "develop"
# depending on whether this was a release or develop version of
# numpy. Those semantics were fine for testing numpy, but not so
# helpful for downstream projects like scipy that use
# numpy.testing. (They want to set this based on whether *they* are a
# release or develop version, not whether numpy is.) So we continue to
# accept 'None' for back-compat, but it's now just an alias for the
# default "release".
if raise_warnings is None:
raise_warnings = "release"
package_name = None
if package is None:
f = sys._getframe(1 + depth)
package_path = f.f_locals.get('__file__', None)
if package_path is None:
raise AssertionError
package_path = os.path.dirname(package_path)
package_name = f.f_locals.get('__name__', None)
elif isinstance(package, type(os)):
package_path = os.path.dirname(package.__file__)
package_name = getattr(package, '__name__', None)
else:
package_path = str(package)
self.package_path = package_path
# Find the package name under test; this name is used to limit coverage
# reporting (if enabled).
if package_name is None:
package_name = get_package_name(package_path)
self.package_name = package_name
# Set to "release" in constructor in maintenance branches.
self.raise_warnings = raise_warnings
def _test_argv(self, label, verbose, extra_argv):
''' Generate argv for nosetest command
Parameters
----------
label : {'fast', 'full', '', attribute identifier}, optional
see ``test`` docstring
verbose : int, optional
Verbosity value for test outputs, in the range 1-10. Default is 1.
extra_argv : list, optional
List with any extra arguments to pass to nosetests.
Returns
-------
argv : list
command line arguments that will be passed to nose
'''
argv = [__file__, self.package_path, '-s']
if label and label != 'full':
if not isinstance(label, basestring):
raise TypeError('Selection label should be a string')
if label == 'fast':
label = 'not slow'
argv += ['-A', label]
argv += ['--verbosity', str(verbose)]
# When installing with setuptools, and also in some other cases, the
# test_*.py files end up marked +x executable. Nose, by default, does
# not run files marked with +x as they might be scripts. However, in
# our case nose only looks for test_*.py files under the package
# directory, which should be safe.
argv += ['--exe']
if extra_argv:
argv += extra_argv
return argv
def _show_system_info(self):
nose = import_nose()
import numpy
print("NumPy version %s" % numpy.__version__)
relaxed_strides = numpy.ones((10, 1), order="C").flags.f_contiguous
print("NumPy relaxed strides checking option:", relaxed_strides)
npdir = os.path.dirname(numpy.__file__)
print("NumPy is installed in %s" % npdir)
if 'scipy' in self.package_name:
import scipy
print("SciPy version %s" % scipy.__version__)
spdir = os.path.dirname(scipy.__file__)
print("SciPy is installed in %s" % spdir)
pyversion = sys.version.replace('\n', '')
print("Python version %s" % pyversion)
print("nose version %d.%d.%d" % nose.__versioninfo__)
def _get_custom_doctester(self):
""" Return instantiated plugin for doctests
Allows subclassing of this class to override doctester
A return value of None means use the nose builtin doctest plugin
"""
from .noseclasses import NumpyDoctest
return NumpyDoctest()
def prepare_test_args(self, label='fast', verbose=1, extra_argv=None,
doctests=False, coverage=False):
"""
Run tests for module using nose.
This method does the heavy lifting for the `test` method. It takes all
the same arguments, for details see `test`.
See Also
--------
test
"""
# fail with nice error message if nose is not present
import_nose()
# compile argv
argv = self._test_argv(label, verbose, extra_argv)
# our way of doing coverage
if coverage:
argv += ['--cover-package=%s' % self.package_name, '--with-coverage',
'--cover-tests', '--cover-erase']
# construct list of plugins
import nose.plugins.builtin
from .noseclasses import KnownFailurePlugin, Unplugger
plugins = [KnownFailurePlugin()]
plugins += [p() for p in nose.plugins.builtin.plugins]
# add doctesting if required
doctest_argv = '--with-doctest' in argv
if not doctests and doctest_argv:
doctests = True
plug = self._get_custom_doctester()
if plug is None:
# use standard doctesting
if doctests and not doctest_argv:
argv += ['--with-doctest']
else: # custom doctesting
if doctest_argv: # in fact the unplugger would take care of this
argv.remove('--with-doctest')
plugins += [Unplugger('doctest'), plug]
if doctests:
argv += ['--with-' + plug.name]
return argv, plugins
def test(self, label='fast', verbose=1, extra_argv=None,
doctests=False, coverage=False, raise_warnings=None):
"""
Run tests for module using nose.
Parameters
----------
label : {'fast', 'full', '', attribute identifier}, optional
Identifies the tests to run. This can be a string to pass to
the nosetests executable with the '-A' option, or one of several
special values. Special values are:
* 'fast' - the default - which corresponds to the ``nosetests -A``
option of 'not slow'.
* 'full' - fast (as above) and slow tests as in the
'no -A' option to nosetests - this is the same as ''.
* None or '' - run all tests.
attribute_identifier - string passed directly to nosetests as '-A'.
verbose : int, optional
Verbosity value for test outputs, in the range 1-10. Default is 1.
extra_argv : list, optional
List with any extra arguments to pass to nosetests.
doctests : bool, optional
If True, run doctests in module. Default is False.
coverage : bool, optional
If True, report coverage of NumPy code. Default is False.
(This requires the `coverage module:
<http://nedbatchelder.com/code/modules/coverage.html>`_).
raise_warnings : None, str or sequence of warnings, optional
This specifies which warnings to configure as 'raise' instead
of being shown once during the test execution. Valid strings are:
- "develop" : equals ``(Warning,)``
- "release" : equals ``()``, don't raise on any warnings.
The default is to use the class initialization value.
Returns
-------
result : object
Returns the result of running the tests as a
``nose.result.TextTestResult`` object.
Notes
-----
Each NumPy module exposes `test` in its namespace to run all tests for it.
For example, to run all tests for numpy.lib:
>>> np.lib.test() #doctest: +SKIP
Examples
--------
>>> result = np.lib.test() #doctest: +SKIP
Running unit tests for numpy.lib
...
Ran 976 tests in 3.933s
OK
>>> result.errors #doctest: +SKIP
[]
>>> result.knownfail #doctest: +SKIP
[]
"""
# cap verbosity at 3 because nose becomes *very* verbose beyond that
verbose = min(verbose, 3)
from . import utils
utils.verbose = verbose
if doctests:
print("Running unit tests and doctests for %s" % self.package_name)
else:
print("Running unit tests for %s" % self.package_name)
self._show_system_info()
# reset doctest state on every run
import doctest
doctest.master = None
if raise_warnings is None:
raise_warnings = self.raise_warnings
_warn_opts = dict(develop=(Warning,),
release=())
if isinstance(raise_warnings, basestring):
raise_warnings = _warn_opts[raise_warnings]
with suppress_warnings("location") as sup:
# Reset the warning filters to the default state,
# so that running the tests is more repeatable.
warnings.resetwarnings()
# Set all warnings to 'warn', this is because the default 'once'
# has the bad property of possibly shadowing later warnings.
warnings.filterwarnings('always')
# Force the requested warnings to raise
for warningtype in raise_warnings:
warnings.filterwarnings('error', category=warningtype)
# Filter out annoying import messages.
sup.filter(message='Not importing directory')
sup.filter(message="numpy.dtype size changed")
sup.filter(message="numpy.ufunc size changed")
sup.filter(category=np.ModuleDeprecationWarning)
# Filter out boolean '-' deprecation messages. This allows
# older versions of scipy to test without a flood of messages.
sup.filter(message=".*boolean negative.*")
sup.filter(message=".*boolean subtract.*")
# Filter out distutils cpu warnings (could be localized to
# distutils tests). ASV has problems with top level import,
# so fetch module for suppression here.
with warnings.catch_warnings():
warnings.simplefilter("always")
from ..distutils import cpuinfo
sup.filter(category=UserWarning, module=cpuinfo)
# See #7949: Filter out deprecation warnings due to the -3 flag to
# python 2
if sys.version_info.major == 2 and sys.py3kwarning:
# This is very specific, so using the fragile module filter
# is fine
import threading
sup.filter(DeprecationWarning,
r"sys\.exc_clear\(\) not supported in 3\.x",
module=threading)
sup.filter(DeprecationWarning, message=r"in 3\.x, __setslice__")
sup.filter(DeprecationWarning, message=r"in 3\.x, __getslice__")
sup.filter(DeprecationWarning, message=r"buffer\(\) not supported in 3\.x")
sup.filter(DeprecationWarning, message=r"CObject type is not supported in 3\.x")
sup.filter(DeprecationWarning, message=r"comparing unequal types not supported in 3\.x")
# Filter out some deprecation warnings inside nose 1.3.7 when run
# on python 3.5b2. See
# https://github.com/nose-devs/nose/issues/929
# Note: it is hard to filter based on module for sup (lineno could
# be implemented).
warnings.filterwarnings("ignore", message=".*getargspec.*",
category=DeprecationWarning,
module=r"nose\.")
from .noseclasses import NumpyTestProgram
argv, plugins = self.prepare_test_args(
label, verbose, extra_argv, doctests, coverage)
t = NumpyTestProgram(argv=argv, exit=False, plugins=plugins)
return t.result
def bench(self, label='fast', verbose=1, extra_argv=None):
"""
Run benchmarks for module using nose.
Parameters
----------
label : {'fast', 'full', '', attribute identifier}, optional
Identifies the benchmarks to run. This can be a string to pass to
the nosetests executable with the '-A' option, or one of several
special values. Special values are:
* 'fast' - the default - which corresponds to the ``nosetests -A``
option of 'not slow'.
* 'full' - fast (as above) and slow benchmarks as in the
'no -A' option to nosetests - this is the same as ''.
* None or '' - run all tests.
attribute_identifier - string passed directly to nosetests as '-A'.
verbose : int, optional
Verbosity value for benchmark outputs, in the range 1-10. Default is 1.
extra_argv : list, optional
List with any extra arguments to pass to nosetests.
Returns
-------
success : bool
Returns True if running the benchmarks works, False if an error
occurred.
Notes
-----
Benchmarks are like tests, but have names starting with "bench" instead
of "test", and can be found under the "benchmarks" sub-directory of the
module.
Each NumPy module exposes `bench` in its namespace to run all benchmarks
for it.
Examples
--------
>>> success = np.lib.bench() #doctest: +SKIP
Running benchmarks for numpy.lib
...
using 562341 items:
unique:
0.11
unique1d:
0.11
ratio: 1.0
nUnique: 56230 == 56230
...
OK
>>> success #doctest: +SKIP
True
"""
print("Running benchmarks for %s" % self.package_name)
self._show_system_info()
argv = self._test_argv(label, verbose, extra_argv)
argv += ['--match', r'(?:^|[\\b_\\.%s-])[Bb]ench' % os.sep]
# import nose or make informative error
nose = import_nose()
# get plugin to disable doctests
from .noseclasses import Unplugger
add_plugins = [Unplugger('doctest')]
return nose.run(argv=argv, addplugins=add_plugins)
def _numpy_tester():
if hasattr(np, "__version__") and ".dev0" in np.__version__:
mode = "develop"
else:
mode = "release"
return NoseTester(raise_warnings=mode, depth=1)
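# Illustrative sketch (not part of this module): a downstream package typically
# exposes these entry points in its __init__.py, as the NoseTester docstring
# above describes:
#
#     from numpy.testing import Tester
#     test = Tester().test      # mypackage.test() runs the test suite
#     bench = Tester().bench    # mypackage.bench() runs the benchmarks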
|
nubark/odoo
|
refs/heads/9.0
|
addons/base_gengo/ir_translation.py
|
46
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp import api
from openerp.osv import fields, osv
from openerp.exceptions import UserError
LANG_CODE_MAPPING = {
'ar_SY': ('ar', 'Arabic'),
'id_ID': ('id', 'Indonesian'),
'nl_NL': ('nl', 'Dutch'),
'fr_CA': ('fr-ca', 'French (Canada)'),
'pl_PL': ('pl', 'Polish'),
'zh_TW': ('zh-tw', 'Chinese (Traditional)'),
'sv_SE': ('sv', 'Swedish'),
'ko_KR': ('ko', 'Korean'),
'pt_PT': ('pt', 'Portuguese (Europe)'),
'en_US': ('en', 'English'),
'ja_JP': ('ja', 'Japanese'),
'es_ES': ('es', 'Spanish (Spain)'),
'zh_CN': ('zh', 'Chinese (Simplified)'),
'de_DE': ('de', 'German'),
'fr_FR': ('fr', 'French'),
'fr_BE': ('fr', 'French'),
'ru_RU': ('ru', 'Russian'),
'it_IT': ('it', 'Italian'),
'pt_BR': ('pt-br', 'Portuguese (Brazil)'),
'th_TH': ('th', 'Thai'),
'nb_NO': ('no', 'Norwegian'),
'ro_RO': ('ro', 'Romanian'),
'tr_TR': ('tr', 'Turkish'),
'bg_BG': ('bg', 'Bulgarian'),
'da_DK': ('da', 'Danish'),
'en_GB': ('en-gb', 'English (British)'),
'el_GR': ('el', 'Greek'),
'vi_VN': ('vi', 'Vietnamese'),
'he_IL': ('he', 'Hebrew'),
'hu_HU': ('hu', 'Hungarian'),
'fi_FI': ('fi', 'Finnish')
}
class ir_translation(osv.Model):
_name = "ir.translation"
_inherit = "ir.translation"
_columns = {
'gengo_comment': fields.text("Comments & Activity Linked to Gengo"),
'order_id': fields.char('Gengo Order ID', size=32),
"gengo_translation": fields.selection([('machine', 'Translation By Machine'),
('standard', 'Standard'),
('pro', 'Pro'),
('ultra', 'Ultra')], "Gengo Translation Service Level", help='You can select here the service level you want for an automatic translation using Gengo.'),
}
def _get_all_supported_languages(self, cr, uid, context=None):
flag, gengo = self.pool.get('base.gengo.translations').gengo_authentication(cr, uid, context=context)
if not flag:
raise UserError(gengo)
supported_langs = {}
lang_pair = gengo.getServiceLanguagePairs(lc_src='en')
if lang_pair['opstat'] == 'ok':
for g_lang in lang_pair['response']:
if g_lang['lc_tgt'] not in supported_langs:
supported_langs[g_lang['lc_tgt']] = []
supported_langs[g_lang['lc_tgt']] += [g_lang['tier']]
return supported_langs
def _get_gengo_corresponding_language(cr, lang):
return lang in LANG_CODE_MAPPING and LANG_CODE_MAPPING[lang][0] or lang
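# For example, 'fr_BE' maps to 'fr' via LANG_CODE_MAPPING above, while a code
# missing from the mapping (e.g. 'eo_EO') is passed through unchanged.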
def _get_source_query(self, cr, uid, name, types, lang, source, res_id):
query, params = super(ir_translation, self)._get_source_query(cr, uid, name, types, lang, source, res_id)
query += """
ORDER BY
CASE
WHEN gengo_translation=%s then 10
WHEN gengo_translation=%s then 20
WHEN gengo_translation=%s then 30
WHEN gengo_translation=%s then 40
ELSE 0
END DESC
"""
params += ('machine', 'standard', 'ultra', 'pro',)
return (query, params)
@api.model
def _get_terms_query(self, field, records):
query, params = super(ir_translation, self)._get_terms_query(field, records)
# order translations from worst to best
query += """
ORDER BY
CASE
WHEN gengo_translation=%s then 10
WHEN gengo_translation=%s then 20
WHEN gengo_translation=%s then 30
WHEN gengo_translation=%s then 40
ELSE 0
END ASC
"""
params += ('machine', 'standard', 'ultra', 'pro')
return query, params
|
faywong/FFPlayer
|
refs/heads/trunk
|
project/jni/python/src/Lib/plat-mac/lib-scriptpackages/StdSuites/QuickDraw_Graphics_Suppleme.py
|
82
|
"""Suite QuickDraw Graphics Supplemental Suite: Defines transformations of graphic objects
Level 1, version 1
Generated from /Volumes/Sap/System Folder/Extensions/AppleScript
AETE/AEUT resource version 1/0, language 0, script 0
"""
import aetools
import MacOS
_code = 'qdsp'
class QuickDraw_Graphics_Suppleme_Events:
pass
class drawing_area(aetools.ComponentItem):
"""drawing area - Container for graphics and supporting information """
want = 'cdrw'
class _Prop_rotation(aetools.NProperty):
"""rotation - the default rotation for objects in the drawing area """
which = 'prot'
want = 'trot'
class _Prop_scale(aetools.NProperty):
"""scale - the default scaling for objects in the drawing area """
which = 'pscl'
want = 'fixd'
class _Prop_translation(aetools.NProperty):
"""translation - the default repositioning for objects in the drawing area """
which = 'ptrs'
want = 'QDpt'
drawing_areas = drawing_area
class graphic_groups(aetools.ComponentItem):
"""graphic groups - """
want = 'cpic'
graphic_group = graphic_groups
drawing_area._superclassnames = []
drawing_area._privpropdict = {
'rotation' : _Prop_rotation,
'scale' : _Prop_scale,
'translation' : _Prop_translation,
}
drawing_area._privelemdict = {
}
graphic_groups._superclassnames = []
graphic_groups._privpropdict = {
}
graphic_groups._privelemdict = {
}
#
# Indices of types declared in this module
#
_classdeclarations = {
'cdrw' : drawing_area,
'cpic' : graphic_groups,
}
_propdeclarations = {
'prot' : _Prop_rotation,
'pscl' : _Prop_scale,
'ptrs' : _Prop_translation,
}
_compdeclarations = {
}
_enumdeclarations = {
}
|
noahchense/micropython
|
refs/heads/master
|
tests/basics/dict_get.py
|
118
|
for d in {}, {42:2}:
print(d.get(42))
print(d.get(42,2))
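# Expected output: None and 2 for the empty dict, then 2 and 2 for {42: 2}.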
|
chrsrds/scikit-learn
|
refs/heads/master
|
examples/tree/plot_tree_regression.py
|
82
|
"""
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with a decision tree.
A :ref:`decision tree <tree>` is
used to fit a sine curve with additional noisy observations. As a result, it
learns local linear regressions approximating the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision tree learns overly fine
details of the training data and learns from the noise, i.e. it overfits.
"""
print(__doc__)
# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(X, y, s=20, edgecolor="black",
c="darkorange", label="data")
plt.plot(X_test, y_1, color="cornflowerblue",
label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, color="yellowgreen", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
|
xychix/nessrest
|
refs/heads/master
|
nessrest/credentials.py
|
3
|
class WindowsPassword(object):
'''
Username and password for a Windows target.
'''
category = "Host"
name = "Windows"
def __init__(self, username, password, domain="", auth_method="Password"):
self.username = username
self.password = password
self.domain = domain
self.auth_method = auth_method
class Ssh(object):
'''
Does not provide complete credential information on its own. Create one of
its subclasses instead. The privilege escalation functions can be used on
any subclass.
'''
category = "Host"
name = "SSH"
def __init__(self):
self.elevate_privileges_with = "Nothing"
def cisco_enable(self, enable_password):
self.elevate_privileges_with = "Cisco 'enable'"
self.escalation_password = enable_password
return self
def sudo(self, password, username="root"):
self.elevate_privileges_with = "sudo"
self.escalation_account = username
self.escalation_password = password
return self
class SshPassword(Ssh):
'''
Username and password for an SSH login.
'''
def __init__(self, username, password):
super(SshPassword, self).__init__()
self.auth_method = "password"
self.username = username
self.password = password
class SshPublicKey(Ssh):
'''
SSH certificate login. The private key must have been uploaded already.
'''
def __init__(self, username, private_key_filename, private_key_passphrase):
super(SshPublicKey, self).__init__()
self.auth_method = "public key"
self.username = username
self.private_key = private_key_filename
self.private_key_passphrase = private_key_passphrase
class SshUserCert(SshPublicKey):
'''
SSH client certificate login. The private key and user cert must have been
uploaded already.
'''
def __init__(self, username, user_cert_filename, private_key_filename,
private_key_passphrase):
self.user_cert = user_cert_filename
super(SshUserCert, self) \
.__init__(username=username,
private_key_filename=private_key_filename,
private_key_passphrase=private_key_passphrase)
class AmazonAWS(object):
'''
Access ID and Secret for Amazon AWS
'''
category = "Cloud Services"
name = "Amazon AWS"
def __init__(self, access_key_id, secret_key):
self.access_key_id = access_key_id
self.secret_key = secret_key
class Salesforce(object):
'''
Username and password for Salesforce.com.
'''
category = "Cloud Services"
name = "Salesforce.com"
def __init__(self, username, password):
self.username = username
self.password = password
class PaloAltoPANOS(object):
'''
Username and password for a Palo Alto PAN-OS device through the web API.
'''
category = "Miscellaneous"
name = "Palo Alto Networks PAN-OS"
def __init__(self, username, password, port="443", verify_ssl=True):
self.username = username
self.password = password
self.port = port
self.verify_ssl = verify_ssl
class RHEV(object):
'''
Username and password for a Red Hat Enterprise Virtualization (RHEV) instance.
'''
category = "Miscellaneous"
name = "RHEV"
def __init__(self, username, password, port="443", verify_ssl=True):
self.username = username
self.password = password
self.port = port
self.verify_ssl = verify_ssl
class IBMiSeries(object):
'''
Username and password for an IBM iSeries.
'''
category = "Miscellaneous"
name = "IBM iSeries"
def __init__(self, username, password):
self.username = username
self.password = password
class VMwareVCenter(object):
'''
Username and password for a VMware vCenter
'''
category = "Miscellaneous"
name = "VMware vCenter SOAP API"
def __init__(self, username, password, host, https=True, port="443", verify_ssl=True):
self.username = username
self.password = password
self.host = host
self.port = port
self.https = https
self.verify_ssl = verify_ssl
class VMwareESX(object):
'''
Username and password for VMware ESX
'''
category = "Miscellaneous"
name = "VMware ESX SOAP API"
def __init__(self, username, password, dont_verify_ssl=False):
self.username = username
self.password = password
self.dont_verify_ssl = dont_verify_ssl
class Database(object):
'''
Does not provide complete credential information on its own. Create one of
the database-specific subclasses (DB2, Oracle, MySQL, PostgreSQL, SQLServer)
instead.
'''
category = "Database"
name = "Database"
def __init__(self, username, password, port, type):
self.type = type
self.username = username
self.password = password
self.port = port
class DB2(Database):
'''
Username and password for Database DB2
'''
def __init__(self, username, password, db_sid, port=50000):
super(DB2, self).__init__(username, password, port, "DB2")
self.db_sid = db_sid # dbname
class Oracle(Database):
'''
Username and password for Database Oracle
'''
def __init__(self, username, password, oracle_sid, port=1521, oracle_auth_type="SYSDBA"):
super(Oracle, self).__init__(username, password, port, "Oracle")
self.oracle_sid = oracle_sid # sid
self.oracle_auth_type = oracle_auth_type # SYSDBA, SYSOPER, NORMAL
class MySQL(Database):
'''
Username and password for Database MySQL
'''
def __init__(self, username, password, port=3306):
super(MySQL, self).__init__(username, password, port, "MySQL")
class PostgreSQL(Database):
'''
Username and password for Database PostgreSQL
'''
def __init__(self, username, password, port=5432):
super(PostgreSQL, self).__init__(username, password, port, "PostgreSQL")
class SQLServer(Database):
'''
Username and password for Database SQL Server
'''
def __init__(self, username, password, db_sid="", port=1433, auth_type="SQL"):
super(SQLServer, self).__init__(username, password, port, "SQL Server")
self.db_sid = db_sid # instance
self.sql_server_auth_type = auth_type # SQL, Windows
class MongoDB(object):
'''
Username and password for MongoDB
'''
category = "Database"
name = "MongoDB"
def __init__(self, username, password, database="admin", port=27017):
self.username = username
self.password = password
self.port = port
self.database = database # admin
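# Illustrative usage sketch (hypothetical values) showing how these credential
# objects are constructed before being attached to a scan policy:
#
#     win = WindowsPassword("Administrator", "hunter2", domain="CORP")
#     ssh = SshPassword("scanuser", "s3cret").sudo(password="s3cret")
#     db = MySQL("root", "dbpass", port=3306)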
|
cydcowley/Imperial-Visualizations
|
refs/heads/master
|
visuals_maths/Linear Algebra/Python/pointslinesplanes/utils.py
|
1
|
"""utils.py"""
import numpy as np
def mesh2d(xlim, ylim, n=5):
"""Create 2d mesh in sepecifies x and y axes limits, with number of points for every dimension separate."""
if isinstance(n, int):
xx = np.linspace(xlim[0],xlim[1],n)
yy = np.linspace(ylim[0],ylim[1],n)
elif isinstance(n, list):
xx = np.linspace(xlim[0],xlim[1],n[0])
yy = np.linspace(ylim[0],ylim[1],n[1])
else:
raise ValueError("Wrong number of points parameter")
return np.meshgrid(xx, yy)
def normalize(v):
"""Normalizes a 3d vector v, returns a 3d vector."""
magnitude = np.sqrt(v[0]**2+v[1]**2+v[2]**2)
if magnitude==0:
raise ValueError("Zero vector cannot be normalized.")
else:
return v/magnitude
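# Illustrative usage sketch of the helpers above:
#
#     xx, yy = mesh2d((-1, 1), (-1, 1), n=[10, 20])  # 10 x-points, 20 y-points
#     unit = normalize(np.array([3.0, 0.0, 4.0]))    # -> array([0.6, 0. , 0.8])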
|
kleinfeld/medpy
|
refs/heads/master
|
setup.py
|
1
|
#!/usr/bin/env python
# version: 0.1.2
import os
# setuptools >= 0.7 supports 'python setup.py develop'
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, Extension
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
# The maxflow graphcut wrapper using boost.python
maxflow = Extension('medpy.graphcut.maxflow',
define_macros = [('MAJOR_VERSION', '0'),
('MINOR_VERSION', '1')],
sources = ['lib/maxflow/src/maxflow.cpp', 'lib/maxflow/src/wrapper.cpp', 'lib/maxflow/src/graph.cpp'],
libraries = ['boost_python'],
extra_compile_args = ['-O0'])
setup(name='MedPy',
version='0.1.0', # major.minor.micro
description='Medical image processing in Python',
author='Oskar Maier',
author_email='[email protected]',
url='https://github.com/loli/medpy',
license='LICENSE.txt',
keywords='medical image processing dicom itk insight tool kit MRI CT US graph cut max-flow min-cut',
long_description=read('README.txt'),
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Environment :: Other Environment',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'Intended Audience :: Healthcare Industry',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License (GPL)',
#'Operating System :: MacOS :: MacOS X',
#'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Programming Language :: Python',
'Programming Language :: C++',
'Topic :: Scientific/Engineering :: Medical Science Apps.',
'Topic :: Scientific/Engineering :: Image Recognition'
],
install_requires=[
"scipy >= 0.9.0",
"numpy >= 1.6.1",
],
extras_require = {
'Nifti/Analyze': ["nibabel >= 1.3.0", "RXP"],
'Dicom': ["pydicom >= 0.9.7"],
'Additional image formats' : ["itk >= 3.16.0"]
},
packages = [
'medpy',
'medpy.core',
'medpy.features',
'medpy.filter',
'medpy.graphcut',
'medpy.io',
'medpy.itkvtk',
'medpy.itkvtk.filter',
'medpy.itkvtk.utilities',
'medpy.metric',
'medpy.occlusion',
'medpy.utilities'
],
scripts=[
'bin/medpy_anisotropic_diffusion.py',
'bin/medpy_apparent_diffusion_coefficient.py',
'bin/medpy_check_marker_intersection.py',
'bin/medpy_convert.py',
'bin/medpy_count_labels.py',
'bin/medpy_create_empty_volume_by_example.py',
'bin/medpy_dicom_slices_to_volume.py',
'bin/medpy_dicom_to_4D.py',
'bin/medpy_diff.py',
'bin/medpy_evaluate_miccai2007.py',
'bin/medpy_extract_min_max.py',
'bin/medpy_extract_sub_volume_auto.py',
'bin/medpy_extract_sub_volume_by_example.py',
'bin/medpy_extract_sub_volume.py',
'bin/medpy_gradient.py',
'bin/medpy_graphcut_label.py',
'bin/medpy_graphcut_label_bgreduced.py',
'bin/medpy_graphcut_label_w_regional.py',
'bin/medpy_graphcut_label_wsplit.py',
'bin/medpy_graphcut_voxel.py',
'bin/medpy_grid.py',
'bin/medpy_info.py',
'bin/medpy_intensity_range_standardization.py',
'bin/medpy_itk_gradient.py',
'bin/medpy_itk_smoothing.py',
'bin/medpy_itk_watershed.py',
'bin/medpy_join_xd_to_xplus1d.py',
'bin/medpy_merge.py',
'bin/medpy_morphology.py',
'bin/medpy_occlusion.py',
'bin/medpy_reduce.py',
'bin/medpy_resample.py',
'bin/medpy_reslice_3d_to_4d.py',
'bin/medpy_set_pixel_spacing.py',
'bin/medpy_shrink_image.py',
'bin/medpy_split_xd_to_xminus1d.py',
'bin/medpy_stack_sub_volumes.py',
'bin/medpy_superimposition.py',
'bin/medpy_swap_dimensions.py',
'bin/medpy_zoom_image.py'
],
ext_modules = [maxflow],
)
|
Klaudit/livestreamer
|
refs/heads/develop
|
src/livestreamer/plugins/letontv.py
|
34
|
import re
from livestreamer.plugin import Plugin
from livestreamer.plugin.api import http, validate
from livestreamer.stream import RTMPStream
PLAYER_URL = "http://leton.tv/player.php"
SWF_URL = "http://files.leton.tv/jwplayer.flash.swf"
_url_re = re.compile("""
http?://(\w+.)?leton.tv
(?:
/player\.php\?.*streampage=
)?
(?:
/broadcast/
)?
(?P<streampage>[^/?&]+)
""", re.VERBOSE)
_js_var_re = re.compile("var (?P<var>\w+)\s?=\s?'?(?P<value>[^;']+)'?;")
_rtmp_re = re.compile("/(?P<app>[^/]+)/(?P<playpath>.+)")
def _parse_server_ip(values):
octets = [
values["a"] / values["f"],
values["b"] / values["f"],
values["c"] / values["f"],
values["d"] / values["f"],
]
return ".".join(str(int(octet)) for octet in octets)
_schema = validate.Schema(
validate.transform(_js_var_re.findall),
validate.transform(dict),
{
"a": validate.transform(int),
"b": validate.transform(int),
"c": validate.transform(int),
"d": validate.transform(int),
"f": validate.transform(int),
"v_part": validate.text,
},
validate.union({
"server_ip": validate.transform(_parse_server_ip),
"path": validate.all(
validate.get("v_part"),
validate.transform(_rtmp_re.findall),
validate.get(0)
)
})
)
class LetOnTV(Plugin):
@classmethod
def can_handle_url(self, url):
return _url_re.match(url)
def _get_streams(self):
match = _url_re.match(self.url)
info = http.get(PLAYER_URL, params=match.groupdict(), schema=_schema)
if not info["path"]:
return
app, playpath = info["path"]
stream = RTMPStream(self.session, {
"rtmp": "rtmp://{0}/{1}".format(info["server_ip"], app),
"playpath": playpath,
"pageUrl": self.url,
"swfUrl": SWF_URL,
"live": True
})
return dict(live=stream)
__plugin__ = LetOnTV
|
pechersky/keras-molecules
|
refs/heads/master
|
preprocess.py
|
4
|
import argparse
import pandas
import h5py
import numpy as np
from molecules.utils import one_hot_array, one_hot_index
from sklearn.model_selection import train_test_split
MAX_NUM_ROWS = 500000
SMILES_COL_NAME = 'structure'
def get_arguments():
parser = argparse.ArgumentParser(description='Prepare data for training')
parser.add_argument('infile', type=str, help='Input file name')
parser.add_argument('outfile', type=str, help='Output file name')
parser.add_argument('--length', type=int, metavar='N', default = MAX_NUM_ROWS,
help='Maximum number of rows to include (randomly sampled).')
parser.add_argument('--smiles_column', type=str, default = SMILES_COL_NAME,
help="Name of the column that contains the SMILES strings. Default: %s" % SMILES_COL_NAME)
parser.add_argument('--property_column', type=str,
help="Name of the column that contains the property values to predict. Default: None")
return parser.parse_args()
def chunk_iterator(dataset, chunk_size=1000):
chunk_indices = np.array_split(np.arange(len(dataset)),
len(dataset)/chunk_size)
for chunk_ixs in chunk_indices:
chunk = dataset[chunk_ixs]
yield (chunk_ixs, chunk)
# The generator simply ends when the loop above is exhausted; an explicit
# ``raise StopIteration`` here would become a RuntimeError under PEP 479.
def main():
args = get_arguments()
data = pandas.read_hdf(args.infile, 'table')
keys = data[args.smiles_column].map(len) < 121
if args.length <= len(keys):
data = data[keys].sample(n = args.length)
else:
data = data[keys]
structures = data[args.smiles_column].map(lambda x: list(x.ljust(120)))
if args.property_column:
properties = data[args.property_column][keys]
del data
train_idx, test_idx = map(np.array,
train_test_split(structures.index, test_size = 0.20))
charset = list(reduce(lambda x, y: set(y) | x, structures, set()))
one_hot_encoded_fn = lambda row: map(lambda x: one_hot_array(x, len(charset)),
one_hot_index(row, charset))
h5f = h5py.File(args.outfile, 'w')
h5f.create_dataset('charset', data = charset)
def create_chunk_dataset(h5file, dataset_name, dataset, dataset_shape,
chunk_size=1000, apply_fn=None):
new_data = h5file.create_dataset(dataset_name, dataset_shape,
chunks=tuple([chunk_size]+list(dataset_shape[1:])))
for (chunk_ixs, chunk) in chunk_iterator(dataset):
if not apply_fn:
new_data[chunk_ixs, ...] = chunk
else:
new_data[chunk_ixs, ...] = apply_fn(chunk)
create_chunk_dataset(h5f, 'data_train', train_idx,
(len(train_idx), 120, len(charset)),
apply_fn=lambda ch: np.array(map(one_hot_encoded_fn,
structures[ch])))
create_chunk_dataset(h5f, 'data_test', test_idx,
(len(test_idx), 120, len(charset)),
apply_fn=lambda ch: np.array(map(one_hot_encoded_fn,
structures[ch])))
if args.property_column:
h5f.create_dataset('property_train', data = properties[train_idx])
h5f.create_dataset('property_test', data = properties[test_idx])
h5f.close()
if __name__ == '__main__':
main()
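# Illustrative invocation sketch (file and column names are hypothetical):
#   python preprocess.py smiles.h5 processed.h5 --length 100000 \
#       --smiles_column structure --property_column logp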
|
epiphany27/NewsBlur
|
refs/heads/master
|
vendor/readability/htmls.py
|
12
|
from lxml.html import tostring
import logging
import lxml.html
import re, sys
from .cleaners import normalize_spaces, clean_attributes
from .encoding import get_encoding
utf8_parser = lxml.html.HTMLParser(encoding='utf-8')
def build_doc(page):
if isinstance(page, unicode):
enc = None
page_unicode = page
else:
enc = get_encoding(page) or 'utf-8'
page_unicode = page.decode(enc, 'replace')
doc = lxml.html.document_fromstring(page_unicode.encode('utf-8', 'replace'), parser=utf8_parser)
return doc, enc
def js_re(src, pattern, flags, repl):
# Note: re.sub takes the replacement string first and the target string second.
return re.compile(pattern, flags).sub(repl.replace('$', '\\'), src)
def normalize_entities(cur_title):
entities = {
u'\u2014':'-',
u'\u2013':'-',
u'—': '-',
u'–': '-',
u'\u00A0': ' ',
u'\u00AB': '"',
u'\u00BB': '"',
u'"': '"',
}
for c, r in entities.iteritems():
if c in cur_title:
cur_title = cur_title.replace(c, r)
return cur_title
def norm_title(title):
return normalize_entities(normalize_spaces(title))
def get_title(doc):
title = doc.find('.//title')
if title is None or title.text is None or len(title.text) == 0:
return '[no-title]'
return norm_title(title.text)
def add_match(collection, text, orig):
text = norm_title(text)
if len(text.split()) >= 2 and len(text) >= 15:
if text.replace('"', '') in orig.replace('"', ''):
collection.add(text)
def shorten_title(doc):
title = doc.find('.//title')
if title is None or title.text is None or len(title.text) == 0:
return ''
title = orig = norm_title(title.text)
candidates = set()
for item in ['.//h1', './/h2', './/h3']:
for e in list(doc.iterfind(item)):
if e.text:
add_match(candidates, e.text, orig)
if e.text_content():
add_match(candidates, e.text_content(), orig)
for item in ['#title', '#head', '#heading', '.pageTitle', '.news_title', '.title', '.head', '.heading', '.contentheading', '.small_header_red']:
for e in doc.cssselect(item):
if e.text:
add_match(candidates, e.text, orig)
if e.text_content():
add_match(candidates, e.text_content(), orig)
if candidates:
title = sorted(candidates, key=len)[-1]
else:
for delimiter in [' | ', ' - ', ' :: ', ' / ']:
if delimiter in title:
parts = orig.split(delimiter)
if len(parts[0].split()) >= 4:
title = parts[0]
break
elif len(parts[-1].split()) >= 4:
title = parts[-1]
break
else:
if ': ' in title:
parts = orig.split(': ')
if len(parts[-1].split()) >= 4:
title = parts[-1]
else:
title = orig.split(': ', 1)[1]
if not 15 < len(title) < 150:
return orig
return title
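# For example, for a page whose <title> is
# "Senate passes the new budget bill | Example News" and whose <h1> is
# "Senate passes the new budget bill", shorten_title() returns the <h1> text,
# because it is a long-enough candidate contained in the full title.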
def get_body(doc):
[ elem.drop_tree() for elem in doc.xpath('.//script | .//link | .//style') ]
raw_html = unicode(tostring(doc.body or doc))
cleaned = clean_attributes(raw_html)
try:
#BeautifulSoup(cleaned) #FIXME do we really need to try loading it?
return cleaned
except Exception: #FIXME find the equivalent lxml error
#logging.error("cleansing broke html content: %s\n---------\n%s" % (raw_html, cleaned))
return raw_html
|
jwalgran/otm-core
|
refs/heads/develop
|
opentreemap/otm_comments/migrations/0003_auto_20160923_1413.py
|
4
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('otm_comments', '0002_auto_20150630_1556'),
]
operations = [
migrations.AlterModelOptions(
name='enhancedthreadedcomment',
options={'ordering': ('submit_date',), 'verbose_name': 'comment', 'verbose_name_plural': 'comments', 'permissions': [('can_moderate', 'Can moderate comments')]},
),
]
|
machinebrains/neat-python
|
refs/heads/master
|
examples/xor/xor2_array.py
|
1
|
"""
A parallel version of XOR using neatsociety.parallel with numpy arrays.
Since XOR is a simple experiment, a parallel version probably won't run any
faster than the single-process version, due to the overhead of
inter-process communication.
If your evaluation function is what's taking up most of your processing time
(and you should probably check by using a profiler while running
single-process), you should see a significant performance improvement by
evaluating in parallel.
This example is only intended to show how to do a parallel experiment
in neatsociety-python. You can of course roll your own parallelism mechanism
or inherit from ParallelEvaluator if you need to do something more complicated.
"""
from __future__ import print_function
import math
import os
import time
import numpy as np
from neatsociety import nn, parallel, population, visualize
# Network inputs and expected outputs.
xor_inputs = np.asarray(((0, 0), (0, 1), (1, 0), (1, 1)))
xor_outputs = np.asarray([0, 1, 1, 0])
xor_outputs = np.reshape(xor_outputs,(-1,1))
xor_sample_size = xor_outputs.shape[0]
def fitness(genome):
"""
This function will be run in parallel by ParallelEvaluator. It takes one
argument (a single genome) and should return one float (that genome's fitness).
Note that this function needs to be in module scope for multiprocessing.Pool
(which is what ParallelEvaluator uses) to find it. Because of this, make
sure you check for __main__ before executing any code (as we do here in the
last two lines in the file), otherwise you'll have made a fork bomb
instead of a neuroevolution demo. :)
"""
net = nn.create_feed_forward_phenotype(genome)
error = 0.0
outputs = net.array_activate(xor_inputs)
sum_square_errors = (xor_outputs - outputs) ** 2
error_sum = np.sum(sum_square_errors)
return 1.0 - np.sqrt(error_sum / xor_sample_size)
def run():
t0 = time.time()
# Get the path to the config file, which is assumed to live in
# the same directory as this script.
local_dir = os.path.dirname(__file__)
config_path = os.path.join(local_dir, 'xor2_config')
# Use a pool of four workers to evaluate fitness in parallel.
pe = parallel.ParallelEvaluator(fitness,3)
pop = population.Population(config_path)
pop.run(pe.evaluate, 400)
print("total evolution time {0:.3f} sec".format((time.time() - t0)))
print("time per generation {0:.3f} sec".format(((time.time() - t0) / pop.generation)))
print('Number of evaluations: {0:d}'.format(pop.total_evaluations))
# Verify network output against training data.
print('\nBest network output:')
winner = pop.statistics.best_genome()
net = nn.create_feed_forward_phenotype(winner)
outputs = net.array_activate(xor_inputs)
print("Expected XOR output : ", xor_outputs)
print("Generated output : ", outputs)
# Visualize the winner network and plot statistics.
visualize.plot_stats(pop.statistics)
visualize.plot_species(pop.statistics)
visualize.draw_net(winner, view=True)
if __name__ == '__main__':
run()
|
jbenden/ansible
|
refs/heads/devel
|
lib/ansible/modules/packaging/language/composer.py
|
8
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Dimitrios Tydeas Mengidis <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: composer
author:
- "Dimitrios Tydeas Mengidis (@dmtrs)"
- "René Moser (@resmo)"
short_description: Dependency Manager for PHP
version_added: "1.6"
description:
- >
Composer is a tool for dependency management in PHP. It allows you to
declare the dependent libraries your project needs and it will install
them in your project for you.
options:
command:
version_added: "1.8"
description:
- Composer command like "install", "update" and so on.
required: false
default: install
arguments:
version_added: "2.0"
description:
- Composer arguments like required package, version and so on.
required: false
default: null
executable:
version_added: "2.4"
description:
- Path to PHP Executable on the remote host, if PHP is not in PATH
required: false
default: null
aliases: [ "php_path" ]
working_dir:
description:
- Directory of your project (see --working-dir). This is required when
the command is not run globally.
- Will be ignored if C(global_command=true).
required: false
default: null
aliases: [ "working-dir" ]
global_command:
version_added: "2.4"
description:
- Runs the specified command globally.
required: false
choices: [ true, false]
default: false
aliases: [ "global-command" ]
prefer_source:
description:
- Forces installation from package sources when possible (see --prefer-source).
required: false
default: false
choices: [ true, false]
aliases: [ "prefer-source" ]
prefer_dist:
description:
- Forces installation from package dist even for dev versions (see --prefer-dist).
required: false
default: false
choices: [ true, false]
aliases: [ "prefer-dist" ]
no_dev:
description:
- Disables installation of require-dev packages (see --no-dev).
required: false
default: true
choices: [ true, false]
aliases: [ "no-dev" ]
no_scripts:
description:
- Skips the execution of all scripts defined in composer.json (see --no-scripts).
required: false
default: false
choices: [ true, false]
aliases: [ "no-scripts" ]
no_plugins:
description:
- Disables all plugins ( see --no-plugins ).
required: false
default: false
choices: [ true, false]
aliases: [ "no-plugins" ]
optimize_autoloader:
description:
- Optimize autoloader during autoloader dump (see --optimize-autoloader).
- Convert PSR-0/4 autoloading to classmap to get a faster autoloader.
- Recommended especially for production, but can take a bit of time to run.
required: false
default: true
choices: [ true, false]
aliases: [ "optimize-autoloader" ]
ignore_platform_reqs:
version_added: "2.0"
description:
- Ignore php, hhvm, lib-* and ext-* requirements and force the installation even if the local machine does not fulfill these.
required: false
default: false
choices: [ true, false]
aliases: [ "ignore-platform-reqs" ]
requirements:
- php
- composer installed in bin path (recommended /usr/local/bin)
notes:
- Default options that are always appended in each execution are --no-ansi, --no-interaction and --no-progress if available.
- We received reports about issues on macOS if composer was installed by Homebrew. Please use the official install method to avoid issues.
'''
EXAMPLES = '''
# Downloads and installs all the libs and dependencies outlined in the /path/to/project/composer.lock
- composer:
command: install
working_dir: /path/to/project
- composer:
command: require
arguments: my/package
working_dir: /path/to/project
# Clone project and install with all dependencies
- composer:
command: create-project
arguments: package/package /path/to/project ~1.0
working_dir: /path/to/project
prefer_dist: yes
# Installs package globally
- composer:
command: require
global_command: yes
arguments: my/package
'''
import re
from ansible.module_utils.basic import AnsibleModule
def parse_out(string):
return re.sub("\s+", " ", string).strip()
def has_changed(string):
return "Nothing to install or update" not in string
def get_available_options(module, command='install'):
# get all available options from a composer command using composer help to json
rc, out, err = composer_command(module, "help %s --format=json" % command)
if rc != 0:
output = parse_out(err)
module.fail_json(msg=output)
command_help_json = module.from_json(out)
return command_help_json['definition']['options']
def composer_command(module, command, arguments="", options=None, global_command=False):
if options is None:
options = []
if module.params['executable'] is None:
php_path = module.get_bin_path("php", True, ["/usr/local/bin"])
else:
php_path = module.params['executable']
composer_path = module.get_bin_path("composer", True, ["/usr/local/bin"])
cmd = "%s %s %s %s %s %s" % (php_path, composer_path, "global" if global_command else "", command, " ".join(options), arguments)
return module.run_command(cmd)
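# For example, composer_command(module, "install", options=["--no-ansi"])
# builds roughly "<php> <composer> install --no-ansi" (with "global" inserted
# after the composer path when global_command=True).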
def main():
module = AnsibleModule(
argument_spec=dict(
command=dict(default="install", type="str", required=False),
arguments=dict(default="", type="str", required=False),
executable=dict(type="path", required=False, aliases=["php_path"]),
working_dir=dict(type="path", aliases=["working-dir"]),
global_command=dict(default=False, type="bool", aliases=["global-command"]),
prefer_source=dict(default=False, type="bool", aliases=["prefer-source"]),
prefer_dist=dict(default=False, type="bool", aliases=["prefer-dist"]),
no_dev=dict(default=True, type="bool", aliases=["no-dev"]),
no_scripts=dict(default=False, type="bool", aliases=["no-scripts"]),
no_plugins=dict(default=False, type="bool", aliases=["no-plugins"]),
optimize_autoloader=dict(default=True, type="bool", aliases=["optimize-autoloader"]),
ignore_platform_reqs=dict(default=False, type="bool", aliases=["ignore-platform-reqs"]),
),
required_if=[('global_command', False, ['working_dir'])],
supports_check_mode=True
)
# Get composer command with fallback to default
command = module.params['command']
if re.search(r"\s", command):
module.fail_json(msg="Use the 'arguments' param for passing arguments with the 'command'")
arguments = module.params['arguments']
global_command = module.params['global_command']
available_options = get_available_options(module=module, command=command)
options = []
# Default options
default_options = [
'no-ansi',
'no-interaction',
'no-progress',
]
for option in default_options:
if option in available_options:
option = "--%s" % option
options.append(option)
if not global_command:
options.extend(['--working-dir', "'%s'" % module.params['working_dir']])
option_params = {
'prefer_source': 'prefer-source',
'prefer_dist': 'prefer-dist',
'no_dev': 'no-dev',
'no_scripts': 'no-scripts',
        'no_plugins': 'no-plugins',
'optimize_autoloader': 'optimize-autoloader',
'ignore_platform_reqs': 'ignore-platform-reqs',
}
for param, option in option_params.items():
if module.params.get(param) and option in available_options:
option = "--%s" % option
options.append(option)
if module.check_mode:
options.append('--dry-run')
rc, out, err = composer_command(module, command, arguments, options, global_command)
if rc != 0:
output = parse_out(err)
module.fail_json(msg=output, stdout=err)
else:
# Composer version > 1.0.0-alpha9 now use stderr for standard notification messages
output = parse_out(out + err)
module.exit_json(changed=has_changed(output), msg=output, stdout=out + err)
if __name__ == '__main__':
main()
|
koobonil/Boss2D
|
refs/heads/master
|
Boss2D/addon/tensorflow-1.2.1_for_boss/tensorflow/contrib/distributions/python/ops/bijectors/sigmoid_centered_impl.py
|
104
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SigmoidCentered bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops.bijectors import softmax_centered
__all__ = [
"SigmoidCentered",
]
class SigmoidCentered(softmax_centered.SoftmaxCentered):
"""Bijector which computes Y = g(X) = exp([X 0]) / (1 + exp(-X)).
Equivalent to: `bijector.SoftmaxCentered(event_ndims=0)`.
See `bijector.SoftmaxCentered` for more details.
"""
def __init__(self, validate_args=False, name="sigmoid_centered"):
super(SigmoidCentered, self).__init__(
event_ndims=0, validate_args=validate_args, name=name)
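# Usage sketch: the bijector maps a scalar logit to a point on the 1-simplex, so
# forward([0.]) yields a tensor that evaluates to approximately [[0.5, 0.5]].
#   bijector = SigmoidCentered()
#   y = bijector.forward([0.])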
|
thanhacun/odoo
|
refs/heads/8.0
|
addons/base_report_designer/plugin/openerp_report_designer/bin/script/lib/functions.py
|
292
|
##########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer [email protected]
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
##############################################################################
import uno
import xmlrpclib
import re
import socket
import cPickle
import marshal
import tempfile
if __name__ != "package":
from gui import *
from logreport import *
from rpc import *
database="test"
uid = 1
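# genTree recursively walks the fields of an OpenERP model via fields_get over XML-RPC:
# fields whose type matches `ending` (and is not excluded by `ending_excl`) are added to
# the dropdown `insField` with their label, their technical path is appended to `aList`,
# and relational fields whose type is listed in `recur` are descended into up to `level` levels.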
def genTree(object, aList, insField, host, level=3, ending=None, ending_excl=None, recur=None, root='', actualroot=""):
if ending is None:
ending = []
if ending_excl is None:
ending_excl = []
if recur is None:
recur = []
try:
global url
sock=RPCSession(url)
global passwd
res = sock.execute(database, uid, passwd, object , 'fields_get')
key = res.keys()
key.sort()
for k in key:
if (not ending or res[k]['type'] in ending) and ((not ending_excl) or not (res[k]['type'] in ending_excl)):
insField.addItem(root+'/'+res[k]["string"],len(aList))
aList.append(actualroot+'/'+k)
if (res[k]['type'] in recur) and (level>0):
genTree(res[k]['relation'],aList,insField,host ,level-1, ending, ending_excl, recur,root+'/'+res[k]["string"],actualroot+'/'+k)
except:
obj=Logger()
import traceback,sys
info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
obj.log_write('Function', LOG_ERROR, info)
def VariableScope(oTcur, insVariable, aObjectList, aComponentAdd, aItemList, sTableName=""):
if sTableName.find(".") != -1:
for i in range(len(aItemList)):
if aComponentAdd[i]==sTableName:
sLVal=aItemList[i][1][aItemList[i][1].find(",'")+2:aItemList[i][1].find("')")]
for j in range(len(aObjectList)):
if aObjectList[j][:aObjectList[j].find("(")] == sLVal:
insVariable.append(aObjectList[j])
VariableScope(oTcur,insVariable,aObjectList,aComponentAdd,aItemList, sTableName[:sTableName.rfind(".")])
else:
for i in range(len(aItemList)):
if aComponentAdd[i]==sTableName:
sLVal=aItemList[i][1][aItemList[i][1].find(",'")+2:aItemList[i][1].find("')")]
for j in range(len(aObjectList)):
if aObjectList[j][:aObjectList[j].find("(")] == sLVal and sLVal!="":
insVariable.append(aObjectList[j])
def getList(aObjectList, host, count):
desktop=getDesktop()
doc =desktop.getCurrentComponent()
docinfo=doc.getDocumentInfo()
sMain=""
if not count == 0:
if count >= 1:
oParEnum = doc.getTextFields().createEnumeration()
while oParEnum.hasMoreElements():
oPar = oParEnum.nextElement()
if oPar.supportsService("com.sun.star.text.TextField.DropDown"):
sItem=oPar.Items[1]
if sItem[sItem.find("(")+1:sItem.find(",")]=="objects":
sMain = sItem[sItem.find(",'")+2:sItem.find("')")]
oParEnum = doc.getTextFields().createEnumeration()
while oParEnum.hasMoreElements():
oPar = oParEnum.nextElement()
if oPar.supportsService("com.sun.star.text.TextField.DropDown"):
sItem=oPar.Items[1]
if sItem[sItem.find("[[ ")+3:sItem.find("(")]=="repeatIn":
if sItem[sItem.find("(")+1:sItem.find(",")]=="objects":
aObjectList.append(sItem[sItem.rfind(",'")+2:sItem.rfind("')")] + "(" + docinfo.getUserFieldValue(3) + ")")
else:
sTemp=sItem[sItem.find("(")+1:sItem.find(",")]
if sMain == sTemp[:sTemp.find(".")]:
getRelation(docinfo.getUserFieldValue(3), sItem[sItem.find(".")+1:sItem.find(",")], sItem[sItem.find(",'")+2:sItem.find("')")],aObjectList,host)
else:
sPath=getPath(sItem[sItem.find("(")+1:sItem.find(",")], sMain)
getRelation(docinfo.getUserFieldValue(3), sPath, sItem[sItem.find(",'")+2:sItem.find("')")],aObjectList,host)
else:
aObjectList.append("List of " + docinfo.getUserFieldValue(3))
def getRelation(sRelName, sItem, sObjName, aObjectList, host):
global url
sock=RPCSession(url)
global passwd
res = sock.execute(database, uid, passwd, sRelName , 'fields_get')
key = res.keys()
for k in key:
if sItem.find(".") == -1:
if k == sItem:
aObjectList.append(sObjName + "(" + res[k]['relation'] + ")")
return 0
if k == sItem[:sItem.find(".")]:
getRelation(res[k]['relation'], sItem[sItem.find(".")+1:], sObjName,aObjectList,host)
def getPath(sPath, sMain):
desktop=getDesktop()
doc =desktop.getCurrentComponent()
oParEnum = doc.getTextFields().createEnumeration()
while oParEnum.hasMoreElements():
oPar = oParEnum.nextElement()
if oPar.supportsService("com.sun.star.text.TextField.DropDown"):
sItem=oPar.Items[1]
if sPath[:sPath.find(".")] == sMain:
                break
else:
                res = re.findall(r'\[\[ *([a-zA-Z0-9_.]+) *\]\]', sPath)
                if len(res) != 0:
if sItem[sItem.find(",'")+2:sItem.find("')")] == sPath[:sPath.find(".")]:
sPath = sItem[sItem.find("(")+1:sItem.find(",")] + sPath[sPath.find("."):]
getPath(sPath, sMain)
return sPath
def EnumDocument(aItemList, aComponentAdd):
desktop = getDesktop()
parent=""
bFlag = False
Doc =desktop.getCurrentComponent()
#oVC = Doc.CurrentController.getViewCursor()
oParEnum = Doc.getTextFields().createEnumeration()
while oParEnum.hasMoreElements():
oPar = oParEnum.nextElement()
if oPar.Anchor.TextTable:
#parent = oPar.Anchor.TextTable.Name
getChildTable(oPar.Anchor.TextTable,aItemList,aComponentAdd)
elif oPar.Anchor.TextSection:
parent = oPar.Anchor.TextSection.Name
elif oPar.Anchor.Text:
parent = "Document"
sItem=oPar.Items[1].replace(' ',"")
if sItem[sItem.find("[[ ")+3:sItem.find("(")]=="repeatIn" and not oPar.Items in aItemList:
templist=oPar.Items[0],sItem
aItemList.append( templist )
aComponentAdd.append( parent )
def getChildTable(oPar, aItemList, aComponentAdd, sTableName=""):
sNames = oPar.getCellNames()
bEmptyTableFlag=True
for val in sNames:
oCell = oPar.getCellByName(val)
oCurEnum = oCell.createEnumeration()
while oCurEnum.hasMoreElements():
try:
oCur = oCurEnum.nextElement()
if oCur.supportsService("com.sun.star.text.TextTable"):
if sTableName=="":
getChildTable(oCur,aItemList,aComponentAdd,oPar.Name)
else:
getChildTable(oCur,aItemList,aComponentAdd,sTableName+"."+oPar.Name)
else:
oSecEnum = oCur.createEnumeration()
while oSecEnum.hasMoreElements():
oSubSection = oSecEnum.nextElement()
if oSubSection.supportsService("com.sun.star.text.TextField"):
bEmptyTableFlag=False
sItem=oSubSection.TextField.Items[1]
if sItem[sItem.find("[[ ")+3:sItem.find("(")]=="repeatIn":
if aItemList.__contains__(oSubSection.TextField.Items)==False:
aItemList.append(oSubSection.TextField.Items)
if sTableName=="":
if aComponentAdd.__contains__(oPar.Name)==False:
aComponentAdd.append(oPar.Name)
else:
if aComponentAdd.__contains__(sTableName+"."+oPar.Name)==False:
aComponentAdd.append(sTableName+"."+oPar.Name)
except:
obj=Logger()
import traceback,sys
info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
obj.log_write('Function', LOG_ERROR, info)
if bEmptyTableFlag==True:
aItemList.append((u'',u''))
if sTableName=="":
if aComponentAdd.__contains__(oPar.Name)==False:
aComponentAdd.append(oPar.Name)
else:
if aComponentAdd.__contains__(sTableName+"."+oPar.Name)==False:
aComponentAdd.append(sTableName+"."+oPar.Name)
return 0
def getRecersiveSection(oCurrentSection, aSectionList):
desktop=getDesktop()
doc =desktop.getCurrentComponent()
oParEnum=doc.getText().createEnumeration()
aSectionList.append(oCurrentSection.Name)
if oCurrentSection.ParentSection:
getRecersiveSection(oCurrentSection.ParentSection,aSectionList)
else:
return
def GetAFileName():
oFileDialog=None
iAccept=None
sPath=""
InitPath=""
oUcb=None
oFileDialog = createUnoService("com.sun.star.ui.dialogs.FilePicker")
oUcb = createUnoService("com.sun.star.ucb.SimpleFileAccess")
oFileDialog.appendFilter("Odoo Report File","*.sxw")
oFileDialog.setCurrentFilter("Odoo Report File")
if InitPath == "":
InitPath =tempfile.gettempdir()
#End If
if oUcb.exists(InitPath):
oFileDialog.setDisplayDirectory(InitPath)
#End If
iAccept = oFileDialog.execute()
if iAccept == 1:
sPath = oFileDialog.Files[0]
oFileDialog.dispose()
return sPath
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
grimmjow8/ansible
|
refs/heads/devel
|
lib/ansible/modules/database/mysql/mysql_variables.py
|
25
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Ansible module to manage mysql variables
(c) 2013, Balazs Pocze <[email protected]>
Certain parts are taken from Mark Theunissen's mysqldb module
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: mysql_variables
short_description: Manage MySQL global variables
description:
- Query / Set MySQL variables
version_added: 1.3
author: "Balazs Pocze (@banyek)"
options:
variable:
description:
- Variable name to operate
required: True
value:
description:
- If set, then sets variable value to this
required: False
extends_documentation_fragment: mysql
'''
EXAMPLES = '''
# Check for sync_binlog setting
- mysql_variables:
variable: sync_binlog
# Set read_only variable to 1
- mysql_variables:
variable: read_only
value: 1
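# Set a variable using explicit login credentials (illustrative values)
- mysql_variables:
    variable: max_connections
    value: 500
    login_user: admin
    login_password: secret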
'''
import warnings
from re import match
try:
import MySQLdb
except ImportError:
mysqldb_found = False
else:
mysqldb_found = True
def typedvalue(value):
"""
Convert value to number whenever possible, return same value
otherwise.
>>> typedvalue('3')
3
>>> typedvalue('3.0')
3.0
>>> typedvalue('foobar')
'foobar'
"""
try:
return int(value)
except ValueError:
pass
try:
return float(value)
except ValueError:
pass
return value
def getvariable(cursor, mysqlvar):
cursor.execute("SHOW VARIABLES WHERE Variable_name = %s", (mysqlvar,))
mysqlvar_val = cursor.fetchall()
    if len(mysqlvar_val) == 1:
return mysqlvar_val[0][1]
else:
return None
def setvariable(cursor, mysqlvar, value):
""" Set a global mysql variable to a given value
    The DB driver will handle quoting of the given value based on its
    type, thus numeric strings like '3.0' or '8' are illegal; they
    should be passed as numeric literals.
"""
query = "SET GLOBAL %s = " % mysql_quote_identifier(mysqlvar, 'vars')
try:
cursor.execute(query + "%s", (value,))
cursor.fetchall()
result = True
except Exception:
e = get_exception()
result = str(e)
return result
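# Usage sketch (assumes an open MySQLdb cursor): values must already be typed, because
# the driver quotes them based on their Python type, e.g.
#   setvariable(cursor, "max_connections", 500)            # numeric literal, emitted unquoted
#   setvariable(cursor, "tx_isolation", "READ-COMMITTED")  # string value, quoted by the driver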
def main():
module = AnsibleModule(
argument_spec = dict(
login_user=dict(default=None),
login_password=dict(default=None, no_log=True),
login_host=dict(default="localhost"),
login_port=dict(default=3306, type='int'),
login_unix_socket=dict(default=None),
variable=dict(default=None),
value=dict(default=None),
ssl_cert=dict(default=None),
ssl_key=dict(default=None),
ssl_ca=dict(default=None),
connect_timeout=dict(default=30, type='int'),
config_file=dict(default="~/.my.cnf", type="path")
)
)
user = module.params["login_user"]
password = module.params["login_password"]
ssl_cert = module.params["ssl_cert"]
ssl_key = module.params["ssl_key"]
ssl_ca = module.params["ssl_ca"]
connect_timeout = module.params['connect_timeout']
config_file = module.params['config_file']
db = 'mysql'
mysqlvar = module.params["variable"]
value = module.params["value"]
if mysqlvar is None:
module.fail_json(msg="Cannot run without variable to operate with")
if match('^[0-9a-z_]+$', mysqlvar) is None:
module.fail_json(msg="invalid variable name \"%s\"" % mysqlvar)
if not mysqldb_found:
module.fail_json(msg="the python mysqldb module is required")
else:
warnings.filterwarnings('error', category=MySQLdb.Warning)
try:
cursor = mysql_connect(module, user, password, config_file, ssl_cert, ssl_key, ssl_ca, db,
connect_timeout=connect_timeout)
except Exception:
e = get_exception()
if os.path.exists(config_file):
module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. Exception message: %s" % (config_file, e))
else:
module.fail_json(msg="unable to find %s. Exception message: %s" % (config_file, e))
mysqlvar_val = getvariable(cursor, mysqlvar)
if mysqlvar_val is None:
module.fail_json(msg="Variable not available \"%s\"" % mysqlvar, changed=False)
if value is None:
module.exit_json(msg=mysqlvar_val)
else:
# Type values before using them
value_wanted = typedvalue(value)
value_actual = typedvalue(mysqlvar_val)
if value_wanted == value_actual:
module.exit_json(msg="Variable already set to requested value", changed=False)
try:
result = setvariable(cursor, mysqlvar, value_wanted)
except SQLParseError:
e = get_exception()
result = str(e)
if result is True:
module.exit_json(msg="Variable change succeeded prev_value=%s" % value_actual, changed=True)
else:
module.fail_json(msg=result, changed=False)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.database import *
from ansible.module_utils.mysql import *
if __name__ == '__main__':
main()
|
astrobin/astrobin
|
refs/heads/master
|
astrobin_apps_iotd/api/serializers/hidden_image_serializer.py
|
1
|
from rest_framework import serializers
from astrobin_apps_iotd.models import IotdHiddenImage
class HiddenImageSerializer(serializers.ModelSerializer):
user = serializers.PrimaryKeyRelatedField(read_only=True, default=serializers.CurrentUserDefault())
class Meta:
model = IotdHiddenImage
fields = (
'id',
'user',
'image',
'created',
)
|
hynnet/hiwifi-openwrt-HC5661-HC5761
|
refs/heads/master
|
staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/curses/panel.py
|
155
|
"""curses.panel
Module for using panels with curses.
"""
__revision__ = "$Id$"
from _curses_panel import *
|
DirtyUnicorns/android_external_chromium-org
|
refs/heads/kitkat
|
chrome/common/extensions/docs/server2/handler.py
|
68
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from cron_servlet import CronServlet
from instance_servlet import InstanceServlet
from patch_servlet import PatchServlet
from servlet import Servlet, Request, Response
from test_servlet import TestServlet
_DEFAULT_SERVLET = InstanceServlet.GetConstructor()
_SERVLETS = {
'cron': CronServlet,
'patch': PatchServlet,
'test': TestServlet,
}
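# Request paths that start with an underscore are routed to the matching servlet above
# (for example a path like _cron/... is handled by CronServlet); all other paths fall
# through to the default instance servlet.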
class Handler(Servlet):
def Get(self):
path = self._request.path
if path.startswith('_'):
servlet_path = path[1:]
      if '/' not in servlet_path:
servlet_path += '/'
servlet_name, servlet_path = servlet_path.split('/', 1)
servlet = _SERVLETS.get(servlet_name)
if servlet is None:
return Response.NotFound('"%s" servlet not found' % servlet_path)
else:
servlet_path = path
servlet = _DEFAULT_SERVLET
return servlet(
Request(servlet_path, self._request.host, self._request.headers)).Get()
|