ext (string, 9 classes) | sha (string, 40 chars) | content (string, 3 to 1.04M chars) |
---|---|---|
py | 1a3d6cb1189e5d1885f403c17b9694eae9472f0a | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Summarizer.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Summarizer(object):
def setupUi(self, Summarizer):
Summarizer.setObjectName("Summarizer")
Summarizer.resize(692, 323)
self.centralwidget = QtWidgets.QWidget(Summarizer)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.gridLayout_2 = QtWidgets.QGridLayout()
self.gridLayout_2.setObjectName("gridLayout_2")
self.pushButton_rootdir = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButton_rootdir.sizePolicy().hasHeightForWidth())
self.pushButton_rootdir.setSizePolicy(sizePolicy)
self.pushButton_rootdir.setObjectName("pushButton_rootdir")
self.gridLayout_2.addWidget(self.pushButton_rootdir, 0, 0, 1, 1)
self.p_root_dir = QtWidgets.QLineEdit(self.centralwidget)
self.p_root_dir.setObjectName("p_root_dir")
self.gridLayout_2.addWidget(self.p_root_dir, 0, 1, 1, 2)
self.p_feature_type = QtWidgets.QComboBox(self.centralwidget)
self.p_feature_type.setObjectName("p_feature_type")
self.gridLayout_2.addWidget(self.p_feature_type, 1, 1, 1, 1)
self.label_4 = QtWidgets.QLabel(self.centralwidget)
self.label_4.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_4.setObjectName("label_4")
self.gridLayout_2.addWidget(self.label_4, 1, 0, 1, 1)
self.p_is_manual_index = QtWidgets.QCheckBox(self.centralwidget)
self.p_is_manual_index.setObjectName("p_is_manual_index")
self.gridLayout_2.addWidget(self.p_is_manual_index, 1, 2, 1, 1)
self.label_5 = QtWidgets.QLabel(self.centralwidget)
self.label_5.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_5.setObjectName("label_5")
self.gridLayout_2.addWidget(self.label_5, 2, 0, 1, 1)
self.p_summary_type = QtWidgets.QComboBox(self.centralwidget)
self.p_summary_type.setObjectName("p_summary_type")
self.gridLayout_2.addWidget(self.p_summary_type, 2, 1, 1, 1)
self.verticalLayout_3.addLayout(self.gridLayout_2)
self.FoldArgs = QtWidgets.QWidget(self.centralwidget)
self.FoldArgs.setObjectName("FoldArgs")
self.gridLayout = QtWidgets.QGridLayout(self.FoldArgs)
self.gridLayout.setObjectName("gridLayout")
self.p_frac_worms_to_keep = QtWidgets.QDoubleSpinBox(self.FoldArgs)
self.p_frac_worms_to_keep.setMaximum(1.0)
self.p_frac_worms_to_keep.setSingleStep(0.01)
self.p_frac_worms_to_keep.setObjectName("p_frac_worms_to_keep")
self.gridLayout.addWidget(self.p_frac_worms_to_keep, 0, 1, 1, 1)
self.p_n_folds = QtWidgets.QSpinBox(self.FoldArgs)
self.p_n_folds.setObjectName("p_n_folds")
self.gridLayout.addWidget(self.p_n_folds, 0, 0, 1, 1)
self.label_2 = QtWidgets.QLabel(self.FoldArgs)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_2.sizePolicy().hasHeightForWidth())
self.label_2.setSizePolicy(sizePolicy)
self.label_2.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignHCenter)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 1, 1, 1, 1)
self.label = QtWidgets.QLabel(self.FoldArgs)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
self.label.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignHCenter)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 1, 0, 1, 1)
self.label_3 = QtWidgets.QLabel(self.FoldArgs)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_3.sizePolicy().hasHeightForWidth())
self.label_3.setSizePolicy(sizePolicy)
self.label_3.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignHCenter)
self.label_3.setObjectName("label_3")
self.gridLayout.addWidget(self.label_3, 1, 2, 1, 1)
self.p_time_sample_seconds = QtWidgets.QDoubleSpinBox(self.FoldArgs)
self.p_time_sample_seconds.setMaximum(100000000000.0)
self.p_time_sample_seconds.setObjectName("p_time_sample_seconds")
self.gridLayout.addWidget(self.p_time_sample_seconds, 0, 2, 1, 1)
self.verticalLayout_3.addWidget(self.FoldArgs)
self.pushButton_start = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_start.setObjectName("pushButton_start")
self.verticalLayout_3.addWidget(self.pushButton_start)
Summarizer.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(Summarizer)
self.menubar.setGeometry(QtCore.QRect(0, 0, 692, 22))
self.menubar.setObjectName("menubar")
Summarizer.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(Summarizer)
self.statusbar.setObjectName("statusbar")
Summarizer.setStatusBar(self.statusbar)
self.retranslateUi(Summarizer)
QtCore.QMetaObject.connectSlotsByName(Summarizer)
def retranslateUi(self, Summarizer):
_translate = QtCore.QCoreApplication.translate
Summarizer.setWindowTitle(_translate("Summarizer", "MainWindow"))
self.pushButton_rootdir.setText(_translate("Summarizer", "Root Directory"))
self.label_4.setText(_translate("Summarizer", "Features Type"))
self.p_is_manual_index.setText(_translate("Summarizer", "Use manually \n"
"edited trajectories?"))
self.label_5.setText(_translate("Summarizer", "Summary Type"))
self.label_2.setText(_translate("Summarizer", "Fraction of trajectories\n"
" to sample"))
self.label.setText(_translate("Summarizer", "Number of Folds"))
self.label_3.setText(_translate("Summarizer", "Time (seconds)\n"
" to sample"))
self.pushButton_start.setText(_translate("Summarizer", "START"))
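# Hedged usage sketch (illustration only): because this module is regenerated
# from Summarizer.ui and any edits to it are lost, the window is normally
# wired up in a separate script along these lines:
#
#   import sys
#   app = QtWidgets.QApplication(sys.argv)
#   window = QtWidgets.QMainWindow()
#   ui = Ui_Summarizer()
#   ui.setupUi(window)
#   window.show()
#   sys.exit(app.exec_())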
|
py | 1a3d6cd00accac43a14877b4eeff617dde4178e8 | # coding=utf-8
from OTLMOW.PostenMapping.StandaardPost import StandaardPost
from OTLMOW.PostenMapping.StandaardPostMapping import StandaardPostMapping
# Generated with PostenCreator. To modify: extend, do not edit
class Post060333438(StandaardPost):
def __init__(self):
super().__init__(
nummer='0603.33438',
beschrijving='Gewassen betonstraatstenen, wit met kleurondersteunende granulaten volgens 6-3.4, 220 x 220, 80 mm',
meetstaateenheid='M2',
mappings=[StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanBetonstraatsteen',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanBetonstraatsteen.afmetingVanBestratingselementLxB',
dotnotatie='afmetingVanBestratingselementLxB',
defaultWaarde='220-x-220',
range='',
usagenote='',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0603.33438')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanBetonstraatsteen',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanBetonstraatsteen.type',
dotnotatie='type',
defaultWaarde='witte-met-kleurondersteunende-granulaten',
range='',
usagenote='',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0603.33438')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanBetonstraatsteen',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Laag.laagRol',
dotnotatie='laagRol',
defaultWaarde='straatlaag',
range='',
usagenote='',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0603.33438')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanBetonstraatsteen',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanBetonstraatsteen.afwerking',
dotnotatie='afwerking',
defaultWaarde='gewassen',
range='',
usagenote='',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0603.33438')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanBetonstraatsteen',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#LaagDikte.dikte',
dotnotatie='dikte',
defaultWaarde='8',
range='',
usagenote='cm^^cdt:ucumunit',
isMeetstaatAttr=0,
isAltijdInTeVullen=0,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0603.33438')
, StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanBetonstraatsteen',
attribuutURI='https://wegenenverkeer.data.vlaanderen.be/ns/abstracten#Laag.oppervlakte',
dotnotatie='oppervlakte',
defaultWaarde='',
range='',
usagenote='m2^^cdt:ucumunit',
isMeetstaatAttr=1,
isAltijdInTeVullen=1,
isBasisMapping=1,
mappingStatus='gemapt 2.0',
mappingOpmerking='',
standaardpostnummer='0603.33438')])
|
py | 1a3d6e442b369b8a3614c20701bc31a508a090e1 | #!/usr/bin/env python3
# @generated AUTOGENERATED file. Do not Change!
from dataclasses import dataclass, field
from datetime import datetime
from enum import Enum
from functools import partial
from typing import Any, Callable, List, Mapping, Optional
from dataclasses_json import dataclass_json
from marshmallow import fields as marshmallow_fields
from .datetime_utils import fromisoformat
DATETIME_FIELD = field(
metadata={
"dataclasses_json": {
"encoder": datetime.isoformat,
"decoder": fromisoformat,
"mm_field": marshmallow_fields.DateTime(format="iso"),
}
}
)
@dataclass_json
@dataclass
class RemoveSiteSurveyMutation:
__QUERY__ = """
mutation RemoveSiteSurveyMutation($id: ID!) {
removeSiteSurvey(id: $id)
}
"""
@dataclass_json
@dataclass
class RemoveSiteSurveyMutationData:
removeSiteSurvey: str
data: Optional[RemoveSiteSurveyMutationData] = None
errors: Any = None
@classmethod
# fmt: off
def execute(cls, client, id: str):
# fmt: off
variables = {"id": id}
response_text = client.call(cls.__QUERY__, variables=variables)
return cls.from_json(response_text).data
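# Hedged usage sketch (illustration only, not part of the generated client):
# `execute` works with any transport object exposing `.call(query, variables=...)`
# that returns the raw JSON response text, e.g.:
#
#   class StubClient:                      # hypothetical transport
#       def call(self, query, variables=None):
#           return '{"data": {"removeSiteSurvey": "%s"}}' % variables["id"]
#
#   result = RemoveSiteSurveyMutation.execute(StubClient(), id="survey-1")
#   print(result.removeSiteSurvey)         # -> "survey-1"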
|
py | 1a3d6e99aca45cabe92bfb8f5073451aa582bc61 | # Given an array of integers, where all elements but one occur twice, find the unique element.
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'lonelyinteger' function below.
#
# The function is expected to return an INTEGER.
# The function accepts INTEGER_ARRAY a as parameter.
#
def lonelyinteger(a):
d = dict((x , a.count(x)) for x in set(a))
a = sorted(d.items(), key=lambda x: x[1])[0][0]
return a
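# Alternative sketch (added for illustration, not part of the original
# submission): since every element except one occurs exactly twice, XOR-ing
# all values cancels the pairs and leaves the unique element, giving O(n)
# time and O(1) extra space instead of building a count dictionary.
def lonelyinteger_xor(a):
    result = 0
    for x in a:
        result ^= x
    return result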
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input().strip())
a = list(map(int, input().rstrip().split()))
result = lonelyinteger(a)
fptr.write(str(result) + '\n')
fptr.close()
|
py | 1a3d6eed3c89dce89b18c1533c0a963b68c38c43 | # -*- coding: utf-8 -*-
#
# This class was auto-generated from the API references found at
# https://epayments-api.developer-ingenico.com/s2sapi/v1/
#
from ingenico.connect.sdk.data_object import DataObject
class ThreeDSecureData(DataObject):
"""
| Object containing data regarding the 3D Secure authentication
"""
__acs_transaction_id = None
__method = None
__utc_timestamp = None
@property
def acs_transaction_id(self):
"""
| The ACS Transaction ID for a prior 3-D Secure authenticated transaction (for example, the first recurring transaction that was authenticated with the customer)
Type: str
"""
return self.__acs_transaction_id
@acs_transaction_id.setter
def acs_transaction_id(self, value):
self.__acs_transaction_id = value
@property
def method(self):
"""
        | Method of authentication used for this transaction. Possible values:
* frictionless = The authentication went without a challenge
* challenged = Cardholder was challenged
* avs-verified = The authentication was verified by AVS
* other = Another issuer method was used to authenticate this transaction
Type: str
"""
return self.__method
@method.setter
def method(self, value):
self.__method = value
@property
def utc_timestamp(self):
"""
| Timestamp in UTC (YYYYMMDDHHmm) of the 3-D Secure authentication of this transaction
Type: str
"""
return self.__utc_timestamp
@utc_timestamp.setter
def utc_timestamp(self, value):
self.__utc_timestamp = value
def to_dictionary(self):
dictionary = super(ThreeDSecureData, self).to_dictionary()
if self.acs_transaction_id is not None:
dictionary['acsTransactionId'] = self.acs_transaction_id
if self.method is not None:
dictionary['method'] = self.method
if self.utc_timestamp is not None:
dictionary['utcTimestamp'] = self.utc_timestamp
return dictionary
def from_dictionary(self, dictionary):
super(ThreeDSecureData, self).from_dictionary(dictionary)
if 'acsTransactionId' in dictionary:
self.acs_transaction_id = dictionary['acsTransactionId']
if 'method' in dictionary:
self.method = dictionary['method']
if 'utcTimestamp' in dictionary:
self.utc_timestamp = dictionary['utcTimestamp']
return self
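# Hedged usage sketch (illustration only, not part of the generated SDK file):
# round-trips the object through its dictionary form using the accessors above.
if __name__ == '__main__':
    authentication = ThreeDSecureData()
    authentication.method = 'frictionless'
    authentication.utc_timestamp = '202201011200'
    as_dict = authentication.to_dictionary()  # {'method': ..., 'utcTimestamp': ...}
    restored = ThreeDSecureData().from_dictionary(as_dict)
    assert restored.method == 'frictionless'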
|
py | 1a3d6f0eb86d345cd8bfcf94cb2182c1c942d3da | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import timedelta
import pytest
import cirq
import cirq.neutral_atoms as neutral_atoms
import cirq.testing
def square_device(
width: int, height: int, holes=(), max_controls=2, use_timedelta=False
) -> neutral_atoms.NeutralAtomDevice:
us = cirq.Duration(nanos=10**3) if not use_timedelta else timedelta(microseconds=1)
ms = cirq.Duration(nanos=10**6) if not use_timedelta else timedelta(microseconds=1000)
return neutral_atoms.NeutralAtomDevice( # type: ignore
measurement_duration=50 * ms, # type: ignore
gate_duration=100 * us, # type: ignore
control_radius=1.5,
max_parallel_z=3,
max_parallel_xy=3,
max_parallel_c=max_controls,
qubits=[
cirq.GridQubit(row, col)
for col in range(width)
for row in range(height)
if cirq.GridQubit(row, col) not in holes
],
)
def test_init():
d = square_device(2, 2, holes=[cirq.GridQubit(1, 1)])
us = cirq.Duration(nanos=10**3)
ms = cirq.Duration(nanos=10**6)
q00 = cirq.GridQubit(0, 0)
q01 = cirq.GridQubit(0, 1)
q10 = cirq.GridQubit(1, 0)
assert d.qubits == {q10, q00, q01}
assert d.duration_of(cirq.GateOperation(cirq.IdentityGate(1), [q00])) == 100 * us
assert d.duration_of(cirq.measure(q00)) == 50 * ms
with pytest.raises(ValueError):
_ = d.duration_of(cirq.SingleQubitGate().on(q00))
def test_metadata():
d = square_device(2, 3)
assert d.metadata.qubit_set == frozenset(
{
cirq.GridQubit(0, 0),
cirq.GridQubit(0, 1),
cirq.GridQubit(1, 0),
cirq.GridQubit(1, 1),
cirq.GridQubit(2, 0),
cirq.GridQubit(2, 1),
}
)
assert len(d.metadata.nx_graph.edges()) == 7
def test_init_timedelta():
d = square_device(2, 2, holes=[cirq.GridQubit(1, 1)], use_timedelta=True)
us = cirq.Duration(nanos=10**3)
ms = cirq.Duration(nanos=10**6)
q00 = cirq.GridQubit(0, 0)
q01 = cirq.GridQubit(0, 1)
q10 = cirq.GridQubit(1, 0)
assert d.qubits == {q10, q00, q01}
assert d.duration_of(cirq.GateOperation(cirq.IdentityGate(1), [q00])) == 100 * us
assert d.duration_of(cirq.measure(q00)) == 50 * ms
with pytest.raises(ValueError):
_ = d.duration_of(cirq.SingleQubitGate().on(q00))
def test_init_errors():
line = cirq.LineQubit.range(3)
us = cirq.Duration(nanos=10**3)
ms = cirq.Duration(nanos=10**6)
with pytest.raises(ValueError, match="Unsupported qubit type"):
_ = neutral_atoms.NeutralAtomDevice(
measurement_duration=50 * ms,
gate_duration=100 * us,
control_radius=1.5,
max_parallel_z=3,
max_parallel_xy=3,
max_parallel_c=3,
qubits=line,
)
with pytest.raises(ValueError, match="max_parallel_c must be less"):
_ = neutral_atoms.NeutralAtomDevice(
measurement_duration=50 * ms,
gate_duration=100 * us,
control_radius=1.5,
max_parallel_z=3,
max_parallel_xy=3,
max_parallel_c=4,
qubits=[cirq.GridQubit(0, 0)],
)
def test_decompose_error_deprecated():
d = square_device(2, 2, holes=[cirq.GridQubit(1, 1)])
with cirq.testing.assert_deprecated('ConvertToNeutralAtomGates', deadline='v0.15'):
for op in d.decompose_operation((cirq.CCZ**1.5).on(*(d.qubit_list()))):
d.validate_operation(op)
def test_validate_gate_errors():
d = square_device(1, 1)
d.validate_gate(cirq.IdentityGate(4))
with pytest.raises(ValueError, match="controlled gates must have integer exponents"):
d.validate_gate(cirq.CNotPowGate(exponent=0.5))
with pytest.raises(ValueError, match="Unsupported gate"):
d.validate_gate(cirq.SingleQubitGate())
def test_validate_operation_errors():
d = square_device(3, 3)
class bad_op(cirq.Operation):
def bad_op(self):
pass
def qubits(self):
pass
def with_qubits(self, new_qubits):
pass
with pytest.raises(ValueError, match="Unsupported operation"):
d.validate_operation(bad_op())
not_on_device_op = cirq.parallel_gate_op(
cirq.X, *[cirq.GridQubit(row, col) for col in range(4) for row in range(4)]
)
with pytest.raises(ValueError, match="Qubit not on device"):
d.validate_operation(not_on_device_op)
with pytest.raises(ValueError, match="Too many qubits acted on in parallel by"):
d.validate_operation(cirq.CCX.on(*d.qubit_list()[0:3]))
with pytest.raises(ValueError, match="are too far away"):
d.validate_operation(cirq.CZ.on(cirq.GridQubit(0, 0), cirq.GridQubit(2, 2)))
with pytest.raises(ValueError, match="Unsupported operation"):
d.validate_operation(cirq.parallel_gate_op(cirq.Z, *d.qubits))
with pytest.raises(ValueError, match="Unsupported operation"):
d.validate_operation(cirq.parallel_gate_op(cirq.X, *d.qubit_list()[1:]))
with pytest.raises(ValueError, match="Unsupported operation"):
d.validate_operation(
cirq.ParallelGate(cirq.MeasurementGate(1, key='a'), 4)(*d.qubit_list()[:4])
)
def test_validate_moment_errors():
d = square_device(3, 3)
q00 = cirq.GridQubit(0, 0)
q01 = cirq.GridQubit(0, 1)
q10 = cirq.GridQubit(1, 0)
q11 = cirq.GridQubit(1, 1)
q12 = cirq.GridQubit(1, 2)
q02 = cirq.GridQubit(0, 2)
q04 = cirq.GridQubit(0, 4)
q03 = cirq.GridQubit(0, 3)
q20 = cirq.GridQubit(2, 0)
q21 = cirq.GridQubit(2, 1)
m = cirq.Moment([cirq.Z.on(q00), (cirq.Z**2).on(q01)])
with pytest.raises(ValueError, match="Non-identical simultaneous "):
d.validate_moment(m)
m = cirq.Moment([cirq.X.on(q00), cirq.Y.on(q01)])
with pytest.raises(ValueError, match="Non-identical simultaneous "):
d.validate_moment(m)
m = cirq.Moment([cirq.CNOT.on(q00, q01), cirq.CZ.on(q12, q02)])
with pytest.raises(ValueError, match="Non-identical simultaneous "):
d.validate_moment(m)
m = cirq.Moment([cirq.CNOT.on(q00, q01), cirq.CNOT.on(q12, q02)])
with pytest.raises(ValueError, match="Too many qubits acted on by controlled gates"):
d.validate_moment(m)
m = cirq.Moment([cirq.CNOT.on(q00, q01), cirq.Z.on(q02)])
with pytest.raises(
ValueError,
match="Can't perform non-controlled operations at same time as controlled operations",
):
d.validate_moment(m)
m = cirq.Moment(cirq.Z.on_each(*d.qubits))
with pytest.raises(ValueError, match="Too many simultaneous Z gates"):
d.validate_moment(m)
m = cirq.Moment(cirq.X.on_each(*(d.qubit_list()[1:])))
with pytest.raises(ValueError, match="Bad number of simultaneous XY gates"):
d.validate_moment(m)
m = cirq.Moment([cirq.MeasurementGate(1, 'a').on(q00), cirq.Z.on(q01)])
with pytest.raises(
ValueError, match="Measurements can't be simultaneous with other operations"
):
d.validate_moment(m)
d.validate_moment(cirq.Moment([cirq.X.on(q00), cirq.Z.on(q01)]))
us = cirq.Duration(nanos=10**3)
ms = cirq.Duration(nanos=10**6)
d2 = neutral_atoms.NeutralAtomDevice(
measurement_duration=50 * ms,
gate_duration=100 * us,
control_radius=1.5,
max_parallel_z=4,
max_parallel_xy=4,
max_parallel_c=4,
qubits=[cirq.GridQubit(row, col) for col in range(2) for row in range(2)],
)
m = cirq.Moment([cirq.CNOT.on(q00, q01), cirq.CNOT.on(q10, q11)])
with pytest.raises(ValueError, match="Interacting controlled operations"):
d2.validate_moment(m)
d2 = neutral_atoms.NeutralAtomDevice(
measurement_duration=50 * ms,
gate_duration=100 * us,
control_radius=1.1,
max_parallel_z=6,
max_parallel_xy=6,
max_parallel_c=6,
qubits=[cirq.GridQubit(row, col) for col in range(5) for row in range(5)],
)
m = cirq.Moment([cirq.CZ.on(q00, q01), cirq.CZ.on(q03, q04), cirq.CZ.on(q20, q21)])
d2.validate_moment(m)
m = cirq.Moment([cirq.CZ.on(q00, q01), cirq.CZ.on(q02, q03), cirq.CZ.on(q10, q11)])
with pytest.raises(ValueError, match="Interacting controlled operations"):
d2.validate_moment(m)
def test_can_add_operation_into_moment_coverage_deprecated():
with cirq.testing.assert_deprecated('can_add_operation_into_moment', deadline='v0.15', count=3):
d = square_device(2, 2)
q00 = cirq.GridQubit(0, 0)
q01 = cirq.GridQubit(0, 1)
q10 = cirq.GridQubit(1, 0)
m = cirq.Moment([cirq.X.on(q00)])
assert not d.can_add_operation_into_moment(cirq.X.on(q00), m)
assert not d.can_add_operation_into_moment(cirq.CZ.on(q01, q10), m)
assert d.can_add_operation_into_moment(cirq.Z.on(q01), m)
def test_validate_circuit_errors():
d = square_device(2, 2, max_controls=3)
q00 = cirq.GridQubit(0, 0)
q01 = cirq.GridQubit(0, 1)
q10 = cirq.GridQubit(1, 0)
q11 = cirq.GridQubit(1, 1)
c = cirq.Circuit()
c.append(cirq.parallel_gate_op(cirq.X, *d.qubits))
c.append(cirq.CCZ.on(q00, q01, q10))
c.append(cirq.parallel_gate_op(cirq.Z, q00, q01, q10))
m = cirq.Moment(cirq.X.on_each(q00, q01) + cirq.Z.on_each(q10, q11))
c.append(m)
c.append(cirq.measure_each(*d.qubits))
d.validate_circuit(c)
c.append(cirq.Moment([cirq.X.on(q00)]))
with pytest.raises(ValueError, match="Non-empty moment after measurement"):
d.validate_circuit(c)
def test_repr():
d = square_device(1, 1)
cirq.testing.assert_equivalent_repr(d)
def test_str():
assert (
str(square_device(2, 2)).strip()
== """
(0, 0)───(0, 1)
│ │
│ │
(1, 0)───(1, 1)
""".strip()
)
def test_repr_pretty():
cirq.testing.assert_repr_pretty(
square_device(2, 2),
"""
(0, 0)───(0, 1)
│ │
│ │
(1, 0)───(1, 1)
""".strip(),
)
cirq.testing.assert_repr_pretty(square_device(2, 2), "cirq.NeutralAtomDevice(...)", cycle=True)
def test_qubit_set_deprecated():
with cirq.testing.assert_deprecated('qubit_set', deadline='v0.15'):
assert square_device(2, 2).qubit_set() == frozenset(cirq.GridQubit.square(2, 0, 0))
|
py | 1a3d70b7772580db3e2f1cc6be62e0feb441cb95 | # Copyright 2019 U.C. Berkeley RISE Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
import time
from anna.client import AnnaTcpClient
from anna.zmq_util import SocketCache
import zmq
from cloudburst.server import utils as sutils
from cloudburst.server.executor import utils
from cloudburst.server.executor.call import exec_function, exec_dag_function
from cloudburst.server.executor.pin import pin, unpin
from cloudburst.server.executor.user_library import CloudburstUserLibrary
from cloudburst.shared.anna_ipc_client import AnnaIpcClient
from cloudburst.shared.proto.cloudburst_pb2 import (
DagSchedule,
DagTrigger,
MULTIEXEC # Cloudburst's execution types
)
from cloudburst.shared.proto.internal_pb2 import (
CPU, GPU, # Cloudburst's executor types
ExecutorStatistics,
ThreadStatus,
)
REPORT_THRESH = 5
BATCH_SIZE_MAX = 20
def executor(ip, mgmt_ip, schedulers, thread_id):
# logging.basicConfig(stream=sys.stdout, level=logging.INFO, format='%(asctime)s %(message)s')
logging.basicConfig(filename='log_executor.txt', level=logging.INFO, filemode="w",
format='%(asctime)s %(message)s')
# Check what resources we have access to, set as an environment variable.
if os.getenv('EXECUTOR_TYPE', 'CPU') == 'GPU':
exec_type = GPU
else:
exec_type = CPU
context = zmq.Context(1)
poller = zmq.Poller()
pin_socket = context.socket(zmq.PULL)
pin_socket.bind(sutils.BIND_ADDR_TEMPLATE % (sutils.PIN_PORT + thread_id))
unpin_socket = context.socket(zmq.PULL)
unpin_socket.bind(sutils.BIND_ADDR_TEMPLATE % (sutils.UNPIN_PORT +
thread_id))
exec_socket = context.socket(zmq.PULL)
exec_socket.bind(sutils.BIND_ADDR_TEMPLATE % (sutils.FUNC_EXEC_PORT +
thread_id))
dag_queue_socket = context.socket(zmq.PULL)
dag_queue_socket.bind(sutils.BIND_ADDR_TEMPLATE % (sutils.DAG_QUEUE_PORT
+ thread_id))
dag_exec_socket = context.socket(zmq.PULL)
dag_exec_socket.bind(sutils.BIND_ADDR_TEMPLATE % (sutils.DAG_EXEC_PORT
+ thread_id))
self_depart_socket = context.socket(zmq.PULL)
self_depart_socket.bind(sutils.BIND_ADDR_TEMPLATE %
(sutils.SELF_DEPART_PORT + thread_id))
pusher_cache = SocketCache(context, zmq.PUSH)
poller = zmq.Poller()
poller.register(pin_socket, zmq.POLLIN)
poller.register(unpin_socket, zmq.POLLIN)
poller.register(exec_socket, zmq.POLLIN)
poller.register(dag_queue_socket, zmq.POLLIN)
poller.register(dag_exec_socket, zmq.POLLIN)
poller.register(self_depart_socket, zmq.POLLIN)
# If the management IP is set to None, that means that we are running in
# local mode, so we use a regular AnnaTcpClient rather than an IPC client.
has_ephe = False
if mgmt_ip:
if 'STORAGE_OR_DEFAULT' in os.environ and os.environ['STORAGE_OR_DEFAULT'] == '0':
client = AnnaTcpClient(os.environ['ROUTE_ADDR'], ip, local=False, offset=thread_id)
has_ephe = True
else:
client = AnnaIpcClient(thread_id, context)
# force_remote_anna = 1
# if 'FORCE_REMOTE' in os.environ:
# force_remote_anna = int(os.environ['FORCE_REMOTE'])
# if force_remote_anna == 0: # remote anna only
# client = AnnaTcpClient(os.environ['ROUTE_ADDR'], ip, local=False, offset=thread_id)
# elif force_remote_anna == 1: # anna cache
# client = AnnaIpcClient(thread_id, context)
# elif force_remote_anna == 2: # control both cache and remote anna
# remote_client = AnnaTcpClient(os.environ['ROUTE_ADDR'], ip, local=False, offset=thread_id)
# cache_client = AnnaIpcClient(thread_id, context)
# client = cache_client
# user_library = CloudburstUserLibrary(context, pusher_cache, ip, thread_id, (cache_client, remote_client))
local = False
else:
client = AnnaTcpClient('127.0.0.1', '127.0.0.1', local=True, offset=1)
local = True
user_library = CloudburstUserLibrary(context, pusher_cache, ip, thread_id, client, has_ephe=has_ephe)
status = ThreadStatus()
status.ip = ip
status.tid = thread_id
status.running = True
status.type = exec_type
utils.push_status(schedulers, pusher_cache, status)
departing = False
# Maintains a request queue for each function pinned on this executor. Each
# function will have a set of request IDs mapped to it, and this map stores
# a schedule for each request ID.
queue = {}
# Tracks the actual function objects that are pinned to this executor.
function_cache = {}
    # Tracks runtime cost of executing a DAG function.
runtimes = {}
# If multiple triggers are necessary for a function, track the triggers as
# we receive them. This is also used if a trigger arrives before its
# corresponding schedule.
received_triggers = {}
# Tracks when we received a function request, so we can report end-to-end
    # latency for the whole execution.
receive_times = {}
# Tracks the number of requests we are finishing for each function pinned
# here.
exec_counts = {}
# Tracks the end-to-end runtime of each DAG request for which we are the
# sink function.
dag_runtimes = {}
# A map with KVS keys and their corresponding deserialized payloads.
cache = {}
# A map which tracks the most recent DAGs for which we have finished our
# work.
finished_executions = {}
# The set of pinned functions and whether they support batching. NOTE: This
# is only a set for local mode -- in cluster mode, there will only be one
# pinned function per executor.
batching = False
# Internal metadata to track thread utilization.
report_start = time.time()
event_occupancy = {'pin': 0.0,
'unpin': 0.0,
'func_exec': 0.0,
'dag_queue': 0.0,
'dag_exec': 0.0}
total_occupancy = 0.0
while True:
socks = dict(poller.poll(timeout=1000))
if pin_socket in socks and socks[pin_socket] == zmq.POLLIN:
work_start = time.time()
batching = pin(pin_socket, pusher_cache, client, status,
function_cache, runtimes, exec_counts, user_library,
local, batching)
utils.push_status(schedulers, pusher_cache, status)
elapsed = time.time() - work_start
event_occupancy['pin'] += elapsed
total_occupancy += elapsed
if unpin_socket in socks and socks[unpin_socket] == zmq.POLLIN:
work_start = time.time()
unpin(unpin_socket, status, function_cache, runtimes,
exec_counts)
utils.push_status(schedulers, pusher_cache, status)
elapsed = time.time() - work_start
event_occupancy['unpin'] += elapsed
total_occupancy += elapsed
if exec_socket in socks and socks[exec_socket] == zmq.POLLIN:
work_start = time.time()
# logging.info(f'Executor timer. exec_socket recv: {work_start}')
exec_function(exec_socket, client, user_library, cache,
function_cache, has_ephe=has_ephe)
user_library.close()
utils.push_status(schedulers, pusher_cache, status)
elapsed = time.time() - work_start
event_occupancy['func_exec'] += elapsed
total_occupancy += elapsed
if dag_queue_socket in socks and socks[dag_queue_socket] == zmq.POLLIN:
work_start = time.time()
logging.info(f'Executor timer. dag_queue_socket recv: {work_start}')
# In order to effectively support batching, we have to make sure we
# dequeue lots of schedules in addition to lots of triggers. Right
# now, we're not going to worry about supporting batching here,
# just on the trigger dequeue side, but we still have to dequeue
# all schedules we've received. We just process them one at a time.
while True:
schedule = DagSchedule()
try:
msg = dag_queue_socket.recv(zmq.DONTWAIT)
except zmq.ZMQError as e:
if e.errno == zmq.EAGAIN:
break # There are no more messages.
else:
raise e # Unexpected error.
schedule.ParseFromString(msg)
fname = schedule.target_function
logging.info('Received a schedule for DAG %s (%s), function %s.' %
(schedule.dag.name, schedule.id, fname))
if fname not in queue:
queue[fname] = {}
queue[fname][schedule.id] = schedule
if (schedule.id, fname) not in receive_times:
receive_times[(schedule.id, fname)] = time.time()
# In case we receive the trigger before we receive the schedule, we
# can trigger from this operation as well.
trkey = (schedule.id, fname)
fref = None
# Check to see what type of execution this function is.
for ref in schedule.dag.functions:
if ref.name == fname:
fref = ref
if (trkey in received_triggers and
((len(received_triggers[trkey]) == len(schedule.triggers))
or (fref.type == MULTIEXEC))):
triggers = list(received_triggers[trkey].values())
if fname not in function_cache:
logging.error('%s not in function cache', fname)
utils.generate_error_response(schedule, client, fname)
continue
exec_start = time.time()
# logging.info(f'Executor timer. dag_queue_socket exec_dag: {exec_start}')
# We don't support actual batching for when we receive a
# schedule before a trigger, so everything is just a batch of
# size 1 if anything.
success = exec_dag_function(pusher_cache, client,
[triggers], function_cache[fname],
[schedule], user_library,
dag_runtimes, cache, schedulers,
batching)[0]
user_library.close()
del received_triggers[trkey]
if success:
del queue[fname][schedule.id]
fend = time.time()
fstart = receive_times[(schedule.id, fname)]
runtimes[fname].append(fend - work_start)
exec_counts[fname] += 1
finished_executions[(schedule.id, fname)] = time.time()
elapsed = time.time() - work_start
event_occupancy['dag_queue'] += elapsed
total_occupancy += elapsed
if dag_exec_socket in socks and socks[dag_exec_socket] == zmq.POLLIN:
work_start = time.time()
# logging.info(f'Executor timer. dag_exec_socket recv: {work_start}')
# How many messages to dequeue -- BATCH_SIZE_MAX or 1 depending on
# the function configuration.
if batching:
count = BATCH_SIZE_MAX
else:
count = 1
trigger_keys = set()
for _ in range(count): # Dequeue count number of messages.
trigger = DagTrigger()
try:
msg = dag_exec_socket.recv(zmq.DONTWAIT)
except zmq.ZMQError as e:
if e.errno == zmq.EAGAIN: # There are no more messages.
break
else:
raise e # Unexpected error.
trigger.ParseFromString(msg)
# We have received a repeated trigger for a function that has
# already finished executing.
if trigger.id in finished_executions:
continue
fname = trigger.target_function
logging.info('Received a trigger for schedule %s, function %s.' %
(trigger.id, fname))
key = (trigger.id, fname)
trigger_keys.add(key)
if key not in received_triggers:
received_triggers[key] = {}
if (trigger.id, fname) not in receive_times:
receive_times[(trigger.id, fname)] = time.time()
received_triggers[key][trigger.source] = trigger
# Only execute the functions for which we have received a schedule.
# Everything else will wait.
for tid, fname in list(trigger_keys):
if fname not in queue or tid not in queue[fname]:
trigger_keys.remove((tid, fname))
if len(trigger_keys) == 0:
continue
fref = None
schedule = queue[fname][list(trigger_keys)[0][0]] # Pick a random schedule to check.
# Check to see what type of execution this function is.
for ref in schedule.dag.functions:
if ref.name == fname:
fref = ref
break
# Compile a list of all the trigger sets for which we have
# enough triggers.
trigger_sets = []
schedules = []
for key in trigger_keys:
if (len(received_triggers[key]) == len(schedule.triggers)) or \
fref.type == MULTIEXEC:
if fref.type == MULTIEXEC:
triggers = [trigger]
else:
triggers = list(received_triggers[key].values())
if fname not in function_cache:
logging.error('%s not in function cache', fname)
utils.generate_error_response(schedule, client, fname)
continue
trigger_sets.append(triggers)
schedule = queue[fname][key[0]]
schedules.append(schedule)
exec_start = time.time()
# logging.info(f'Executor timer. dag_exec_socket exec_dag: {exec_start}')
# Pass all of the trigger_sets into exec_dag_function at once.
            # We also include the batching variable to make sure we know
# whether to pass lists into the fn or not.
if len(trigger_sets) > 0:
successes = exec_dag_function(pusher_cache, client,
trigger_sets,
function_cache[fname],
schedules, user_library,
dag_runtimes, cache,
schedulers, batching)
user_library.close()
del received_triggers[key]
for key, success in zip(trigger_keys, successes):
if success:
del queue[fname][key[0]] # key[0] is trigger.id.
fend = time.time()
fstart = receive_times[key]
average_time = (fend - work_start) / len(trigger_keys)
runtimes[fname].append(average_time)
exec_counts[fname] += 1
finished_executions[(schedule.id, fname)] = time.time()
elapsed = time.time() - work_start
event_occupancy['dag_exec'] += elapsed
total_occupancy += elapsed
if self_depart_socket in socks and socks[self_depart_socket] == \
zmq.POLLIN:
# This message does not matter.
self_depart_socket.recv()
logging.info('Preparing to depart. No longer accepting requests ' +
'and clearing all queues.')
status.ClearField('functions')
status.running = False
utils.push_status(schedulers, pusher_cache, status)
departing = True
# periodically report function occupancy
report_end = time.time()
if report_end - report_start > REPORT_THRESH:
if len(cache) > 100:
extra_keys = list(cache.keys())[:len(cache) - 100]
for key in extra_keys:
del cache[key]
utilization = total_occupancy / (report_end - report_start)
status.utilization = utilization
# Periodically report my status to schedulers with the utilization
# set.
utils.push_status(schedulers, pusher_cache, status)
logging.debug('Total thread occupancy: %.6f' % (utilization))
for event in event_occupancy:
occ = event_occupancy[event] / (report_end - report_start)
logging.debug('\tEvent %s occupancy: %.6f' % (event, occ))
event_occupancy[event] = 0.0
stats = ExecutorStatistics()
for fname in runtimes:
if exec_counts[fname] > 0:
fstats = stats.functions.add()
fstats.name = fname
fstats.call_count = exec_counts[fname]
fstats.runtime.extend(runtimes[fname])
runtimes[fname].clear()
exec_counts[fname] = 0
for dname in dag_runtimes:
dstats = stats.dags.add()
dstats.name = dname
dstats.runtimes.extend(dag_runtimes[dname])
dag_runtimes[dname].clear()
# If we are running in cluster mode, mgmt_ip will be set, and we
# will report our status and statistics to it. Otherwise, we will
# write to the local conf file
if mgmt_ip:
sckt = pusher_cache.get(sutils.get_statistics_report_address
(mgmt_ip))
sckt.send(stats.SerializeToString())
sckt = pusher_cache.get(utils.get_util_report_address(mgmt_ip))
sckt.send(status.SerializeToString())
else:
logging.info(stats)
status.ClearField('utilization')
report_start = time.time()
total_occupancy = 0.0
# Periodically clear any old functions we have cached that we are
# no longer accepting requests for.
del_list = []
for fname in queue:
if len(queue[fname]) == 0 and fname not in status.functions:
del_list.append(fname)
del function_cache[fname]
del runtimes[fname]
del exec_counts[fname]
for fname in del_list:
del queue[fname]
del_list = []
for tid in finished_executions:
if (time.time() - finished_executions[tid]) > 10:
del_list.append(tid)
for tid in del_list:
del finished_executions[tid]
# If we are departing and have cleared our queues, let the
# management server know, and exit the process.
if departing and len(queue) == 0:
sckt = pusher_cache.get(utils.get_depart_done_addr(mgmt_ip))
sckt.send_string(ip)
# We specifically pass 1 as the exit code when ending our
# process so that the wrapper script does not restart us.
sys.exit(1)
if __name__ == '__main__':
if len(sys.argv) > 1:
conf_file = sys.argv[1]
else:
conf_file = 'conf/cloudburst-config.yml'
conf = sutils.load_conf(conf_file)
exec_conf = conf['executor']
executor(conf['ip'], conf['mgmt_ip'], exec_conf['scheduler_ips'],
int(exec_conf['thread_id']))
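# Hedged config sketch (illustration only): the __main__ block above only
# reads these keys from conf/cloudburst-config.yml; the project's real config
# file has more fields, and the values below are made-up placeholders.
#
#   ip: 127.0.0.1
#   mgmt_ip: null
#   executor:
#     thread_id: 0
#     scheduler_ips:
#       - 127.0.0.1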
|
py | 1a3d70e4ea818a15d957999eac2d731a937f2acd | #!/usr/bin/python2.7
"""Makes dictionary file from mozc files.
How to use this tool:
$ git clone https://github.com/google/mozc.git
$ tools/make_dictionary_file.py mozc/src/data/dictionary_oss/dictionary*.txt > app/japanese_name_location_dict.txt
"""
import sys
def make_dictionary(input_file_names, output_file_name, numbers):
"""Makes dictionary and writes it to output_file_name.
Args:
input_file_names: a list of file names
Output format:
kanji '\t' yomigana(hiragana) '\n'
kanji '\t' yomigana(hiragana) '\n' ...
"""
yomigana_list = []
for input_file_name in input_file_names:
with open(input_file_name, 'r') as input_file:
for line in input_file:
line = line.rstrip()
split_line = line.split('\t')
id1 = int(split_line[1])
id2 = int(split_line[2])
# e.g. (id1 == id2 == 1845) means "given name"
if id1 in numbers and id1 == id2:
yomigana = split_line[0]
kanji = split_line[4]
yomigana_list.append(kanji + '\t' + yomigana + '\n')
with open(output_file_name, 'w') as output_file:
output_file.writelines(yomigana_list)
def make_jp_name_location_dictionary(input_file_names):
"""Makes japanese name and location dictionary."""
# 1845: id for given names in mozc dictionary
# 1846: id for family names in mozc dictionary
# 1847 ~ 1850: ids for location names in mozc dictionary
numbers = [1845, 1846, 1847, 1848, 1849, 1850]
make_dictionary(input_file_names, 'app/japanese_name_location_dict.txt', numbers)
def main():
dictionaries = sys.argv[1:]
make_jp_name_location_dictionary(dictionaries)
if __name__ == '__main__':
main()
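# Hedged format sketch (illustration only; the entry below is invented, not
# taken from the real mozc data). Each dictionary*.txt line has five
# tab-separated fields: yomigana, id1, id2, cost, kanji. A given-name row such as
#
#   はなこ <TAB> 1845 <TAB> 1845 <TAB> 5000 <TAB> 花子
#
# passes the id filter above (id1 == id2 == 1845) and is written out as
#
#   花子 <TAB> はなこ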
|
py | 1a3d7136fccd964641075f3361dc9f0b68ca91a1 | # import os
# from django.utils.translation import ugettext_lazy as _
DEBUG = True
# LOCAL_BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
USE_I18N = True
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'fm_db_20211015_1_02',
'OPTIONS': {
# 'init_command': "SET sql_mode='STRICT_TRANS_TABLES'",
'init_command': "SET sql_mode='STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER'",
'charset': 'utf8'
},
'USER': 'root',
'PASSWORD': 'Jiffy@123',
'HOST': 'localhost',
'PORT': '3306',
}
}
|
py | 1a3d72025b9d310a69ea2923d3f9f0a442c0895d | from os import path
import torch
from torch import tensor
import numpy as np
import string
import linecache
class data:
# Assume the data is of this form: SpeakerId Text|AddresseeId Text
def __init__(self, params, voc):
self.params = params
self.voc = voc
# EOS: End of source, start of target
self.EOS = 1
# EOT: End of target
self.EOT = 2
self.padding = 0 # Not used, just a reminder
        self.UNK = params.UNK+params.special_word  # self.UNK = 3
def encode(self, tokens, batch_size = 2, mode = "train"):
ids = []
for token in tokens:
if mode == "decode" and batch_size == 1:
### For raw-word data:
try:
ids.append(self.voc[token]+self.params.special_word)
except KeyError:
ids.append(self.UNK)
###--------------------
else:
### For testing data (numbering starts from 1, not 0):
ids.append(int(token)-1+self.params.special_word)
### For data that is already tokenized and transferred to ids:
# ids.append(int(token)+self.params.special_word)
return ids
def read_batch(self, file, num, mode='train_or_test'):
origin = []
sources = np.zeros((self.params.batch_size, self.params.source_max_length+1)) #batch_size*50
targets = np.zeros((self.params.batch_size, self.params.source_max_length+1)) #batch_size*50
speaker_label = -np.ones(self.params.batch_size) #all speaker IDs are set to -1
addressee_label = -np.ones(self.params.batch_size)
l_s_set = set()
l_t_set = set()
END=0
a=0
for i in range(self.params.batch_size):
if mode == "decode" and self.params.batch_size == 1:
line = file.strip().split("|")
else:
line = linecache.getline(file,num*self.params.batch_size+i+1).strip().split("|")
i-=a #to adjust for skipped lines
if line == ['']:
END = 1
break
s = line[-2].split()[:self.params.source_max_length]
t = line[-1].split()[:self.params.target_max_length]
#skipping lines when Speaker or Addressee speech is empty
if s[1:]==[]: #if only one word in Source (i.e Speaker ID)
a+=1
continue
elif t[1:]==[] and mode!='decode': #if only one word in Target (i.e Addressee ID) AND mode!='decode'
a+=1
continue
if self.params.SpeakerMode or self.params.AddresseeMode:
source=self.encode(s[1:], self.params.batch_size, mode) #encoding speech of the speaker
target=[self.EOS]+self.encode(t[1:], self.params.batch_size, mode)+[self.EOT] #encoding speech of the addressee
else:
source=self.encode(s[0:], self.params.batch_size, mode) #encoding speech of the speaker
target=[self.EOS]+self.encode(t[0:], self.params.batch_size, mode)+[self.EOT] #encoding speech of the addressee
l_s=len(source) #length of Source
l_t=len(target) #length of Target
l_s_set.add(l_s)
l_t_set.add(l_t)
### If the data contains words, not numbers:
# origin.append(' '.join(s[1:]))
origin.append(source)
sources[i, :l_s]=source #last few elements will be 0
targets[i, :l_t]=target #last few elements will be 0
if mode!='decode':
try:
speaker_label[i]=int(s[0])-1 #speaker id (zero-indexed)
addressee_label[i]=int(t[0])-1 #addressee id (zero-indexed)
except:
print('Persona id cannot be transferred to numbers')
i+=1
try:
max_l_s=max(l_s_set) #length of longest Source sentence in the batch
max_l_t=max(l_t_set) #length of longest Target sentence in the batch
except ValueError:
return END,None,None,None,None,None,None,None
if max_l_s == 0:
return END,None,None,None,None,None,None,None
elif max_l_t == 2 and mode != 'decode':
return END,None,None,None,None,None,None,None
sources=sources[:i, : max_l_s] #cutting everything beyong max_l_s
targets=targets[:i, : max_l_t] #cutting everything beyong max_l_t
speaker_label=speaker_label[:i]
addressee_label=addressee_label[:i]
length_s=(sources!=0).sum(1) #batch_size, each element is sum of number of words in each sample (includes speaker IDs)
mask_t=np.ones(targets.shape)*(targets!=0) # batch_size*max_l_t; 1 in place where the words exist in target, elsewhere 0
token_num=mask_t[:,1:].sum() #total number of words in Target for each batch (not including Addressee IDs)
return END,tensor(sources).long(),tensor(targets).long(),tensor(speaker_label).long(),tensor(addressee_label).long(),tensor(length_s).long(),token_num,origin
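# Hedged usage sketch (illustration only; the `params` namespace below is a
# made-up stand-in for the project's real params object, keeping only the
# fields that __init__ and encode() touch).
if __name__ == '__main__':
    from types import SimpleNamespace
    demo_params = SimpleNamespace(UNK=0, special_word=3, batch_size=2,
                                  source_max_length=50, target_max_length=50,
                                  SpeakerMode=False, AddresseeMode=False)
    demo = data(demo_params, voc={})
    # Tokens in the data files are 1-indexed word ids; encode() shifts them
    # past the special words (padding/EOS/EOT), so '12' -> 12 - 1 + 3 = 14.
    print(demo.encode(['12', '7']))  # [14, 9]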
|
py | 1a3d720b26c92c0d4480c14ca906e5de87b29506 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Copyright (c) 2017-2019 The Raven Core developers
# Copyright (c) 2020-2021 The Hive Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet accounts properly when there is a double-spend conflict."""
from test_framework.test_framework import HiveTestFramework
from test_framework.util import disconnect_nodes, assert_equal, Decimal, sync_blocks, find_output, connect_nodes
class TxnMallTest(HiveTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.extra_args = [["-maxreorg=10000"], ["-maxreorg=10000"], ["-maxreorg=10000"], ["-maxreorg=10000"]]
def add_options(self, parser):
parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
help="Test double-spend of 1-confirmed transaction")
def setup_network(self):
# Start with split network:
super().setup_network()
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
def run_test(self):
# All nodes should start with 125,000 HVN:
starting_balance = 125000
for i in range(4):
assert_equal(self.nodes[i].getbalance(), starting_balance)
self.nodes[i].getnewaddress("") # bug workaround, coins generated assigned to first getnewaddress!
# Assign coins to foo and bar accounts:
node0_address_foo = self.nodes[0].getnewaddress("foo")
fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 121900)
fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)
node0_address_bar = self.nodes[0].getnewaddress("bar")
fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 2900)
fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)
assert_equal(self.nodes[0].getbalance(""),
starting_balance - 121900 - 2900 + fund_foo_tx["fee"] + fund_bar_tx["fee"])
# Coins are sent to node1_address
node1_address = self.nodes[1].getnewaddress("from0")
        # First: use raw transaction API to send 124000 HVN to node1_address,
# but don't broadcast:
doublespend_fee = Decimal('-.02')
rawtx_input_0 = {"txid": fund_foo_txid, "vout": find_output(self.nodes[0], fund_foo_txid, 121900)}
rawtx_input_1 = {"txid": fund_bar_txid, "vout": find_output(self.nodes[0], fund_bar_txid, 2900)}
inputs = [rawtx_input_0, rawtx_input_1]
change_address = self.nodes[0].getnewaddress()
outputs = {node1_address: 124000, change_address: 124800 - 124000 + doublespend_fee}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
doublespend = self.nodes[0].signrawtransaction(rawtx)
assert_equal(doublespend["complete"], True)
        # Create two more spends from the foo and bar accounts (4000 and 2000 HVN)
txid1 = self.nodes[0].sendfrom("foo", node1_address, 4000, 0)
txid2 = self.nodes[0].sendfrom("bar", node1_address, 2000, 0)
# Have node0 mine a block:
if self.options.mine_block:
self.nodes[0].generate(1)
sync_blocks(self.nodes[0:2])
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
        # Node0's balance should be starting balance, plus 5000 HVN for another
        # matured block, minus 4000, minus 2000, and minus transaction fees:
expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"]
if self.options.mine_block: expected += 5000
expected += tx1["amount"] + tx1["fee"]
expected += tx2["amount"] + tx2["fee"]
assert_equal(self.nodes[0].getbalance(), expected)
# foo and bar accounts should be debited:
assert_equal(self.nodes[0].getbalance("foo", 0), 121900+tx1["amount"]+tx1["fee"])
assert_equal(self.nodes[0].getbalance("bar", 0), 2900+tx2["amount"]+tx2["fee"])
if self.options.mine_block:
assert_equal(tx1["confirmations"], 1)
assert_equal(tx2["confirmations"], 1)
# Node1's "from0" balance should be both transaction amounts:
assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"]+tx2["amount"]))
else:
assert_equal(tx1["confirmations"], 0)
assert_equal(tx2["confirmations"], 0)
# Now give doublespend and its parents to miner:
self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
doublespend_txid = self.nodes[2].sendrawtransaction(doublespend["hex"])
# ... mine a block...
self.nodes[2].generate(1)
# Reconnect the split network, and sync chain:
connect_nodes(self.nodes[1], 2)
self.nodes[2].generate(1) # Mine another block to make sure we sync
sync_blocks(self.nodes)
assert_equal(self.nodes[0].gettransaction(doublespend_txid)["confirmations"], 2)
# Re-fetch transaction info:
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Both transactions should be conflicted
assert_equal(tx1["confirmations"], -2)
assert_equal(tx2["confirmations"], -2)
        # Node0's total balance should be starting balance, plus 10000 HVN for
        # two more matured blocks, minus 124000 for the double-spend, plus fees (which are
        # negative):
expected = starting_balance + 10000 - 124000 + fund_foo_tx["fee"] + fund_bar_tx["fee"] + doublespend_fee
assert_equal(self.nodes[0].getbalance(), expected)
assert_equal(self.nodes[0].getbalance("*"), expected)
# Final "" balance is starting_balance - amount moved to accounts - doublespend + subsidies +
# fees (which are negative)
assert_equal(self.nodes[0].getbalance("foo"), 121900)
assert_equal(self.nodes[0].getbalance("bar"), 2900)
assert_equal(self.nodes[0].getbalance(""), starting_balance
-121900
- 2900
-124000
+ 10000
+ fund_foo_tx["fee"]
+ fund_bar_tx["fee"]
+ doublespend_fee)
# Node1's "from0" account balance should be just the doublespend:
assert_equal(self.nodes[1].getbalance("from0"), 124000)
if __name__ == '__main__':
TxnMallTest().main()
|
py | 1a3d725dce91c48df13dc5ba2f07f6d24fe3841a | # Copyright (c) 2022, Rahib Hassan and Contributors
# See license.txt
import frappe
import unittest
def create_item(item_code):
if frappe.db.exists('Item', item_code):
return frappe.get_doc('Item', item_code)
item = frappe.get_doc({
'doctype': 'Item',
'item_code': item_code,
'item_name': item_code,
'maintain_stock': 1,
'default_warehouse': 'All Warehouse',
'opening_stock': 100,
'valuation_rate': 200
}).insert()
return item
class TestItem(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
frappe.db.rollback()
def test_stock_entry_creation(self):
item = create_item("Iron")
stock_entries = frappe.db.get_list('Stock Entry', {'stock_entry_type': 'Material Receipt'})
for d in stock_entries:
child_entry = frappe.db.get_list('Stock Entry Item', {'parent': d.name}, ['item_code'])
if child_entry[0].item_code == 'Iron':
return
frappe.throw("Stock Entry not created") |
py | 1a3d72e0d6bdd6144bfb0b2dac02cfad21b60df8 | import torch
import torchvision.models
from torchvision.ops import MultiScaleRoIAlign
from torchvision.models.detection.rpn import AnchorGenerator, RPNHead, RegionProposalNetwork
from torchvision.models.detection.roi_heads import RoIHeads
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor, TwoMLPHead
import unittest
class Tester(unittest.TestCase):
def _make_empty_sample(self, add_masks=False, add_keypoints=False):
images = [torch.rand((3, 100, 100), dtype=torch.float32)]
boxes = torch.zeros((0, 4), dtype=torch.float32)
negative_target = {"boxes": boxes,
"labels": torch.zeros(0, dtype=torch.int64),
"image_id": 4,
"area": (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0]),
"iscrowd": torch.zeros((0,), dtype=torch.int64)}
if add_masks:
negative_target["masks"] = torch.zeros(0, 100, 100, dtype=torch.uint8)
if add_keypoints:
negative_target["keypoints"] = torch.zeros(0, 17, 3, dtype=torch.float32)
targets = [negative_target]
return images, targets
def test_targets_to_anchors(self):
_, targets = self._make_empty_sample()
anchors = [torch.randint(-50, 50, (3, 4), dtype=torch.float32)]
anchor_sizes = ((32,), (64,), (128,), (256,), (512,))
aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes)
rpn_anchor_generator = AnchorGenerator(
anchor_sizes, aspect_ratios
)
rpn_head = RPNHead(4, rpn_anchor_generator.num_anchors_per_location()[0])
head = RegionProposalNetwork(
rpn_anchor_generator, rpn_head,
0.5, 0.3,
256, 0.5,
2000, 2000, 0.7)
labels, matched_gt_boxes = head.assign_targets_to_anchors(anchors, targets)
self.assertEqual(labels[0].sum(), 0)
self.assertEqual(labels[0].shape, torch.Size([anchors[0].shape[0]]))
self.assertEqual(labels[0].dtype, torch.float32)
self.assertEqual(matched_gt_boxes[0].sum(), 0)
self.assertEqual(matched_gt_boxes[0].shape, anchors[0].shape)
self.assertEqual(matched_gt_boxes[0].dtype, torch.float32)
def test_assign_targets_to_proposals(self):
proposals = [torch.randint(-50, 50, (20, 4), dtype=torch.float32)]
gt_boxes = [torch.zeros((0, 4), dtype=torch.float32)]
gt_labels = [torch.tensor([[0]], dtype=torch.int64)]
box_roi_pool = MultiScaleRoIAlign(
featmap_names=['0', '1', '2', '3'],
output_size=7,
sampling_ratio=2)
resolution = box_roi_pool.output_size[0]
representation_size = 1024
box_head = TwoMLPHead(
4 * resolution ** 2,
representation_size)
representation_size = 1024
box_predictor = FastRCNNPredictor(
representation_size,
2)
roi_heads = RoIHeads(
# Box
box_roi_pool, box_head, box_predictor,
0.5, 0.5,
512, 0.25,
None,
0.05, 0.5, 100)
matched_idxs, labels = roi_heads.assign_targets_to_proposals(proposals, gt_boxes, gt_labels)
self.assertEqual(matched_idxs[0].sum(), 0)
self.assertEqual(matched_idxs[0].shape, torch.Size([proposals[0].shape[0]]))
self.assertEqual(matched_idxs[0].dtype, torch.int64)
self.assertEqual(labels[0].sum(), 0)
self.assertEqual(labels[0].shape, torch.Size([proposals[0].shape[0]]))
self.assertEqual(labels[0].dtype, torch.int64)
def test_forward_negative_sample_frcnn(self):
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(
num_classes=2, min_size=100, max_size=100)
images, targets = self._make_empty_sample()
loss_dict = model(images, targets)
self.assertEqual(loss_dict["loss_box_reg"], torch.tensor(0.))
self.assertEqual(loss_dict["loss_rpn_box_reg"], torch.tensor(0.))
def test_forward_negative_sample_mrcnn(self):
model = torchvision.models.detection.maskrcnn_resnet50_fpn(
num_classes=2, min_size=100, max_size=100)
images, targets = self._make_empty_sample(add_masks=True)
loss_dict = model(images, targets)
self.assertEqual(loss_dict["loss_box_reg"], torch.tensor(0.))
self.assertEqual(loss_dict["loss_rpn_box_reg"], torch.tensor(0.))
self.assertEqual(loss_dict["loss_mask"], torch.tensor(0.))
def test_forward_negative_sample_krcnn(self):
model = torchvision.models.detection.keypointrcnn_resnet50_fpn(
num_classes=2, min_size=100, max_size=100)
images, targets = self._make_empty_sample(add_keypoints=True)
loss_dict = model(images, targets)
self.assertEqual(loss_dict["loss_box_reg"], torch.tensor(0.))
self.assertEqual(loss_dict["loss_rpn_box_reg"], torch.tensor(0.))
self.assertEqual(loss_dict["loss_keypoint"], torch.tensor(0.))
if __name__ == '__main__':
unittest.main()
|
py | 1a3d733bbc21bbf763ffed3832a3eb6dbfef9f73 | # -*- coding: utf-8 -*-
import base64
import json
import uuid
from unittest import skip
from django.contrib.auth.models import AnonymousUser
from django.test import RequestFactory, TestCase
from django.utils.encoding import force_bytes
from hooked.models import (TransactionStatus, WebHookClientApp,
WebHookTransaction)
from hooked.tokens import generate_token
from .factories.requests import WebHookRequestFactory
from .factories.views import WebHookViewFactory
class TestReceiversMiddelware(TestCase):
def setUp(self):
self.payload = str('{"foo": 1}')
self.app = WebHookClientApp.objects.create(
name='Pirate', identifier=uuid.uuid4(),
secret="pirate", need_authorization=True
)
# returns 401 if X_HOOKED_TOKEN is missing
def test_transaction_hook_wihout_token_and_return_401(self):
request = WebHookRequestFactory().perform(data=self.payload)
response = WebHookViewFactory.as_view()(request)
self.assertEqual(response.status_code, 401)
    # token is invalid
def test_invalid_authorized_request_transaction_hook_and_return_403(self):
request = WebHookRequestFactory(
token='wrong', app_id=self.app.identifier
).perform(data=self.payload)
response = WebHookViewFactory.as_view()(request)
self.assertEqual(response.status_code, 403)
    # everything is OK, return 201
def test_valid_authorized_request_transaction_hook_and_return_201(self):
token = generate_token(self.app.secret, self.payload)
request = WebHookRequestFactory(
token=token, app_id=self.app.identifier
).perform(data=self.payload)
response = WebHookViewFactory.as_view()(request)
self.assertEqual(response.status_code, 201)
|
py | 1a3d7353c5a800203b807d764729d6d875a9ff8c | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from mock import Mock
from mock import patch
from airflow import configuration
from airflow.contrib.hooks.jira_hook import JiraHook
from airflow import models
from airflow.utils import db
jira_client_mock = Mock(
name="jira_client"
)
class TestJiraHook(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
db.merge_conn(
models.Connection(
conn_id='jira_default', conn_type='jira',
host='https://localhost/jira/', port=443,
extra='{"verify": "False", "project": "AIRFLOW"}'))
@patch("airflow.contrib.hooks.jira_hook.JIRA", autospec=True,
return_value=jira_client_mock)
def test_jira_client_connection(self, jira_mock):
jira_hook = JiraHook()
assert jira_mock.called
self.assertIsInstance(jira_hook.client, Mock)
self.assertEqual(jira_hook.client.name, jira_mock.return_value.name)
if __name__ == '__main__':
unittest.main()
|
py | 1a3d73a6c52da2deb3d1d2f1db4c3862bf7713d4 | # -*- coding: utf-8 -*-
# Copyright (C) 2012, Almar Klein
#
# Visvis is distributed under the terms of the (new) BSD License.
# The full license can be found in 'license.txt'.
import visvis as vv
def closeAll():
""" closeAll()
Closes all figures.
"""
for fig in vv.BaseFigure._figures.values():
fig.Destroy()
|
py | 1a3d7567e98b0ee32327273b586ca07c58d3b54c | import unittest
from test import support
import gc
import weakref
import operator
import copy
import pickle
from random import randrange, shuffle
import warnings
import collections
import collections.abc
import itertools
class PassThru(Exception):
pass
def check_pass_thru():
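    # The yield below makes this a generator function, so PassThru is raised
    # on the first next() call rather than when check_pass_thru() itself is called.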
raise PassThru
yield 1
class BadCmp:
def __hash__(self):
return 1
def __eq__(self, other):
raise RuntimeError
class ReprWrapper:
'Used to test self-referential repr() calls'
def __repr__(self):
return repr(self.value)
class HashCountingInt(int):
'int-like object that counts the number of times __hash__ is called'
def __init__(self, *args):
self.hash_count = 0
def __hash__(self):
self.hash_count += 1
return int.__hash__(self)
class TestJointOps:
# Tests common to both set and frozenset
def setUp(self):
self.word = word = 'simsalabim'
self.otherword = 'madagascar'
self.letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
self.s = self.thetype(word)
self.d = dict.fromkeys(word)
def test_new_or_init(self):
self.assertRaises(TypeError, self.thetype, [], 2)
self.assertRaises(TypeError, set().__init__, a=1)
def test_uniquification(self):
actual = sorted(self.s)
expected = sorted(self.d)
self.assertEqual(actual, expected)
self.assertRaises(PassThru, self.thetype, check_pass_thru())
self.assertRaises(TypeError, self.thetype, [[]])
def test_len(self):
self.assertEqual(len(self.s), len(self.d))
def test_contains(self):
for c in self.letters:
self.assertEqual(c in self.s, c in self.d)
self.assertRaises(TypeError, self.s.__contains__, [[]])
s = self.thetype([frozenset(self.letters)])
self.assertIn(self.thetype(self.letters), s)
def test_union(self):
u = self.s.union(self.otherword)
for c in self.letters:
self.assertEqual(c in u, c in self.d or c in self.otherword)
self.assertEqual(self.s, self.thetype(self.word))
self.assertEqual(type(u), self.basetype)
self.assertRaises(PassThru, self.s.union, check_pass_thru())
self.assertRaises(TypeError, self.s.union, [[]])
for C in set, frozenset, dict.fromkeys, str, list, tuple:
self.assertEqual(self.thetype('abcba').union(C('cdc')), set('abcd'))
self.assertEqual(self.thetype('abcba').union(C('efgfe')), set('abcefg'))
self.assertEqual(self.thetype('abcba').union(C('ccb')), set('abc'))
self.assertEqual(self.thetype('abcba').union(C('ef')), set('abcef'))
self.assertEqual(self.thetype('abcba').union(C('ef'), C('fg')), set('abcefg'))
# Issue #6573
x = self.thetype()
self.assertEqual(x.union(set([1]), x, set([2])), self.thetype([1, 2]))
def test_or(self):
i = self.s.union(self.otherword)
self.assertEqual(self.s | set(self.otherword), i)
self.assertEqual(self.s | frozenset(self.otherword), i)
try:
self.s | self.otherword
except TypeError:
pass
else:
self.fail("s|t did not screen-out general iterables")
def test_intersection(self):
i = self.s.intersection(self.otherword)
for c in self.letters:
self.assertEqual(c in i, c in self.d and c in self.otherword)
self.assertEqual(self.s, self.thetype(self.word))
self.assertEqual(type(i), self.basetype)
self.assertRaises(PassThru, self.s.intersection, check_pass_thru())
for C in set, frozenset, dict.fromkeys, str, list, tuple:
self.assertEqual(self.thetype('abcba').intersection(C('cdc')), set('cc'))
self.assertEqual(self.thetype('abcba').intersection(C('efgfe')), set(''))
self.assertEqual(self.thetype('abcba').intersection(C('ccb')), set('bc'))
self.assertEqual(self.thetype('abcba').intersection(C('ef')), set(''))
self.assertEqual(self.thetype('abcba').intersection(C('cbcf'), C('bag')), set('b'))
s = self.thetype('abcba')
z = s.intersection()
if self.thetype == frozenset():
self.assertEqual(id(s), id(z))
else:
self.assertNotEqual(id(s), id(z))
def test_isdisjoint(self):
def f(s1, s2):
'Pure python equivalent of isdisjoint()'
return not set(s1).intersection(s2)
for larg in '', 'a', 'ab', 'abc', 'ababac', 'cdc', 'cc', 'efgfe', 'ccb', 'ef':
s1 = self.thetype(larg)
for rarg in '', 'a', 'ab', 'abc', 'ababac', 'cdc', 'cc', 'efgfe', 'ccb', 'ef':
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s2 = C(rarg)
actual = s1.isdisjoint(s2)
expected = f(s1, s2)
self.assertEqual(actual, expected)
self.assertTrue(actual is True or actual is False)
def test_and(self):
i = self.s.intersection(self.otherword)
self.assertEqual(self.s & set(self.otherword), i)
self.assertEqual(self.s & frozenset(self.otherword), i)
try:
self.s & self.otherword
except TypeError:
pass
else:
self.fail("s&t did not screen-out general iterables")
def test_difference(self):
i = self.s.difference(self.otherword)
for c in self.letters:
self.assertEqual(c in i, c in self.d and c not in self.otherword)
self.assertEqual(self.s, self.thetype(self.word))
self.assertEqual(type(i), self.basetype)
self.assertRaises(PassThru, self.s.difference, check_pass_thru())
self.assertRaises(TypeError, self.s.difference, [[]])
for C in set, frozenset, dict.fromkeys, str, list, tuple:
self.assertEqual(self.thetype('abcba').difference(C('cdc')), set('ab'))
self.assertEqual(self.thetype('abcba').difference(C('efgfe')), set('abc'))
self.assertEqual(self.thetype('abcba').difference(C('ccb')), set('a'))
self.assertEqual(self.thetype('abcba').difference(C('ef')), set('abc'))
self.assertEqual(self.thetype('abcba').difference(), set('abc'))
self.assertEqual(self.thetype('abcba').difference(C('a'), C('b')), set('c'))
def test_sub(self):
i = self.s.difference(self.otherword)
self.assertEqual(self.s - set(self.otherword), i)
self.assertEqual(self.s - frozenset(self.otherword), i)
try:
self.s - self.otherword
except TypeError:
pass
else:
self.fail("s-t did not screen-out general iterables")
def test_symmetric_difference(self):
i = self.s.symmetric_difference(self.otherword)
for c in self.letters:
self.assertEqual(c in i, (c in self.d) ^ (c in self.otherword))
self.assertEqual(self.s, self.thetype(self.word))
self.assertEqual(type(i), self.basetype)
self.assertRaises(PassThru, self.s.symmetric_difference, check_pass_thru())
self.assertRaises(TypeError, self.s.symmetric_difference, [[]])
for C in set, frozenset, dict.fromkeys, str, list, tuple:
self.assertEqual(self.thetype('abcba').symmetric_difference(C('cdc')), set('abd'))
self.assertEqual(self.thetype('abcba').symmetric_difference(C('efgfe')), set('abcefg'))
self.assertEqual(self.thetype('abcba').symmetric_difference(C('ccb')), set('a'))
self.assertEqual(self.thetype('abcba').symmetric_difference(C('ef')), set('abcef'))
def test_xor(self):
i = self.s.symmetric_difference(self.otherword)
self.assertEqual(self.s ^ set(self.otherword), i)
self.assertEqual(self.s ^ frozenset(self.otherword), i)
try:
self.s ^ self.otherword
except TypeError:
pass
else:
self.fail("s^t did not screen-out general iterables")
def test_equality(self):
self.assertEqual(self.s, set(self.word))
self.assertEqual(self.s, frozenset(self.word))
self.assertEqual(self.s == self.word, False)
self.assertNotEqual(self.s, set(self.otherword))
self.assertNotEqual(self.s, frozenset(self.otherword))
self.assertEqual(self.s != self.word, True)
def test_setOfFrozensets(self):
t = map(frozenset, ['abcdef', 'bcd', 'bdcb', 'fed', 'fedccba'])
s = self.thetype(t)
self.assertEqual(len(s), 3)
def test_sub_and_super(self):
p, q, r = map(self.thetype, ['ab', 'abcde', 'def'])
self.assertTrue(p < q)
self.assertTrue(p <= q)
self.assertTrue(q <= q)
self.assertTrue(q > p)
self.assertTrue(q >= p)
self.assertFalse(q < r)
self.assertFalse(q <= r)
self.assertFalse(q > r)
self.assertFalse(q >= r)
self.assertTrue(set('a').issubset('abc'))
self.assertTrue(set('abc').issuperset('a'))
self.assertFalse(set('a').issubset('cbs'))
self.assertFalse(set('cbs').issuperset('a'))
def test_pickling(self):
for i in range(pickle.HIGHEST_PROTOCOL + 1):
p = pickle.dumps(self.s, i)
dup = pickle.loads(p)
self.assertEqual(self.s, dup, "%s != %s" % (self.s, dup))
if type(self.s) not in (set, frozenset):
self.s.x = 10
p = pickle.dumps(self.s, i)
dup = pickle.loads(p)
self.assertEqual(self.s.x, dup.x)
def test_iterator_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
itorg = iter(self.s)
data = self.thetype(self.s)
d = pickle.dumps(itorg, proto)
it = pickle.loads(d)
# Set iterators unpickle as list iterators due to the
# undefined order of set items.
# self.assertEqual(type(itorg), type(it))
self.assertIsInstance(it, collections.abc.Iterator)
self.assertEqual(self.thetype(it), data)
it = pickle.loads(d)
try:
drop = next(it)
except StopIteration:
continue
d = pickle.dumps(it, proto)
it = pickle.loads(d)
self.assertEqual(self.thetype(it), data - self.thetype((drop,)))
def test_deepcopy(self):
class Tracer:
def __init__(self, value):
self.value = value
def __hash__(self):
return self.value
def __deepcopy__(self, memo=None):
return Tracer(self.value + 1)
t = Tracer(10)
s = self.thetype([t])
dup = copy.deepcopy(s)
self.assertNotEqual(id(s), id(dup))
for elem in dup:
newt = elem
self.assertNotEqual(id(t), id(newt))
self.assertEqual(t.value + 1, newt.value)
def test_gc(self):
# Create a nest of cycles to exercise overall ref count check
class A:
pass
s = set(A() for i in range(1000))
for elem in s:
elem.cycle = s
elem.sub = elem
elem.set = set([elem])
def test_subclass_with_custom_hash(self):
# Bug #1257731
class H(self.thetype):
def __hash__(self):
return int(id(self) & 0x7fffffff)
s=H()
f=set()
f.add(s)
self.assertIn(s, f)
f.remove(s)
f.add(s)
f.discard(s)
def test_badcmp(self):
s = self.thetype([BadCmp()])
# Detect comparison errors during insertion and lookup
self.assertRaises(RuntimeError, self.thetype, [BadCmp(), BadCmp()])
self.assertRaises(RuntimeError, s.__contains__, BadCmp())
# Detect errors during mutating operations
if hasattr(s, 'add'):
self.assertRaises(RuntimeError, s.add, BadCmp())
self.assertRaises(RuntimeError, s.discard, BadCmp())
self.assertRaises(RuntimeError, s.remove, BadCmp())
def test_cyclical_repr(self):
w = ReprWrapper()
s = self.thetype([w])
w.value = s
if self.thetype == set:
self.assertEqual(repr(s), '{set(...)}')
else:
name = repr(s).partition('(')[0] # strip class name
self.assertEqual(repr(s), '%s({%s(...)})' % (name, name))
def test_cyclical_print(self):
w = ReprWrapper()
s = self.thetype([w])
w.value = s
fo = open(support.TESTFN, "w")
try:
fo.write(str(s))
fo.close()
fo = open(support.TESTFN, "r")
self.assertEqual(fo.read(), repr(s))
finally:
fo.close()
support.unlink(support.TESTFN)
def test_do_not_rehash_dict_keys(self):
n = 10
d = dict.fromkeys(map(HashCountingInt, range(n)))
self.assertEqual(sum(elem.hash_count for elem in d), n)
s = self.thetype(d)
self.assertEqual(sum(elem.hash_count for elem in d), n)
s.difference(d)
self.assertEqual(sum(elem.hash_count for elem in d), n)
if hasattr(s, 'symmetric_difference_update'):
s.symmetric_difference_update(d)
self.assertEqual(sum(elem.hash_count for elem in d), n)
d2 = dict.fromkeys(set(d))
self.assertEqual(sum(elem.hash_count for elem in d), n)
d3 = dict.fromkeys(frozenset(d))
self.assertEqual(sum(elem.hash_count for elem in d), n)
d3 = dict.fromkeys(frozenset(d), 123)
self.assertEqual(sum(elem.hash_count for elem in d), n)
self.assertEqual(d3, dict.fromkeys(d, 123))
def test_container_iterator(self):
# Bug #3680: tp_traverse was not implemented for set iterator object
class C(object):
pass
obj = C()
ref = weakref.ref(obj)
container = set([obj, 1])
obj.x = iter(container)
del obj, container
gc.collect()
self.assertTrue(ref() is None, "Cycle was not collected")
def test_free_after_iterating(self):
support.check_free_after_iterating(self, iter, self.thetype)
class TestSet(TestJointOps, unittest.TestCase):
thetype = set
basetype = set
def test_init(self):
s = self.thetype()
s.__init__(self.word)
self.assertEqual(s, set(self.word))
s.__init__(self.otherword)
self.assertEqual(s, set(self.otherword))
        self.assertRaises(TypeError, s.__init__, s, 2)
        self.assertRaises(TypeError, s.__init__, 1)
def test_constructor_identity(self):
s = self.thetype(range(3))
t = self.thetype(s)
self.assertNotEqual(id(s), id(t))
def test_set_literal(self):
s = set([1,2,3])
t = {1,2,3}
self.assertEqual(s, t)
def test_set_literal_insertion_order(self):
# SF Issue #26020 -- Expect left to right insertion
s = {1, 1.0, True}
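        # 1, 1.0, and True are equal and hash equal, so only the first
        # literal (the int 1) is stored.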
self.assertEqual(len(s), 1)
stored_value = s.pop()
self.assertEqual(type(stored_value), int)
def test_set_literal_evaluation_order(self):
# Expect left to right expression evaluation
events = []
def record(obj):
events.append(obj)
s = {record(1), record(2), record(3)}
self.assertEqual(events, [1, 2, 3])
def test_hash(self):
self.assertRaises(TypeError, hash, self.s)
def test_clear(self):
self.s.clear()
self.assertEqual(self.s, set())
self.assertEqual(len(self.s), 0)
def test_copy(self):
dup = self.s.copy()
self.assertEqual(self.s, dup)
self.assertNotEqual(id(self.s), id(dup))
self.assertEqual(type(dup), self.basetype)
def test_add(self):
self.s.add('Q')
self.assertIn('Q', self.s)
dup = self.s.copy()
self.s.add('Q')
self.assertEqual(self.s, dup)
self.assertRaises(TypeError, self.s.add, [])
def test_remove(self):
self.s.remove('a')
self.assertNotIn('a', self.s)
self.assertRaises(KeyError, self.s.remove, 'Q')
self.assertRaises(TypeError, self.s.remove, [])
s = self.thetype([frozenset(self.word)])
self.assertIn(self.thetype(self.word), s)
s.remove(self.thetype(self.word))
self.assertNotIn(self.thetype(self.word), s)
self.assertRaises(KeyError, self.s.remove, self.thetype(self.word))
def test_remove_keyerror_unpacking(self):
# bug: www.python.org/sf/1576657
for v1 in ['Q', (1,)]:
try:
self.s.remove(v1)
except KeyError as e:
v2 = e.args[0]
self.assertEqual(v1, v2)
else:
self.fail()
def test_remove_keyerror_set(self):
key = self.thetype([3, 4])
try:
self.s.remove(key)
except KeyError as e:
self.assertTrue(e.args[0] is key,
"KeyError should be {0}, not {1}".format(key,
e.args[0]))
else:
self.fail()
def test_discard(self):
self.s.discard('a')
self.assertNotIn('a', self.s)
self.s.discard('Q')
self.assertRaises(TypeError, self.s.discard, [])
s = self.thetype([frozenset(self.word)])
self.assertIn(self.thetype(self.word), s)
s.discard(self.thetype(self.word))
self.assertNotIn(self.thetype(self.word), s)
s.discard(self.thetype(self.word))
def test_pop(self):
for i in range(len(self.s)):
elem = self.s.pop()
self.assertNotIn(elem, self.s)
self.assertRaises(KeyError, self.s.pop)
def test_update(self):
retval = self.s.update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
self.assertIn(c, self.s)
self.assertRaises(PassThru, self.s.update, check_pass_thru())
self.assertRaises(TypeError, self.s.update, [[]])
for p, q in (('cdc', 'abcd'), ('efgfe', 'abcefg'), ('ccb', 'abc'), ('ef', 'abcef')):
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.update(C(p)), None)
self.assertEqual(s, set(q))
for p in ('cdc', 'efgfe', 'ccb', 'ef', 'abcda'):
q = 'ahi'
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.update(C(p), C(q)), None)
self.assertEqual(s, set(s) | set(p) | set(q))
def test_ior(self):
self.s |= set(self.otherword)
for c in (self.word + self.otherword):
self.assertIn(c, self.s)
def test_intersection_update(self):
retval = self.s.intersection_update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
if c in self.otherword and c in self.word:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
self.assertRaises(PassThru, self.s.intersection_update, check_pass_thru())
self.assertRaises(TypeError, self.s.intersection_update, [[]])
for p, q in (('cdc', 'c'), ('efgfe', ''), ('ccb', 'bc'), ('ef', '')):
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.intersection_update(C(p)), None)
self.assertEqual(s, set(q))
ss = 'abcba'
s = self.thetype(ss)
t = 'cbc'
self.assertEqual(s.intersection_update(C(p), C(t)), None)
self.assertEqual(s, set('abcba')&set(p)&set(t))
def test_iand(self):
self.s &= set(self.otherword)
for c in (self.word + self.otherword):
if c in self.otherword and c in self.word:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
def test_difference_update(self):
retval = self.s.difference_update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
if c in self.word and c not in self.otherword:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
self.assertRaises(PassThru, self.s.difference_update, check_pass_thru())
self.assertRaises(TypeError, self.s.difference_update, [[]])
self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])
for p, q in (('cdc', 'ab'), ('efgfe', 'abc'), ('ccb', 'a'), ('ef', 'abc')):
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.difference_update(C(p)), None)
self.assertEqual(s, set(q))
s = self.thetype('abcdefghih')
s.difference_update()
self.assertEqual(s, self.thetype('abcdefghih'))
s = self.thetype('abcdefghih')
s.difference_update(C('aba'))
self.assertEqual(s, self.thetype('cdefghih'))
s = self.thetype('abcdefghih')
s.difference_update(C('cdc'), C('aba'))
self.assertEqual(s, self.thetype('efghih'))
def test_isub(self):
self.s -= set(self.otherword)
for c in (self.word + self.otherword):
if c in self.word and c not in self.otherword:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
def test_symmetric_difference_update(self):
retval = self.s.symmetric_difference_update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
if (c in self.word) ^ (c in self.otherword):
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
self.assertRaises(PassThru, self.s.symmetric_difference_update, check_pass_thru())
self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])
for p, q in (('cdc', 'abd'), ('efgfe', 'abcefg'), ('ccb', 'a'), ('ef', 'abcef')):
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.symmetric_difference_update(C(p)), None)
self.assertEqual(s, set(q))
def test_ixor(self):
self.s ^= set(self.otherword)
for c in (self.word + self.otherword):
if (c in self.word) ^ (c in self.otherword):
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
def test_inplace_on_self(self):
t = self.s.copy()
t |= t
self.assertEqual(t, self.s)
t &= t
self.assertEqual(t, self.s)
t -= t
self.assertEqual(t, self.thetype())
t = self.s.copy()
t ^= t
self.assertEqual(t, self.thetype())
def test_weakref(self):
s = self.thetype('gallahad')
p = weakref.proxy(s)
self.assertEqual(str(p), str(s))
s = None
self.assertRaises(ReferenceError, str, p)
def test_rich_compare(self):
class TestRichSetCompare:
def __gt__(self, some_set):
self.gt_called = True
return False
def __lt__(self, some_set):
self.lt_called = True
return False
def __ge__(self, some_set):
self.ge_called = True
return False
def __le__(self, some_set):
self.le_called = True
return False
# This first tries the builtin rich set comparison, which doesn't know
# how to handle the custom object. Upon returning NotImplemented, the
# corresponding comparison on the right object is invoked.
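        # e.g. for myset < myobj, set.__lt__ returns NotImplemented for the
        # unknown type, so Python falls back to myobj.__gt__(myset).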
myset = {1, 2, 3}
myobj = TestRichSetCompare()
myset < myobj
self.assertTrue(myobj.gt_called)
myobj = TestRichSetCompare()
myset > myobj
self.assertTrue(myobj.lt_called)
myobj = TestRichSetCompare()
myset <= myobj
self.assertTrue(myobj.ge_called)
myobj = TestRichSetCompare()
myset >= myobj
self.assertTrue(myobj.le_called)
@unittest.skipUnless(hasattr(set, "test_c_api"),
'C API test only available in a debug build')
def test_c_api(self):
self.assertEqual(set().test_c_api(), True)
class SetSubclass(set):
pass
class TestSetSubclass(TestSet):
thetype = SetSubclass
basetype = set
class SetSubclassWithKeywordArgs(set):
def __init__(self, iterable=[], newarg=None):
set.__init__(self, iterable)
class TestSetSubclassWithKeywordArgs(TestSet):
def test_keywords_in_subclass(self):
'SF bug #1486663 -- this used to erroneously raise a TypeError'
SetSubclassWithKeywordArgs(newarg=1)
class TestFrozenSet(TestJointOps, unittest.TestCase):
thetype = frozenset
basetype = frozenset
def test_init(self):
s = self.thetype(self.word)
s.__init__(self.otherword)
self.assertEqual(s, set(self.word))
def test_singleton_empty_frozenset(self):
f = frozenset()
efs = [frozenset(), frozenset([]), frozenset(()), frozenset(''),
frozenset(), frozenset([]), frozenset(()), frozenset(''),
frozenset(range(0)), frozenset(frozenset()),
frozenset(f), f]
# All of the empty frozensets should have just one id()
self.assertEqual(len(set(map(id, efs))), 1)
def test_constructor_identity(self):
s = self.thetype(range(3))
t = self.thetype(s)
self.assertEqual(id(s), id(t))
def test_hash(self):
self.assertEqual(hash(self.thetype('abcdeb')),
hash(self.thetype('ebecda')))
# make sure that all permutations give the same hash value
n = 100
seq = [randrange(n) for i in range(n)]
results = set()
for i in range(200):
shuffle(seq)
results.add(hash(self.thetype(seq)))
self.assertEqual(len(results), 1)
def test_copy(self):
dup = self.s.copy()
self.assertEqual(id(self.s), id(dup))
def test_frozen_as_dictkey(self):
seq = list(range(10)) + list('abcdefg') + ['apple']
key1 = self.thetype(seq)
key2 = self.thetype(reversed(seq))
self.assertEqual(key1, key2)
self.assertNotEqual(id(key1), id(key2))
d = {}
d[key1] = 42
self.assertEqual(d[key2], 42)
def test_hash_caching(self):
f = self.thetype('abcdcda')
self.assertEqual(hash(f), hash(f))
def test_hash_effectiveness(self):
n = 13
hashvalues = set()
addhashvalue = hashvalues.add
elemmasks = [(i+1, 1<<i) for i in range(n)]
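        # Each i in range(2**n) selects one subset via its bit mask, so every
        # one of the 2**n possible frozensets is hashed exactly once.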
for i in range(2**n):
addhashvalue(hash(frozenset([e for e, m in elemmasks if m&i])))
self.assertEqual(len(hashvalues), 2**n)
def zf_range(n):
# https://en.wikipedia.org/wiki/Set-theoretic_definition_of_natural_numbers
nums = [frozenset()]
for i in range(n-1):
num = frozenset(nums)
nums.append(num)
return nums[:n]
def powerset(s):
for i in range(len(s)+1):
yield from map(frozenset, itertools.combinations(s, i))
for n in range(18):
t = 2 ** n
mask = t - 1
for nums in (range, zf_range):
u = len({h & mask for h in map(hash, powerset(nums(n)))})
self.assertGreater(4*u, t)
class FrozenSetSubclass(frozenset):
pass
class TestFrozenSetSubclass(TestFrozenSet):
thetype = FrozenSetSubclass
basetype = frozenset
def test_constructor_identity(self):
s = self.thetype(range(3))
t = self.thetype(s)
self.assertNotEqual(id(s), id(t))
def test_copy(self):
dup = self.s.copy()
self.assertNotEqual(id(self.s), id(dup))
def test_nested_empty_constructor(self):
s = self.thetype()
t = self.thetype(s)
self.assertEqual(s, t)
def test_singleton_empty_frozenset(self):
Frozenset = self.thetype
f = frozenset()
F = Frozenset()
efs = [Frozenset(), Frozenset([]), Frozenset(()), Frozenset(''),
Frozenset(), Frozenset([]), Frozenset(()), Frozenset(''),
Frozenset(range(0)), Frozenset(Frozenset()),
Frozenset(frozenset()), f, F, Frozenset(f), Frozenset(F)]
# All empty frozenset subclass instances should have different ids
self.assertEqual(len(set(map(id, efs))), len(efs))
# Tests taken from test_sets.py =============================================
empty_set = set()
#==============================================================================
class TestBasicOps:
def test_repr(self):
if self.repr is not None:
self.assertEqual(repr(self.set), self.repr)
def check_repr_against_values(self):
text = repr(self.set)
self.assertTrue(text.startswith('{'))
self.assertTrue(text.endswith('}'))
result = text[1:-1].split(', ')
result.sort()
sorted_repr_values = [repr(value) for value in self.values]
sorted_repr_values.sort()
self.assertEqual(result, sorted_repr_values)
def test_print(self):
try:
fo = open(support.TESTFN, "w")
fo.write(str(self.set))
fo.close()
fo = open(support.TESTFN, "r")
self.assertEqual(fo.read(), repr(self.set))
finally:
fo.close()
support.unlink(support.TESTFN)
def test_length(self):
self.assertEqual(len(self.set), self.length)
def test_self_equality(self):
self.assertEqual(self.set, self.set)
def test_equivalent_equality(self):
self.assertEqual(self.set, self.dup)
def test_copy(self):
self.assertEqual(self.set.copy(), self.dup)
def test_self_union(self):
result = self.set | self.set
self.assertEqual(result, self.dup)
def test_empty_union(self):
result = self.set | empty_set
self.assertEqual(result, self.dup)
def test_union_empty(self):
result = empty_set | self.set
self.assertEqual(result, self.dup)
def test_self_intersection(self):
result = self.set & self.set
self.assertEqual(result, self.dup)
def test_empty_intersection(self):
result = self.set & empty_set
self.assertEqual(result, empty_set)
def test_intersection_empty(self):
result = empty_set & self.set
self.assertEqual(result, empty_set)
def test_self_isdisjoint(self):
result = self.set.isdisjoint(self.set)
self.assertEqual(result, not self.set)
def test_empty_isdisjoint(self):
result = self.set.isdisjoint(empty_set)
self.assertEqual(result, True)
def test_isdisjoint_empty(self):
result = empty_set.isdisjoint(self.set)
self.assertEqual(result, True)
def test_self_symmetric_difference(self):
result = self.set ^ self.set
self.assertEqual(result, empty_set)
def test_empty_symmetric_difference(self):
result = self.set ^ empty_set
self.assertEqual(result, self.set)
def test_self_difference(self):
result = self.set - self.set
self.assertEqual(result, empty_set)
def test_empty_difference(self):
result = self.set - empty_set
self.assertEqual(result, self.dup)
def test_empty_difference_rev(self):
result = empty_set - self.set
self.assertEqual(result, empty_set)
def test_iteration(self):
for v in self.set:
self.assertIn(v, self.values)
setiter = iter(self.set)
self.assertEqual(setiter.__length_hint__(), len(self.set))
def test_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
p = pickle.dumps(self.set, proto)
copy = pickle.loads(p)
self.assertEqual(self.set, copy,
"%s != %s" % (self.set, copy))
#------------------------------------------------------------------------------
class TestBasicOpsEmpty(TestBasicOps, unittest.TestCase):
def setUp(self):
self.case = "empty set"
self.values = []
self.set = set(self.values)
self.dup = set(self.values)
self.length = 0
self.repr = "set()"
#------------------------------------------------------------------------------
class TestBasicOpsSingleton(TestBasicOps, unittest.TestCase):
def setUp(self):
self.case = "unit set (number)"
self.values = [3]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 1
self.repr = "{3}"
def test_in(self):
self.assertIn(3, self.set)
def test_not_in(self):
self.assertNotIn(2, self.set)
#------------------------------------------------------------------------------
class TestBasicOpsTuple(TestBasicOps, unittest.TestCase):
def setUp(self):
self.case = "unit set (tuple)"
self.values = [(0, "zero")]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 1
self.repr = "{(0, 'zero')}"
def test_in(self):
self.assertIn((0, "zero"), self.set)
def test_not_in(self):
self.assertNotIn(9, self.set)
#------------------------------------------------------------------------------
class TestBasicOpsTriple(TestBasicOps, unittest.TestCase):
def setUp(self):
self.case = "triple set"
self.values = [0, "zero", operator.add]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 3
self.repr = None
#------------------------------------------------------------------------------
class TestBasicOpsString(TestBasicOps, unittest.TestCase):
def setUp(self):
self.case = "string set"
self.values = ["a", "b", "c"]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 3
def test_repr(self):
self.check_repr_against_values()
#------------------------------------------------------------------------------
class TestBasicOpsBytes(TestBasicOps, unittest.TestCase):
def setUp(self):
self.case = "bytes set"
self.values = [b"a", b"b", b"c"]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 3
def test_repr(self):
self.check_repr_against_values()
#------------------------------------------------------------------------------
class TestBasicOpsMixedStringBytes(TestBasicOps, unittest.TestCase):
def setUp(self):
self._warning_filters = support.check_warnings()
self._warning_filters.__enter__()
warnings.simplefilter('ignore', BytesWarning)
self.case = "string and bytes set"
self.values = ["a", "b", b"a", b"b"]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 4
def tearDown(self):
self._warning_filters.__exit__(None, None, None)
def test_repr(self):
self.check_repr_against_values()
#==============================================================================
def baditer():
raise TypeError
yield True
def gooditer():
yield True
class TestExceptionPropagation(unittest.TestCase):
"""SF 628246: Set constructor should not trap iterator TypeErrors"""
def test_instanceWithException(self):
self.assertRaises(TypeError, set, baditer())
def test_instancesWithoutException(self):
# All of these iterables should load without exception.
set([1,2,3])
set((1,2,3))
set({'one':1, 'two':2, 'three':3})
set(range(3))
set('abc')
set(gooditer())
def test_changingSizeWhileIterating(self):
s = set([1,2,3])
try:
for i in s:
s.update([4])
except RuntimeError:
pass
else:
self.fail("no exception when changing size during iteration")
#==============================================================================
class TestSetOfSets(unittest.TestCase):
def test_constructor(self):
inner = frozenset([1])
outer = set([inner])
element = outer.pop()
self.assertEqual(type(element), frozenset)
outer.add(inner) # Rebuild set of sets with .add method
outer.remove(inner)
self.assertEqual(outer, set()) # Verify that remove worked
outer.discard(inner) # Absence of KeyError indicates working fine
#==============================================================================
class TestBinaryOps(unittest.TestCase):
def setUp(self):
self.set = set((2, 4, 6))
def test_eq(self): # SF bug 643115
self.assertEqual(self.set, set({2:1,4:3,6:5}))
def test_union_subset(self):
result = self.set | set([2])
self.assertEqual(result, set((2, 4, 6)))
def test_union_superset(self):
result = self.set | set([2, 4, 6, 8])
self.assertEqual(result, set([2, 4, 6, 8]))
def test_union_overlap(self):
result = self.set | set([3, 4, 5])
self.assertEqual(result, set([2, 3, 4, 5, 6]))
def test_union_non_overlap(self):
result = self.set | set([8])
self.assertEqual(result, set([2, 4, 6, 8]))
def test_intersection_subset(self):
result = self.set & set((2, 4))
self.assertEqual(result, set((2, 4)))
def test_intersection_superset(self):
result = self.set & set([2, 4, 6, 8])
self.assertEqual(result, set([2, 4, 6]))
def test_intersection_overlap(self):
result = self.set & set([3, 4, 5])
self.assertEqual(result, set([4]))
def test_intersection_non_overlap(self):
result = self.set & set([8])
self.assertEqual(result, empty_set)
def test_isdisjoint_subset(self):
result = self.set.isdisjoint(set((2, 4)))
self.assertEqual(result, False)
def test_isdisjoint_superset(self):
result = self.set.isdisjoint(set([2, 4, 6, 8]))
self.assertEqual(result, False)
def test_isdisjoint_overlap(self):
result = self.set.isdisjoint(set([3, 4, 5]))
self.assertEqual(result, False)
def test_isdisjoint_non_overlap(self):
result = self.set.isdisjoint(set([8]))
self.assertEqual(result, True)
def test_sym_difference_subset(self):
result = self.set ^ set((2, 4))
self.assertEqual(result, set([6]))
def test_sym_difference_superset(self):
result = self.set ^ set((2, 4, 6, 8))
self.assertEqual(result, set([8]))
def test_sym_difference_overlap(self):
result = self.set ^ set((3, 4, 5))
self.assertEqual(result, set([2, 3, 5, 6]))
def test_sym_difference_non_overlap(self):
result = self.set ^ set([8])
self.assertEqual(result, set([2, 4, 6, 8]))
#==============================================================================
class TestUpdateOps(unittest.TestCase):
def setUp(self):
self.set = set((2, 4, 6))
def test_union_subset(self):
self.set |= set([2])
self.assertEqual(self.set, set((2, 4, 6)))
def test_union_superset(self):
self.set |= set([2, 4, 6, 8])
self.assertEqual(self.set, set([2, 4, 6, 8]))
def test_union_overlap(self):
self.set |= set([3, 4, 5])
self.assertEqual(self.set, set([2, 3, 4, 5, 6]))
def test_union_non_overlap(self):
self.set |= set([8])
self.assertEqual(self.set, set([2, 4, 6, 8]))
def test_union_method_call(self):
self.set.update(set([3, 4, 5]))
self.assertEqual(self.set, set([2, 3, 4, 5, 6]))
def test_intersection_subset(self):
self.set &= set((2, 4))
self.assertEqual(self.set, set((2, 4)))
def test_intersection_superset(self):
self.set &= set([2, 4, 6, 8])
self.assertEqual(self.set, set([2, 4, 6]))
def test_intersection_overlap(self):
self.set &= set([3, 4, 5])
self.assertEqual(self.set, set([4]))
def test_intersection_non_overlap(self):
self.set &= set([8])
self.assertEqual(self.set, empty_set)
def test_intersection_method_call(self):
self.set.intersection_update(set([3, 4, 5]))
self.assertEqual(self.set, set([4]))
def test_sym_difference_subset(self):
self.set ^= set((2, 4))
self.assertEqual(self.set, set([6]))
def test_sym_difference_superset(self):
self.set ^= set((2, 4, 6, 8))
self.assertEqual(self.set, set([8]))
def test_sym_difference_overlap(self):
self.set ^= set((3, 4, 5))
self.assertEqual(self.set, set([2, 3, 5, 6]))
def test_sym_difference_non_overlap(self):
self.set ^= set([8])
self.assertEqual(self.set, set([2, 4, 6, 8]))
def test_sym_difference_method_call(self):
self.set.symmetric_difference_update(set([3, 4, 5]))
self.assertEqual(self.set, set([2, 3, 5, 6]))
def test_difference_subset(self):
self.set -= set((2, 4))
self.assertEqual(self.set, set([6]))
def test_difference_superset(self):
self.set -= set((2, 4, 6, 8))
self.assertEqual(self.set, set([]))
def test_difference_overlap(self):
self.set -= set((3, 4, 5))
self.assertEqual(self.set, set([2, 6]))
def test_difference_non_overlap(self):
self.set -= set([8])
self.assertEqual(self.set, set([2, 4, 6]))
def test_difference_method_call(self):
self.set.difference_update(set([3, 4, 5]))
self.assertEqual(self.set, set([2, 6]))
#==============================================================================
class TestMutate(unittest.TestCase):
def setUp(self):
self.values = ["a", "b", "c"]
self.set = set(self.values)
def test_add_present(self):
self.set.add("c")
self.assertEqual(self.set, set("abc"))
def test_add_absent(self):
self.set.add("d")
self.assertEqual(self.set, set("abcd"))
def test_add_until_full(self):
tmp = set()
expected_len = 0
for v in self.values:
tmp.add(v)
expected_len += 1
self.assertEqual(len(tmp), expected_len)
self.assertEqual(tmp, self.set)
def test_remove_present(self):
self.set.remove("b")
self.assertEqual(self.set, set("ac"))
def test_remove_absent(self):
try:
self.set.remove("d")
self.fail("Removing missing element should have raised LookupError")
except LookupError:
pass
def test_remove_until_empty(self):
expected_len = len(self.set)
for v in self.values:
self.set.remove(v)
expected_len -= 1
self.assertEqual(len(self.set), expected_len)
def test_discard_present(self):
self.set.discard("c")
self.assertEqual(self.set, set("ab"))
def test_discard_absent(self):
self.set.discard("d")
self.assertEqual(self.set, set("abc"))
def test_clear(self):
self.set.clear()
self.assertEqual(len(self.set), 0)
def test_pop(self):
popped = {}
while self.set:
popped[self.set.pop()] = None
self.assertEqual(len(popped), len(self.values))
for v in self.values:
self.assertIn(v, popped)
def test_update_empty_tuple(self):
self.set.update(())
self.assertEqual(self.set, set(self.values))
def test_update_unit_tuple_overlap(self):
self.set.update(("a",))
self.assertEqual(self.set, set(self.values))
def test_update_unit_tuple_non_overlap(self):
self.set.update(("a", "z"))
self.assertEqual(self.set, set(self.values + ["z"]))
#==============================================================================
class TestSubsets:
case2method = {"<=": "issubset",
">=": "issuperset",
}
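    # For each operator, the spelling that applies when the operands are swapped.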
reverse = {"==": "==",
"!=": "!=",
"<": ">",
">": "<",
"<=": ">=",
">=": "<=",
}
def test_issubset(self):
x = self.left
y = self.right
for case in "!=", "==", "<", "<=", ">", ">=":
expected = case in self.cases
# Test the binary infix spelling.
result = eval("x" + case + "y", locals())
self.assertEqual(result, expected)
# Test the "friendly" method-name spelling, if one exists.
if case in TestSubsets.case2method:
method = getattr(x, TestSubsets.case2method[case])
result = method(y)
self.assertEqual(result, expected)
# Now do the same for the operands reversed.
rcase = TestSubsets.reverse[case]
result = eval("y" + rcase + "x", locals())
self.assertEqual(result, expected)
if rcase in TestSubsets.case2method:
method = getattr(y, TestSubsets.case2method[rcase])
result = method(x)
self.assertEqual(result, expected)
#------------------------------------------------------------------------------
class TestSubsetEqualEmpty(TestSubsets, unittest.TestCase):
left = set()
right = set()
name = "both empty"
cases = "==", "<=", ">="
#------------------------------------------------------------------------------
class TestSubsetEqualNonEmpty(TestSubsets, unittest.TestCase):
left = set([1, 2])
right = set([1, 2])
name = "equal pair"
cases = "==", "<=", ">="
#------------------------------------------------------------------------------
class TestSubsetEmptyNonEmpty(TestSubsets, unittest.TestCase):
left = set()
right = set([1, 2])
name = "one empty, one non-empty"
cases = "!=", "<", "<="
#------------------------------------------------------------------------------
class TestSubsetPartial(TestSubsets, unittest.TestCase):
left = set([1])
right = set([1, 2])
name = "one a non-empty proper subset of other"
cases = "!=", "<", "<="
#------------------------------------------------------------------------------
class TestSubsetNonOverlap(TestSubsets, unittest.TestCase):
left = set([1])
right = set([2])
name = "neither empty, neither contains"
cases = "!="
#==============================================================================
class TestOnlySetsInBinaryOps:
def test_eq_ne(self):
# Unlike the others, this is testing that == and != *are* allowed.
self.assertEqual(self.other == self.set, False)
self.assertEqual(self.set == self.other, False)
self.assertEqual(self.other != self.set, True)
self.assertEqual(self.set != self.other, True)
def test_ge_gt_le_lt(self):
self.assertRaises(TypeError, lambda: self.set < self.other)
self.assertRaises(TypeError, lambda: self.set <= self.other)
self.assertRaises(TypeError, lambda: self.set > self.other)
self.assertRaises(TypeError, lambda: self.set >= self.other)
self.assertRaises(TypeError, lambda: self.other < self.set)
self.assertRaises(TypeError, lambda: self.other <= self.set)
self.assertRaises(TypeError, lambda: self.other > self.set)
self.assertRaises(TypeError, lambda: self.other >= self.set)
def test_update_operator(self):
try:
self.set |= self.other
except TypeError:
pass
else:
self.fail("expected TypeError")
def test_update(self):
if self.otherIsIterable:
self.set.update(self.other)
else:
self.assertRaises(TypeError, self.set.update, self.other)
def test_union(self):
self.assertRaises(TypeError, lambda: self.set | self.other)
self.assertRaises(TypeError, lambda: self.other | self.set)
if self.otherIsIterable:
self.set.union(self.other)
else:
self.assertRaises(TypeError, self.set.union, self.other)
def test_intersection_update_operator(self):
try:
self.set &= self.other
except TypeError:
pass
else:
self.fail("expected TypeError")
def test_intersection_update(self):
if self.otherIsIterable:
self.set.intersection_update(self.other)
else:
self.assertRaises(TypeError,
self.set.intersection_update,
self.other)
def test_intersection(self):
self.assertRaises(TypeError, lambda: self.set & self.other)
self.assertRaises(TypeError, lambda: self.other & self.set)
if self.otherIsIterable:
self.set.intersection(self.other)
else:
self.assertRaises(TypeError, self.set.intersection, self.other)
def test_sym_difference_update_operator(self):
try:
self.set ^= self.other
except TypeError:
pass
else:
self.fail("expected TypeError")
def test_sym_difference_update(self):
if self.otherIsIterable:
self.set.symmetric_difference_update(self.other)
else:
self.assertRaises(TypeError,
self.set.symmetric_difference_update,
self.other)
def test_sym_difference(self):
self.assertRaises(TypeError, lambda: self.set ^ self.other)
self.assertRaises(TypeError, lambda: self.other ^ self.set)
if self.otherIsIterable:
self.set.symmetric_difference(self.other)
else:
self.assertRaises(TypeError, self.set.symmetric_difference, self.other)
def test_difference_update_operator(self):
try:
self.set -= self.other
except TypeError:
pass
else:
self.fail("expected TypeError")
def test_difference_update(self):
if self.otherIsIterable:
self.set.difference_update(self.other)
else:
self.assertRaises(TypeError,
self.set.difference_update,
self.other)
def test_difference(self):
self.assertRaises(TypeError, lambda: self.set - self.other)
self.assertRaises(TypeError, lambda: self.other - self.set)
if self.otherIsIterable:
self.set.difference(self.other)
else:
self.assertRaises(TypeError, self.set.difference, self.other)
#------------------------------------------------------------------------------
class TestOnlySetsNumeric(TestOnlySetsInBinaryOps, unittest.TestCase):
def setUp(self):
self.set = set((1, 2, 3))
self.other = 19
self.otherIsIterable = False
#------------------------------------------------------------------------------
class TestOnlySetsDict(TestOnlySetsInBinaryOps, unittest.TestCase):
def setUp(self):
self.set = set((1, 2, 3))
self.other = {1:2, 3:4}
self.otherIsIterable = True
#------------------------------------------------------------------------------
class TestOnlySetsOperator(TestOnlySetsInBinaryOps, unittest.TestCase):
def setUp(self):
self.set = set((1, 2, 3))
self.other = operator.add
self.otherIsIterable = False
#------------------------------------------------------------------------------
class TestOnlySetsTuple(TestOnlySetsInBinaryOps, unittest.TestCase):
def setUp(self):
self.set = set((1, 2, 3))
self.other = (2, 4, 6)
self.otherIsIterable = True
#------------------------------------------------------------------------------
class TestOnlySetsString(TestOnlySetsInBinaryOps, unittest.TestCase):
def setUp(self):
self.set = set((1, 2, 3))
self.other = 'abc'
self.otherIsIterable = True
#------------------------------------------------------------------------------
class TestOnlySetsGenerator(TestOnlySetsInBinaryOps, unittest.TestCase):
def setUp(self):
def gen():
for i in range(0, 10, 2):
yield i
self.set = set((1, 2, 3))
self.other = gen()
self.otherIsIterable = True
#==============================================================================
class TestCopying:
def test_copy(self):
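        # set.copy() is shallow, so the copy must contain the very same
        # element objects (checked with `is` below).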
dup = self.set.copy()
dup_list = sorted(dup, key=repr)
set_list = sorted(self.set, key=repr)
self.assertEqual(len(dup_list), len(set_list))
for i in range(len(dup_list)):
self.assertTrue(dup_list[i] is set_list[i])
def test_deep_copy(self):
dup = copy.deepcopy(self.set)
##print type(dup), repr(dup)
dup_list = sorted(dup, key=repr)
set_list = sorted(self.set, key=repr)
self.assertEqual(len(dup_list), len(set_list))
for i in range(len(dup_list)):
self.assertEqual(dup_list[i], set_list[i])
#------------------------------------------------------------------------------
class TestCopyingEmpty(TestCopying, unittest.TestCase):
def setUp(self):
self.set = set()
#------------------------------------------------------------------------------
class TestCopyingSingleton(TestCopying, unittest.TestCase):
def setUp(self):
self.set = set(["hello"])
#------------------------------------------------------------------------------
class TestCopyingTriple(TestCopying, unittest.TestCase):
def setUp(self):
self.set = set(["zero", 0, None])
#------------------------------------------------------------------------------
class TestCopyingTuple(TestCopying, unittest.TestCase):
def setUp(self):
self.set = set([(1, 2)])
#------------------------------------------------------------------------------
class TestCopyingNested(TestCopying, unittest.TestCase):
def setUp(self):
self.set = set([((1, 2), (3, 4))])
#==============================================================================
class TestIdentities(unittest.TestCase):
def setUp(self):
self.a = set('abracadabra')
self.b = set('alacazam')
def test_binopsVsSubsets(self):
a, b = self.a, self.b
self.assertTrue(a - b < a)
self.assertTrue(b - a < b)
self.assertTrue(a & b < a)
self.assertTrue(a & b < b)
self.assertTrue(a | b > a)
self.assertTrue(a | b > b)
self.assertTrue(a ^ b < a | b)
def test_commutativity(self):
a, b = self.a, self.b
self.assertEqual(a&b, b&a)
self.assertEqual(a|b, b|a)
self.assertEqual(a^b, b^a)
if a != b:
self.assertNotEqual(a-b, b-a)
def test_summations(self):
# check that sums of parts equal the whole
a, b = self.a, self.b
self.assertEqual((a-b)|(a&b)|(b-a), a|b)
self.assertEqual((a&b)|(a^b), a|b)
self.assertEqual(a|(b-a), a|b)
self.assertEqual((a-b)|b, a|b)
self.assertEqual((a-b)|(a&b), a)
self.assertEqual((b-a)|(a&b), b)
self.assertEqual((a-b)|(b-a), a^b)
def test_exclusion(self):
# check that inverse operations show non-overlap
a, b, zero = self.a, self.b, set()
self.assertEqual((a-b)&b, zero)
self.assertEqual((b-a)&a, zero)
self.assertEqual((a&b)&(a^b), zero)
# Tests derived from test_itertools.py =======================================
def R(seqn):
'Regular generator'
for i in seqn:
yield i
class G:
'Sequence using __getitem__'
def __init__(self, seqn):
self.seqn = seqn
def __getitem__(self, i):
return self.seqn[i]
class I:
'Sequence using iterator protocol'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class Ig:
'Sequence using iterator protocol defined with a generator'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
for val in self.seqn:
yield val
class X:
'Missing __getitem__ and __iter__'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class N:
'Iterator missing __next__()'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
class E:
'Test propagation of exceptions'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
3 // 0
class S:
'Test immediate stop'
def __init__(self, seqn):
pass
def __iter__(self):
return self
def __next__(self):
raise StopIteration
from itertools import chain
def L(seqn):
'Test multiple tiers of iterators'
return chain(map(lambda x:x, R(Ig(G(seqn)))))
class TestVariousIteratorArgs(unittest.TestCase):
def test_constructor(self):
for cons in (set, frozenset):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(sorted(cons(g(s)), key=repr), sorted(g(s), key=repr))
self.assertRaises(TypeError, cons , X(s))
self.assertRaises(TypeError, cons , N(s))
self.assertRaises(ZeroDivisionError, cons , E(s))
def test_inline_methods(self):
s = set('november')
for data in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5), 'december'):
for meth in (s.union, s.intersection, s.difference, s.symmetric_difference, s.isdisjoint):
for g in (G, I, Ig, L, R):
expected = meth(data)
actual = meth(g(data))
if isinstance(expected, bool):
self.assertEqual(actual, expected)
else:
self.assertEqual(sorted(actual, key=repr), sorted(expected, key=repr))
self.assertRaises(TypeError, meth, X(s))
self.assertRaises(TypeError, meth, N(s))
self.assertRaises(ZeroDivisionError, meth, E(s))
def test_inplace_methods(self):
for data in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5), 'december'):
for methname in ('update', 'intersection_update',
'difference_update', 'symmetric_difference_update'):
for g in (G, I, Ig, S, L, R):
s = set('january')
t = s.copy()
getattr(s, methname)(list(g(data)))
getattr(t, methname)(g(data))
self.assertEqual(sorted(s, key=repr), sorted(t, key=repr))
self.assertRaises(TypeError, getattr(set('january'), methname), X(data))
self.assertRaises(TypeError, getattr(set('january'), methname), N(data))
self.assertRaises(ZeroDivisionError, getattr(set('january'), methname), E(data))
class bad_eq:
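    # When be_bad is set, __eq__ clears set2 mid-merge and raises, reproducing
    # the crash exercised by test_8420_set_merge below.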
def __eq__(self, other):
if be_bad:
set2.clear()
raise ZeroDivisionError
return self is other
def __hash__(self):
return 0
class bad_dict_clear:
def __eq__(self, other):
if be_bad:
dict2.clear()
return self is other
def __hash__(self):
return 0
class TestWeirdBugs(unittest.TestCase):
def test_8420_set_merge(self):
# This used to segfault
global be_bad, set2, dict2
be_bad = False
set1 = {bad_eq()}
set2 = {bad_eq() for i in range(75)}
be_bad = True
self.assertRaises(ZeroDivisionError, set1.update, set2)
be_bad = False
set1 = {bad_dict_clear()}
dict2 = {bad_dict_clear(): None}
be_bad = True
set1.symmetric_difference_update(dict2)
def test_iter_and_mutate(self):
# Issue #24581
s = set(range(100))
s.clear()
s.update(range(100))
si = iter(s)
s.clear()
a = list(range(100))
s.update(range(100))
list(si)
def test_merge_and_mutate(self):
class X:
def __hash__(self):
return hash(0)
def __eq__(self, o):
other.clear()
return False
other = set()
other = {X() for i in range(10)}
s = {0}
s.update(other)
# Application tests (based on David Eppstein's graph recipes) ===================================
def powerset(U):
"""Generates all subsets of a set or sequence U."""
U = iter(U)
try:
x = frozenset([next(U)])
for S in powerset(U):
yield S
yield S | x
except StopIteration:
yield frozenset()
def cube(n):
"""Graph of n-dimensional hypercube."""
singletons = [frozenset([x]) for x in range(n)]
return dict([(x, frozenset([x^s for s in singletons]))
for x in powerset(range(n))])
def linegraph(G):
"""Graph, the vertices of which are edges of G,
with two vertices being adjacent iff the corresponding
edges share a vertex."""
L = {}
for x in G:
for y in G[x]:
nx = [frozenset([x,z]) for z in G[x] if z != y]
ny = [frozenset([y,z]) for z in G[y] if z != x]
L[frozenset([x,y])] = frozenset(nx+ny)
return L
def faces(G):
    'Return a set of faces in G, where a face is a set of vertices on that face.'
    # currently limited to triangles, squares, and pentagons
f = set()
for v1, edges in G.items():
for v2 in edges:
for v3 in G[v2]:
if v1 == v3:
continue
if v1 in G[v3]:
f.add(frozenset([v1, v2, v3]))
else:
for v4 in G[v3]:
if v4 == v2:
continue
if v1 in G[v4]:
f.add(frozenset([v1, v2, v3, v4]))
else:
for v5 in G[v4]:
if v5 == v3 or v5 == v2:
continue
if v1 in G[v5]:
f.add(frozenset([v1, v2, v3, v4, v5]))
return f
class TestGraphs(unittest.TestCase):
def test_cube(self):
g = cube(3) # vert --> {v1, v2, v3}
vertices1 = set(g)
self.assertEqual(len(vertices1), 8) # eight vertices
for edge in g.values():
self.assertEqual(len(edge), 3) # each vertex connects to three edges
vertices2 = set(v for edges in g.values() for v in edges)
self.assertEqual(vertices1, vertices2) # edge vertices in original set
cubefaces = faces(g)
self.assertEqual(len(cubefaces), 6) # six faces
for face in cubefaces:
self.assertEqual(len(face), 4) # each face is a square
def test_cuboctahedron(self):
# http://en.wikipedia.org/wiki/Cuboctahedron
# 8 triangular faces and 6 square faces
# 12 identical vertices each connecting a triangle and square
g = cube(3)
        cuboctahedron = linegraph(g)  # V --> {V1, V2, V3, V4}
        self.assertEqual(len(cuboctahedron), 12)  # twelve vertices
vertices = set(cuboctahedron)
for edges in cuboctahedron.values():
self.assertEqual(len(edges), 4) # each vertex connects to four other vertices
othervertices = set(edge for edges in cuboctahedron.values() for edge in edges)
self.assertEqual(vertices, othervertices) # edge vertices in original set
cubofaces = faces(cuboctahedron)
facesizes = collections.defaultdict(int)
for face in cubofaces:
facesizes[len(face)] += 1
self.assertEqual(facesizes[3], 8) # eight triangular faces
self.assertEqual(facesizes[4], 6) # six square faces
for vertex in cuboctahedron:
edge = vertex # Cuboctahedron vertices are edges in Cube
self.assertEqual(len(edge), 2) # Two cube vertices define an edge
for cubevert in edge:
self.assertIn(cubevert, g)
#==============================================================================
if __name__ == "__main__":
unittest.main()
|
py | 1a3d758964fd93fbe9c2e14e11dc0b859b502995 | import argparse
import errno
import os
import re
import sys
from argparse import RawDescriptionHelpFormatter
from textwrap import dedent
from urllib.parse import urlsplit
from requests.utils import get_netrc_auth
from .argtypes import (
AuthCredentials, KeyValueArgType, PARSED_DEFAULT_FORMAT_OPTIONS,
parse_auth,
parse_format_options,
)
from .constants import (
HTTP_GET, HTTP_POST, OUTPUT_OPTIONS, OUTPUT_OPTIONS_DEFAULT,
OUTPUT_OPTIONS_DEFAULT_OFFLINE, OUTPUT_OPTIONS_DEFAULT_STDOUT_REDIRECTED,
OUT_RESP_BODY, PRETTY_MAP, PRETTY_STDOUT_TTY_ONLY, RequestType,
SEPARATOR_CREDENTIALS,
SEPARATOR_GROUP_ALL_ITEMS, SEPARATOR_GROUP_DATA_ITEMS, URL_SCHEME_RE,
)
from .exceptions import ParseError
from .requestitems import RequestItems
from ..context import Environment
from ..plugins.registry import plugin_manager
from ..utils import ExplicitNullAuth, get_content_type
class HTTPieHelpFormatter(RawDescriptionHelpFormatter):
"""A nicer help formatter.
Help for arguments can be indented and contain new lines.
It will be de-dented and arguments in the help
will be separated by a blank line for better readability.
"""
def __init__(self, max_help_position=6, *args, **kwargs):
# A smaller indent for args help.
kwargs['max_help_position'] = max_help_position
super().__init__(*args, **kwargs)
def _split_lines(self, text, width):
text = dedent(text).strip() + '\n\n'
return text.splitlines()
# TODO: refactor and design type-annotated data structures
# for raw args + parsed args and keep things immutable.
class HTTPieArgumentParser(argparse.ArgumentParser):
"""Adds additional logic to `argparse.ArgumentParser`.
Handles all input (CLI args, file args, stdin), applies defaults,
and performs extra validation.
"""
def __init__(self, *args, formatter_class=HTTPieHelpFormatter, **kwargs):
kwargs['add_help'] = False
super().__init__(*args, formatter_class=formatter_class, **kwargs)
self.env = None
self.args = None
self.has_stdin_data = False
self.has_input_data = False
# noinspection PyMethodOverriding
def parse_args(
self,
env: Environment,
args=None,
namespace=None
) -> argparse.Namespace:
self.env = env
self.args, no_options = super().parse_known_args(args, namespace)
if self.args.debug:
self.args.traceback = True
self.has_stdin_data = (
self.env.stdin
and not self.args.ignore_stdin
and not self.env.stdin_isatty
)
self.has_input_data = self.has_stdin_data or self.args.raw is not None
# Arguments processing and environment setup.
self._apply_no_options(no_options)
self._process_request_type()
self._process_download_options()
self._setup_standard_streams()
self._process_output_options()
self._process_pretty_options()
self._process_format_options()
self._guess_method()
self._parse_items()
self._process_url()
self._process_auth()
if self.args.raw is not None:
self._body_from_input(self.args.raw)
elif self.has_stdin_data:
self._body_from_file(self.env.stdin)
if self.args.compress:
# TODO: allow --compress with --chunked / --multipart
if self.args.chunked:
self.error('cannot combine --compress and --chunked')
if self.args.multipart:
self.error('cannot combine --compress and --multipart')
return self.args
def _process_request_type(self):
request_type = self.args.request_type
self.args.json = request_type is RequestType.JSON
self.args.multipart = request_type is RequestType.MULTIPART
self.args.form = request_type in {
RequestType.FORM,
RequestType.MULTIPART,
}
def _process_url(self):
if self.args.url.startswith('://'):
# Paste URL & add space shortcut: `http ://pie.dev` → `http://pie.dev`
self.args.url = self.args.url[3:]
if not URL_SCHEME_RE.match(self.args.url):
if os.path.basename(self.env.program_name) == 'https':
scheme = 'https://'
else:
scheme = self.args.default_scheme + '://'
# See if we're using curl style shorthand for localhost (:3000/foo)
shorthand = re.match(r'^:(?!:)(\d*)(/?.*)$', self.args.url)
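            # e.g. ':3000/foo' gives port='3000', rest='/foo' and becomes 'http://localhost:3000/foo';
            # ':/foo' (no port) becomes 'http://localhost/foo' (illustrative inputs, default http scheme assumed)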
if shorthand:
port = shorthand.group(1)
rest = shorthand.group(2)
self.args.url = scheme + 'localhost'
if port:
self.args.url += ':' + port
self.args.url += rest
else:
self.args.url = scheme + self.args.url
# noinspection PyShadowingBuiltins
def _print_message(self, message, file=None):
# Sneak in our stderr/stdout.
file = {
sys.stdout: self.env.stdout,
sys.stderr: self.env.stderr,
None: self.env.stderr
}.get(file, file)
if not hasattr(file, 'buffer') and isinstance(message, str):
message = message.encode(self.env.stdout_encoding)
super()._print_message(message, file)
def _setup_standard_streams(self):
"""
Modify `env.stdout` and `env.stdout_isatty` based on args, if needed.
"""
self.args.output_file_specified = bool(self.args.output_file)
if self.args.download:
# FIXME: Come up with a cleaner solution.
if not self.args.output_file and not self.env.stdout_isatty:
# Use stdout as the download output file.
self.args.output_file = self.env.stdout
# With `--download`, we write everything that would normally go to
# `stdout` to `stderr` instead. Let's replace the stream so that
# we don't have to use many `if`s throughout the codebase.
# The response body will be treated separately.
self.env.stdout = self.env.stderr
self.env.stdout_isatty = self.env.stderr_isatty
elif self.args.output_file:
# When not `--download`ing, then `--output` simply replaces
# `stdout`. The file is opened for appending, which isn't what
# we want in this case.
self.args.output_file.seek(0)
try:
self.args.output_file.truncate()
except OSError as e:
if e.errno == errno.EINVAL:
# E.g. /dev/null on Linux.
pass
else:
raise
self.env.stdout = self.args.output_file
self.env.stdout_isatty = False
if self.args.quiet:
self.env.stderr = self.env.devnull
if not (self.args.output_file_specified and not self.args.download):
self.env.stdout = self.env.devnull
def _process_auth(self):
# TODO: refactor & simplify this method.
self.args.auth_plugin = None
default_auth_plugin = plugin_manager.get_auth_plugins()[0]
auth_type_set = self.args.auth_type is not None
url = urlsplit(self.args.url)
if self.args.auth is None and not auth_type_set:
if url.username is not None:
# Handle http://username:password@hostname/
username = url.username
password = url.password or ''
self.args.auth = AuthCredentials(
key=username,
value=password,
sep=SEPARATOR_CREDENTIALS,
orig=SEPARATOR_CREDENTIALS.join([username, password])
)
if self.args.auth is not None or auth_type_set:
if not self.args.auth_type:
self.args.auth_type = default_auth_plugin.auth_type
plugin = plugin_manager.get_auth_plugin(self.args.auth_type)()
if (not self.args.ignore_netrc
and self.args.auth is None
and plugin.netrc_parse):
            # Only the host is needed, so it's OK that the URL isn't finalized yet.
netrc_credentials = get_netrc_auth(self.args.url)
if netrc_credentials:
self.args.auth = AuthCredentials(
key=netrc_credentials[0],
value=netrc_credentials[1],
sep=SEPARATOR_CREDENTIALS,
orig=SEPARATOR_CREDENTIALS.join(netrc_credentials)
)
if plugin.auth_require and self.args.auth is None:
self.error('--auth required')
plugin.raw_auth = self.args.auth
self.args.auth_plugin = plugin
already_parsed = isinstance(self.args.auth, AuthCredentials)
if self.args.auth is None or not plugin.auth_parse:
self.args.auth = plugin.get_auth()
else:
if already_parsed:
# from the URL
credentials = self.args.auth
else:
credentials = parse_auth(self.args.auth)
if (not credentials.has_password()
and plugin.prompt_password):
if self.args.ignore_stdin:
# Non-tty stdin read by now
self.error(
'Unable to prompt for passwords because'
' --ignore-stdin is set.'
)
credentials.prompt_password(url.netloc)
self.args.auth = plugin.get_auth(
username=credentials.key,
password=credentials.value,
)
if not self.args.auth and self.args.ignore_netrc:
# Set a no-op auth to force requests to ignore .netrc
# <https://github.com/psf/requests/issues/2773#issuecomment-174312831>
self.args.auth = ExplicitNullAuth()
def _apply_no_options(self, no_options):
"""For every `--no-OPTION` in `no_options`, set `args.OPTION` to
its default value. This allows for un-setting of options, e.g.,
specified in config.
"""
invalid = []
for option in no_options:
if not option.startswith('--no-'):
invalid.append(option)
continue
# --no-option => --option
inverted = '--' + option[5:]
for action in self._actions:
if inverted in action.option_strings:
setattr(self.args, action.dest, action.default)
break
else:
invalid.append(option)
if invalid:
self.error(f'unrecognized arguments: {" ".join(invalid)}')
def _body_from_file(self, fd):
"""Read the data from a file-like object.
Bytes are always read.
"""
self._ensure_one_data_source(self.args.data, self.args.files)
self.args.data = getattr(fd, 'buffer', fd)
def _body_from_input(self, data):
"""Read the data from the CLI.
"""
self._ensure_one_data_source(self.has_stdin_data, self.args.data,
self.args.files)
self.args.data = data.encode()
def _ensure_one_data_source(self, *other_sources):
"""There can only be one source of input request data.
"""
if any(other_sources):
self.error('Request body (from stdin, --raw or a file) and request '
'data (key=value) cannot be mixed. Pass '
'--ignore-stdin to let key/value take priority. '
'See https://httpie.io/docs#scripting for details.')
def _guess_method(self):
"""Set `args.method` if not specified to either POST or GET
based on whether the request has data or not.
"""
if self.args.method is None:
# Invoked as `http URL'.
assert not self.args.request_items
if self.has_input_data:
self.args.method = HTTP_POST
else:
self.args.method = HTTP_GET
# FIXME: False positive, e.g., "localhost" matches but is a valid URL.
elif not re.match('^[a-zA-Z]+$', self.args.method):
# Invoked as `http URL item+'. The URL is now in `args.method`
# and the first ITEM is now incorrectly in `args.url`.
try:
# Parse the URL as an ITEM and store it as the first ITEM arg.
self.args.request_items.insert(0, KeyValueArgType(
*SEPARATOR_GROUP_ALL_ITEMS).__call__(self.args.url))
except argparse.ArgumentTypeError as e:
if self.args.traceback:
raise
self.error(e.args[0])
else:
# Set the URL correctly
self.args.url = self.args.method
# Infer the method
has_data = (
self.has_input_data
or any(
item.sep in SEPARATOR_GROUP_DATA_ITEMS
for item in self.args.request_items)
)
self.args.method = HTTP_POST if has_data else HTTP_GET
def _parse_items(self):
"""
Parse `args.request_items` into `args.headers`, `args.data`,
`args.params`, and `args.files`.
"""
try:
request_items = RequestItems.from_args(
request_item_args=self.args.request_items,
request_type=self.args.request_type,
)
except ParseError as e:
if self.args.traceback:
raise
self.error(e.args[0])
else:
self.args.headers = request_items.headers
self.args.data = request_items.data
self.args.files = request_items.files
self.args.params = request_items.params
self.args.multipart_data = request_items.multipart_data
if self.args.files and not self.args.form:
# `http url @/path/to/file`
request_file = None
for key, file in self.args.files.items():
if key != '':
self.error(
'Invalid file fields (perhaps you meant --form?):'
f' {",".join(self.args.files.keys())}')
if request_file is not None:
self.error("Can't read request from multiple files")
request_file = file
fn, fd, ct = request_file
self.args.files = {}
self._body_from_file(fd)
if 'Content-Type' not in self.args.headers:
content_type = get_content_type(fn)
if content_type:
self.args.headers['Content-Type'] = content_type
def _process_output_options(self):
"""Apply defaults to output options, or validate the provided ones.
The default output options are stdout-type-sensitive.
"""
def check_options(value, option):
unknown = set(value) - OUTPUT_OPTIONS
if unknown:
self.error(f'Unknown output options: {option}={",".join(unknown)}')
if self.args.verbose:
self.args.all = True
if self.args.output_options is None:
if self.args.verbose:
self.args.output_options = ''.join(OUTPUT_OPTIONS)
elif self.args.offline:
self.args.output_options = OUTPUT_OPTIONS_DEFAULT_OFFLINE
elif not self.env.stdout_isatty:
self.args.output_options = OUTPUT_OPTIONS_DEFAULT_STDOUT_REDIRECTED
else:
self.args.output_options = OUTPUT_OPTIONS_DEFAULT
if self.args.output_options_history is None:
self.args.output_options_history = self.args.output_options
check_options(self.args.output_options, '--print')
check_options(self.args.output_options_history, '--history-print')
if self.args.download and OUT_RESP_BODY in self.args.output_options:
# Response body is always downloaded with --download and it goes
# through a different routine, so we remove it.
self.args.output_options = str(
set(self.args.output_options) - set(OUT_RESP_BODY))
def _process_pretty_options(self):
if self.args.prettify == PRETTY_STDOUT_TTY_ONLY:
self.args.prettify = PRETTY_MAP[
'all' if self.env.stdout_isatty else 'none']
elif (self.args.prettify and self.env.is_windows
and self.args.output_file):
self.error('Only terminal output can be colorized on Windows.')
else:
# noinspection PyTypeChecker
self.args.prettify = PRETTY_MAP[self.args.prettify]
def _process_download_options(self):
if self.args.offline:
self.args.download = False
self.args.download_resume = False
return
if not self.args.download:
if self.args.download_resume:
self.error('--continue only works with --download')
if self.args.download_resume and not (
self.args.download and self.args.output_file):
self.error('--continue requires --output to be specified')
def _process_format_options(self):
format_options = self.args.format_options or []
parsed_options = PARSED_DEFAULT_FORMAT_OPTIONS
for options_group in format_options:
parsed_options = parse_format_options(options_group, defaults=parsed_options)
self.args.format_options = parsed_options
|
py | 1a3d77bb379861b8688791c83297a39086da49a9 | import _plotly_utils.basevalidators
class MeansrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="meansrc", parent_name="box", **kwargs):
super(MeansrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
py | 1a3d7820b665acbe9f189a16ca0830d3f1f97e7f | #
# Copyright 2019 Altran. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
"""
Using this function we can easily fetch model object by giving model name
"""
from django.apps import apps
def get_model_class(model_name):
"""
This is intended to replace all our varied ways of identifying a particular model class.
This uses the standard django name for any model class.
:param model_name: string of the form <app name>.<model class name>
:return: model class
"""
(app_label, model_class_name) = model_name.split('.')
return apps.get_model(app_label=app_label, model_name=model_class_name)
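# A minimal usage sketch (the model name below is hypothetical, for illustration only):
#   attachment_cls = get_model_class('documents.Attachment')
#   attachment_cls.objects.filter(object_id=42)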
|
py | 1a3d782674ad6a833db501303df49b2260171b5e | from __future__ import unicode_literals
from django import forms
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.template.defaultfilters import filesizeformat
from django.utils.translation import gettext_lazy as _
from .models import Attachment
def validate_max_size(data):
if (
hasattr(settings, "FILE_UPLOAD_MAX_SIZE")
and data.size > settings.FILE_UPLOAD_MAX_SIZE
):
raise forms.ValidationError(
_("File exceeds maximum size of {size}").format(
size=filesizeformat(settings.FILE_UPLOAD_MAX_SIZE)
)
)
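# A minimal sketch of the setting this validator relies on (the value is illustrative only):
#   # settings.py
#   FILE_UPLOAD_MAX_SIZE = 5 * 1024 * 1024  # 5 MiB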
class AttachmentForm(forms.ModelForm):
attachment_file = forms.FileField(
label=_("Upload attachment"), validators=[validate_max_size], max_length=32787
)
class Meta:
model = Attachment
fields = ("attachment_file",)
def save(self, request, obj, *args, **kwargs):
self.instance.creator = request.user
self.instance.content_type = ContentType.objects.get_for_model(obj)
self.instance.object_id = obj.pk
super(AttachmentForm, self).save(*args, **kwargs)
|
py | 1a3d78e136c58be29e5d26c24ca379a7094f00a7 | import unittest
import pytest
from tensortools.abc import abstract
class TestAbc(unittest.TestCase):
@abstract
def an_abstract_method(self):
pass
def test_abstract(self):
with pytest.raises(NotImplementedError):
self.an_abstract_method()
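# For reference, a minimal sketch of what such an `abstract` decorator typically looks like
# (an assumption for illustration, not necessarily the actual tensortools implementation):
#   def abstract(func):
#       def wrapper(*args, **kwargs):
#           raise NotImplementedError(f'{func.__name__} is abstract')
#       return wrapper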
|
py | 1a3d7a6a01780a64671f5dfd70136222b8528e44 | # ex: set sts=4 ts=4 sw=4 noet:
# -*- coding: utf-8 -*-
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""
"""
from unittest.mock import patch
from datalad.config import ConfigManager
from datalad.distribution.dataset import Dataset
from ..credman import (
CredentialManager,
_get_cred_cfg_var,
)
from datalad.support.keyring_ import MemoryKeyring
from datalad.tests.utils import (
assert_in,
assert_not_in,
assert_raises,
eq_,
neq_,
patch_config,
with_tempfile,
)
def test_credmanager():
# we want all tests to bypass the actual system keyring
with patch('datalad.support.keyring_.keyring', MemoryKeyring()):
check_credmanager()
def check_credmanager():
cfg = ConfigManager()
credman = CredentialManager(cfg)
    # doesn't work with thin air
assert_raises(ValueError, credman.get)
eq_(credman.get('donotexiststest'), None)
eq_(credman.get(crazy='empty'), None)
# smoke test for legacy credential retrieval code
eq_(credman.get('donotexiststest', type='user_password'), None)
# does not fiddle with a secret that is readily provided
eq_(credman.get('dummy', secret='mike', _type_hint='token'),
dict(type='token', secret='mike'))
# no instructions what to do, no legacy entry, nothing was changed
# but the secret was written to the keystore
eq_(credman.set('mycred', secret='some'), dict(secret='some'))
    # redo but with a timestamp
setprops = credman.set('lastusedcred', _lastused=True, secret='some')
assert_in('last-used', setprops)
# now re-set, based on the retrieved info, but update the timestamp
setprops_new = credman.set('lastusedcred', _lastused=True,
**credman.get('lastusedcred'))
# must have updated 'last-used'
neq_(setprops['last-used'], setprops_new['last-used'])
# first property store attempt
eq_(credman.set('changed', secret='some', prop='val'),
dict(secret='some', prop='val'))
# second, no changing the secret, but changing the prop, albeit with
# the same value, change report should be empty
eq_(credman.set('changed', prop='val'), dict())
# change secret, with value pulled from config
try:
cfg.set('datalad.credential.changed.secret', 'envsec',
scope='override')
eq_(credman.set('changed', secret=None), dict(secret='envsec'))
finally:
cfg.unset('datalad.credential.changed.secret', scope='override')
    # remove non-existing property; secret not reported, because unchanged
eq_(credman.set('mycred', dummy=None), dict(dummy=None))
assert_not_in(_get_cred_cfg_var("mycred", "dummy"), cfg)
# set property
eq_(credman.set('mycred', dummy='good', this='that'),
dict(dummy='good', this='that'))
# ensure set
eq_(credman.get('mycred'), dict(dummy='good', this='that', secret='some'))
# remove individual property
eq_(credman.set('mycred', dummy=None), dict(dummy=None))
# ensure removal
eq_(credman.get('mycred'), dict(this='that', secret='some'))
# test full query and constrained query
q = list(credman.query_())
eq_(len(q), 3)
# now query for one of the creds created above
q = list(credman.query_(prop='val'))
eq_(len(q), 1)
eq_(q[0][0], 'changed')
eq_(q[0][1]['prop'], 'val')
# and now a query with no match
q = list(credman.query_(prop='val', funky='town'))
eq_(len(q), 0)
# remove complete credential
credman.remove('mycred')
eq_(credman.get('mycred'), None)
@with_tempfile
def test_credman_local(path):
ds = Dataset(path).create(result_renderer='disabled')
credman = CredentialManager(ds.config)
# deposit a credential into the dataset's config, and die trying to
# remove it
ds.config.set('datalad.credential.stupid.secret', 'really', scope='branch')
assert_raises(RuntimeError, credman.remove, 'stupid')
# but it manages for the local scope
ds.config.set('datalad.credential.notstupid.secret', 'really', scope='local')
credman.remove('notstupid')
def test_query():
# we want all tests to bypass the actual system keyring
with patch('datalad.support.keyring_.keyring', MemoryKeyring()):
check_query()
def check_query():
cfg = ConfigManager()
credman = CredentialManager(cfg)
# set a bunch of credentials with a common realm AND timestamp
for i in range(3):
credman.set(
f'cred.{i}',
_lastused=True,
secret=f'diff{i}',
realm='http://ex.com/login',
)
# now a credential with the common realm, but without a timestamp
credman.set(
'cred.no.time',
_lastused=False,
secret='notime',
realm='http://ex.com/login',
)
# and the most recent one (with timestamp) is an unrelated one
credman.set('unrelated', _lastused=True, secret='unrelated')
# now we want all credentials that match the realm, sorted by
# last-used timestamp -- most recent first
slist = credman.query(realm='http://ex.com/login', _sortby='last-used')
eq_(['cred.2', 'cred.1', 'cred.0', 'cred.no.time'],
[i[0] for i in slist])
    # same query, but least recent first; importantly, the entry without a timestamp stays last
slist = credman.query(realm='http://ex.com/login', _sortby='last-used',
_reverse=False)
eq_(['cred.0', 'cred.1', 'cred.2', 'cred.no.time'],
[i[0] for i in slist])
|
py | 1a3d7abe4ced24124e90205b18ab02d190419a0e | from typing import Tuple, FrozenSet
from pysmt.environment import Environment as PysmtEnv
from pysmt.fnode import FNode
import pysmt.typing as types
from utils import symb_to_next
from hint import Hint, Location
def transition_system(env: PysmtEnv) -> Tuple[FrozenSet[FNode], FNode, FNode,
FNode]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
symbols = frozenset([pc, x, y])
m_1 = mgr.Int(-1)
n_locs = 3
max_int = n_locs
ints = []
pcs = []
x_pcs = []
for idx in range(n_locs):
num = mgr.Int(idx)
ints.append(num)
pcs.append(mgr.Equals(pc, num))
x_pcs.append(mgr.Equals(x_pc, num))
for idx in range(n_locs, max_int):
num = mgr.Int(idx)
ints.append(num)
pcend = mgr.Equals(pc, m_1)
x_pcend = mgr.Equals(x_pc, m_1)
init = pcs[0]
cfg = []
# pc = 0 & (x >= 0) -> pc' = 1
cond = mgr.GE(x, ints[0])
cfg.append(mgr.Implies(mgr.And(pcs[0], cond), x_pcs[1]))
# pc = 0 & !(x >= 0) -> pc' = -1
cfg.append(mgr.Implies(mgr.And(pcs[0], mgr.Not(cond)), x_pcend))
# pc = 1 -> pc' = 2
cfg.append(mgr.Implies(pcs[1], x_pcs[2]))
# pc = 2 -> pc' = 0
cfg.append(mgr.Implies(pcs[2], x_pcs[0]))
# pc = -1 -> pc' = -1
cfg.append(mgr.Implies(pcend, x_pcend))
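    # The encoded program, roughly, in pseudocode (reconstruction for illustration):
    #   pc=0: if x >= 0 goto 1 else goto end
    #   pc=1: x = x + y; goto 2
    #   pc=2: y = y + 1; goto 0
    #   end (pc=-1): stay here forever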
trans = []
same_x = mgr.Equals(x_x, x)
same_y = mgr.Equals(x_y, y)
same = mgr.And(same_x, same_y)
# pc = 0 -> same
trans.append(mgr.Implies(pcs[0], same))
# pc = 1 -> x' = x + y & same_y
trans.append(mgr.Implies(pcs[1],
mgr.And(mgr.Equals(x_x, mgr.Plus(x, y)),
same_y)))
# pc = 2 -> same_x & y' = y + 1
trans.append(mgr.Implies(pcs[2],
mgr.And(same_x,
mgr.Equals(x_y, mgr.Plus(y, ints[1])))))
# pc = end -> same
trans.append(mgr.Implies(pcend, same))
trans = mgr.And(*cfg, *trans)
fairness = mgr.Not(mgr.Equals(pc, m_1))
return symbols, init, trans, fairness
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
symbs = frozenset([pc, x, y])
m_100 = mgr.Int(-100)
m_1 = mgr.Int(-1)
i_0 = mgr.Int(0)
i_1 = mgr.Int(1)
i_2 = mgr.Int(2)
i_4 = mgr.Int(4)
i_20 = mgr.Int(20)
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
res = []
stutter = mgr.Equals(x_y, y)
loc = Location(env, mgr.TRUE(), mgr.LE(x, i_20), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_y, mgr.Plus(i_1, y)))
h_y = Hint("h_y0", env, frozenset([y]), symbs)
h_y.set_locs([loc])
res.append(h_y)
stutter = mgr.Equals(x_x, x)
loc = Location(env, mgr.GE(x, i_20), mgr.GE(y, i_1), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_x, mgr.Plus(x, y)))
h_x = Hint("h_x0", env, frozenset([x]), symbs)
h_x.set_locs([loc])
res.append(h_x)
loc0 = Location(env, mgr.GE(y, m_100), mgr.LE(x, i_20))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(x, y)))
loc1 = Location(env, mgr.TRUE(), mgr.GE(x, m_100))
loc1.set_progress(0, mgr.Equals(x_y, m_100))
h_y = Hint("h_y2", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1])
res.append(h_y)
loc0 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
loc0.set_progress(1, mgr.Equals(x_x, mgr.Plus(x, y)))
loc1 = Location(env, mgr.GE(x, i_2), mgr.GE(y, i_1))
loc1.set_progress(0, mgr.Equals(x_x, y))
h_x = Hint("h_x2", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1])
res.append(h_x)
loc0 = Location(env, mgr.GE(y, m_100), mgr.LE(x, i_20))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Times(x, y)))
loc1 = Location(env, mgr.TRUE(), mgr.GE(x, m_100))
loc1.set_progress(0, mgr.Equals(x_y, m_100))
h_y = Hint("h_y3", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1])
res.append(h_y)
loc0 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
loc0.set_progress(1, mgr.Equals(x_x, mgr.Times(x, y)))
loc1 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
loc1.set_progress(0, mgr.Equals(x_x, y))
h_x = Hint("h_x3", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1])
res.append(h_x)
loc0 = Location(env, mgr.Equals(pc, i_1))
loc0.set_progress(1, mgr.GT(x_pc, mgr.Plus(pc, i_1)))
loc1 = Location(env, mgr.GT(pc, i_2))
loc1.set_progress(0, mgr.Equals(x_pc, i_1))
h_pc = Hint("h_pc0", env, frozenset([pc]), symbs)
h_pc.set_locs([loc0, loc1])
res.append(h_pc)
loc0 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
loc0.set_progress(1, mgr.Equals(x_x, mgr.Times(x, y)))
loc1 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
loc1.set_progress(2, mgr.GT(x_x, y))
loc2 = Location(env, mgr.GE(x, i_2))
loc2.set_progress(0, mgr.GE(x_x, i_20))
h_x = Hint("h_x4", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1, loc2])
res.append(h_x)
loc0 = Location(env, mgr.GE(y, m_100))
loc0.set_progress(0, mgr.Equals(x_y, mgr.Times(y, y)))
h_y = Hint("h_y5", env, frozenset([y]), symbs)
h_y.set_locs([loc0])
res.append(h_y)
loc0 = Location(env, mgr.LE(x, i_0))
loc0.set_progress(1, mgr.Equals(x_x, mgr.Times(x, x)))
loc1 = Location(env, mgr.GE(x, i_0))
loc1.set_progress(0, mgr.LT(x_x, mgr.Times(m_1, x, x)))
h_x = Hint("h_x5", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1])
res.append(h_x)
loc0 = Location(env, mgr.GE(y, m_100))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Times(y, y)))
loc1 = Location(env, mgr.GE(y, i_0))
loc1.set_progress(0, mgr.GE(x_y, mgr.Plus(y, i_1)))
h_y = Hint("h_y6", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1])
res.append(h_y)
loc0 = Location(env, mgr.LE(x, i_20))
loc0.set_progress(1, mgr.Equals(x_x, mgr.Plus(mgr.Times(x, x), i_1)))
loc1 = Location(env, mgr.GE(x, i_20))
loc1.set_progress(0, mgr.LT(x_x, mgr.Times(m_1, x, x)))
h_x = Hint("h_x6", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1])
res.append(h_x)
loc0 = Location(env, mgr.LE(pc, i_1))
loc0.set_progress(1, mgr.GT(x_pc, pc))
loc1 = Location(env, mgr.LE(pc, i_2))
loc1.set_progress(0, mgr.Equals(x_pc, mgr.Div(pc, pc)))
h_pc = Hint("h_pc3", env, frozenset([pc]), symbs)
h_pc.set_locs([loc0, loc1])
res.append(h_pc)
loc0 = Location(env, mgr.GE(y, i_0), mgr.GE(pc, i_1))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(y, pc)))
loc1 = Location(env, mgr.GE(y, i_1))
loc1.set_progress(0, mgr.Equals(x_y, y))
h_y = Hint("h_y7", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1])
res.append(h_y)
loc0 = Location(env, mgr.GE(x, i_1), mgr.GT(y, i_1))
loc0.set_progress(1, mgr.Equals(x_x, mgr.Plus(mgr.Times(x, y), i_1)))
loc1 = Location(env, mgr.GE(x, i_2))
loc1.set_progress(2, mgr.LT(x_x, mgr.Times(m_1, x, x)))
loc2 = Location(env, mgr.LE(x, i_4))
loc2.set_progress(0, mgr.GE(x_x, mgr.Div(x, x)))
h_x = Hint("h_x7", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1, loc2])
res.append(h_x)
return frozenset(res)
|
py | 1a3d7b6c567b3cabb8a34b5bcf2a2956d16b4ac2 | #OBSOLETE - This script has been moved to the Jupyter Notebook: OSPO_Project_Health_Data_Tableau.ipynb
from common_functions import augur_db_connect, get_dates, get_commits_by_repo
from common_functions import repo_api_call, fork_archive
from tableau_functions import sustain_prs_by_repo_tableau, contributor_risk_tableau, response_time_tableau, activity_release_tableau
six_months = 180 # Default to one year of data
year = 365 # Default to one year of data
engine = augur_db_connect()
start_date, end_date = get_dates(year)
six_start_date, six_end_date = get_dates(six_months)
commit_threshold = 60 # 90 but use 1500 for testing
repo_list_commits = get_commits_by_repo(six_start_date, six_end_date, engine)
top = repo_list_commits.loc[repo_list_commits['count'] > commit_threshold]
# Testing - Delete this line later
i = 0
for index, repo in top.iterrows():
repo_id = repo['repo_id']
repo_name = repo['repo_name']
repo_path = repo['repo_path']
org_name = repo_path[11:(len(repo_path)-1)]
print('Processing:', org_name, repo_name, repo_path, repo_id, repo['count'])
try:
repo_api = repo_api_call(repo_name, org_name)
except:
print('Cannot process API calls for:', org_name, repo_name, repo_path, repo_id)
is_fork, is_archived = fork_archive(repo_name, org_name, engine)
# Only gather data from repos that aren't forks or archived
    if not is_fork and not is_archived:
sustain_prs_by_repo_tableau(repo_id, repo_name, org_name, start_date, end_date, engine)
contributor_risk_tableau(repo_id, repo_name, org_name, start_date, end_date, engine)
response_time_tableau(repo_id, repo_name, org_name, start_date, end_date, engine)
activity_release_tableau(repo_name, org_name, start_date, end_date, repo_api)
# Testing - Delete these lines later
if i > 2:
break
else:
i+=1
|
py | 1a3d7ce172d0ec0493fe732bcc5518f4255d5f05 | examples = [
"""Josephine softens. "Yeah, okay. I probably got a little too worked up there."
A bell chimes in the house.
"Oh, wow. Is it that late? We should be headed to bed if you wanna be up early enough to dig your car out."
"Yeah, I should probably turn in."
"The night's still young. Why don't we stay up a little longer?"
Donald Trump shows up. You realize you've been in simulated White House this whole time.
""",
"Alex softens. Josephine picks up an apple.",
"She walks in beauty, like the night. It snows that night. The rain, the rain. You are free.",
"You decided to meet him on a pub called Le Bon Temps Roule.",
"The Golden Gate Bridge was painted green by Joe Biden."
]
def test_neuralcoref():
"""
"""
import spacy
import neuralcoref
nlp = spacy.load('en')
neuralcoref.add_to_pipe(nlp)
doc = nlp('My sister has a dog. She loves him. Angela lives in Boston. She is quite happy in that city.')
print(f"doc {doc}")
print(f"coref clusters {doc._.coref_clusters}")
for ent in doc.ents:
print(ent._.coref_cluster)
def test_spacy_ner():
"""
PERSON: People, including fictional.
NORP: Nationalities or religious or political groups.
FAC: Buildings, airports, highways, bridges, etc.
ORG: Companies, agencies, institutions, etc.
GPE: Countries, cities, states.
LOC: Non-GPE locations, mountain ranges, bodies of water.
PRODUCT: Objects, vehicles, foods, etc. (Not services.)
EVENT: Named hurricanes, battles, wars, sports events, etc.
WORK_OF_ART: Titles of books, songs, etc.
LAW: Named documents made into laws.
LANGUAGE: Any named language.
DATE: Absolute or relative dates or periods.
TIME: Times smaller than a day.
PERCENT: Percentage, including ”%“.
MONEY: Monetary values, including unit.
QUANTITY: Measurements, as of weight or distance.
ORDINAL: “first”, “second”, etc.
CARDINAL: Numerals that do not fall under another type.
"""
import spacy
nlp = spacy.load('en_core_web_lg')
text = examples[-1]
doc = nlp(text)
print(doc.text)
# for token in doc:
# print(token.text, token.pos_, token.dep_, token.ent_type_)
# print('entities')
# for entity in doc.ents:
# start, end = entity.start, entity.end
# for token in doc[start:end]:
# print(token.text, token.ent_type_)
# for token in doc:
# print(token.text, token.pos_, token.dep_, token.ent_type_)
print('pos_')
pronouns = []
for token in doc:
print(token, token.pos_)
# test_spacy_ner()
def test_bert_huggingface_ner():
from transformers import AutoTokenizer, AutoModelForTokenClassification
from transformers import pipeline
tokenizer = AutoTokenizer.from_pretrained("dslim/bert-base-NER")
model = AutoModelForTokenClassification.from_pretrained("dslim/bert-base-NER")
nlp = pipeline("ner", model=model, tokenizer=tokenizer)
tokens = nlp.tokenizer.tokenize(examples[-1])
print(type(tokens), type(tokens[0]))
print(tokens)
example = examples[-2]
print(example)
ner_results = nlp(example)
print(ner_results)
same_ent_type = lambda x, y: x.split('-')[-1] == y.split('-')[-1]
entities = []
prev_beg = {}
for i, entity in enumerate(ner_results):
prev_entity = ner_results[i - 1] if i > 0 else {}
print(entity['word'], entity['entity'])
if entity['entity'].startswith('B'):
prev_beg = entity
entities.append(prev_beg)
elif entity['entity'].startswith('I'):
if entity['word'].startswith('##'):
word = entity['word'][2:]
else:
word = ' ' + entity['word']
prev_beg['word'] += word
else:
raise Exception("How?")
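    # e.g. a name tokenized as ['Jo', '##seph', '##ine'] with tags B-PER, I-PER, I-PER is merged
    # back into the single entity string 'Josephine' (illustrative tokenization)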
print([e for e in entities])
# test_bert_huggingface_ner()
def test_ner_in_practice():
from analysis import ner_pipeline, nlp, ner
ner(examples[-1])
# test_ner_in_practice()
def test_semantic_parse():
import spacy
import textacy
nlp = spacy.load('en_core_web_lg')
# https://stackoverflow.com/questions/56896753/is-there-a-way-to-get-entire-constituents-using-spacy
text = "Anna was mad at Alex."
doc = nlp(text)
# print(doc.text)
# for token in doc:
# print(token.text, token.tag_, token.pos_, token.dep_, token.ent_type_, [a for a in token.subtree])
triples = [svo for svo in textacy.extract.subject_verb_object_triples(doc)]
def t(tokens, lemma=True):
# convert a list of tokens into text
return " ".join([x.lemma_ if lemma else x.text for x in tokens])
for subject, verb, object in triples:
s1 = subject[0]
print(type(s1))
print(s1.text, s1.lemma_)
print(f'{t(verb)}({t(subject, False)}, {t(object, False)})')
test_semantic_parse()
# from stanza.server import CoreNLPClient
# stanza.download('en')
# # nlp = stanza.Pipeline('en')
# #
# # doc =
# #
# # for sentence in doc.sentences:
# # print(sentence.ents)
# # print(sentence.dependencies)
#
#
# text = \
# """It's been a while since you've been here, but you quickly find your way. Even after all these years, the path is still a little too familiar to be completely trustworthy. The door creaks open, and you slowly creep inside. You have a slight feeling of deja-vu, as if you've been here before."""
#
# # with CoreNLPClient(annotators=["tokenize","ssplit","pos","lemma","depparse","natlog","openie"], be_quiet=False) as client:
# with CoreNLPClient(annotators=["openie"], be_quiet=False) as client:
# ann = client.annotate(text)
# # print(ann)
# for sentence in ann.sentence:
# for triple in sentence.openieTriple:
# print(triple) |
py | 1a3d7d5f7f098f8222ea5af0bfcf3de42164072b | import tweepy #You need to install tweepy#
import re
import time
import csv
###twitter
auth = tweepy.OAuthHandler([YOUR TWITTER API KEY], [YOUR TWITTER API KEY])
auth.set_access_token([YOUR TWITTER API KEY], [YOUR TWITTER API KEY])
api = tweepy.API(auth)
###
status=[]
no_status=[]
def DB():
new_status=[]
txt=[]
text_=api.search(q='#bitcoin',lang='en',show_user='True')
id_=re.findall("'id_str': '(.+?)'",str(text_))
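    # pull every id_str field out of the string repr of the search results to collect tweet IDs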
y=1
for i in id_:
try:
text_=api.get_status(i,tweet_mode='extended')
if i not in status:
status.append(str(i))
new_status.append(i)
text=re.findall("'full_text': '(.+?)'",str(text_))
try:
texto=str(text[0])
texto=texto.replace('"','')
txt.append(texto)
except Exception as e:
txt.append('VOID')
y+=1
except Exception as e:
if 'Rate limit exceeded' in str(e):
print ('Rate limit exceeded')
if i not in no_status:
no_status.append(i)
x=0
with open('status_raw.db','a') as csvfile: #CREATE THE "status_raw.db" IN ADVANCE
fieldnames = ['ID','TEXT']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
for i in new_status:
writer.writerow({'ID':str(i),'TEXT':txt[x]})
x+=1
Z=1
while True:
print ('ROUND:',Z)
DB()
print ('LEN STATUS:',len(status))
print ('LEN NO STATUS:',len(no_status))
print('___________________')
time.sleep(900)#15 minutes wait, not to overload the twitter API
Z+=1
|
py | 1a3d7da2b17969689dc427abec0d5d9807051a7a | from zeit.cms.content.interfaces import ICommonMetadata
from zeit.cms.interfaces import CONFIG_CACHE
from zeit.cms.interfaces import ITypeDeclaration
from zeit.cms.repository.interfaces import IAutomaticallyRenameable
import collections
import grokcore.component as grok
import logging
import requests
import transaction
import zeit.cms.celery
import zeit.cms.checkout.interfaces
import zeit.cms.interfaces
import zope.lifecycleevent
log = logging.getLogger(__name__)
@grok.subscribe(
zeit.cms.interfaces.ICMSContent,
zeit.cms.checkout.interfaces.IAfterCheckinEvent)
def notify_after_checkin(context, event):
if event.publishing:
return
# XXX Work around redis/ZODB race condition, see BUG-796.
for hook in HOOKS:
notify_webhook.apply_async((context.uniqueId, hook.url), countdown=5)
@grok.subscribe(zope.lifecycleevent.IObjectAddedEvent)
def notify_after_add(event):
context = event.object
if not zeit.cms.interfaces.ICMSContent.providedBy(context):
return
if zeit.cms.repository.interfaces.IRepository.providedBy(context):
return
if zeit.cms.workingcopy.interfaces.IWorkingcopy.providedBy(
event.newParent):
return
for hook in HOOKS:
notify_webhook.delay(context.uniqueId, hook.url)
@zeit.cms.celery.task(bind=True, queuename='webhook')
def notify_webhook(self, uniqueId, url):
content = zeit.cms.interfaces.ICMSContent(uniqueId, None)
if content is None:
log.warning('Could not resolve %s, ignoring.', uniqueId)
return
hook = HOOKS.factory.find(url)
if hook is None:
log.warning('Hook configuration for %s has vanished, ignoring.', url)
return
try:
hook(content)
except TechnicalError as e:
raise self.retry(countdown=e.countdown)
# Don't even think about trying to write to DAV cache, to avoid conflicts.
transaction.abort()
class Hook(object):
def __init__(self, url):
self.url = url
self.excludes = []
def __call__(self, content):
if self.should_exclude(content):
return
log.debug('Notifying %s about %s', self.url, content)
try:
self.deliver(content)
except requests.exceptions.HTTPError as err:
if getattr(err.response, 'status_code', 500) < 500:
raise
else:
log.warning('Webhook %s returned error, retrying',
self.url, exc_info=True)
raise TechnicalError()
except requests.exceptions.RequestException:
log.warning('Webhook %s returned error, retrying',
self.url, exc_info=True)
raise TechnicalError()
def deliver(self, content):
r = requests.post(self.url, json=[content.uniqueId], timeout=10)
r.raise_for_status()
def add_exclude(self, key, value):
self.excludes.append((key, value))
def should_exclude(self, content):
renameable = getattr(
IAutomaticallyRenameable(content, None), 'renameable', False)
if renameable:
return True
for exclude in self.excludes:
if self._matches(exclude, content):
log.debug('Skipping %s, matched exclude %s', content, exclude)
return True
return False
def _matches(self, exclude, content):
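        # exclude keys dispatch to the matching _match_<key> method below,
        # e.g. a ('type', value) exclude calls _match_type(content, value)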
key, value = exclude
func = getattr(self, '_match_%s' % key)
return func(content, value)
def _match_type(self, content, value):
typ = getattr(
ITypeDeclaration(content, None), 'type_identifier', 'unknown')
return typ == value
def _match_product(self, content, value):
if not ICommonMetadata.providedBy(content):
return False
return content.product and content.product.id == value
class HookSource(zeit.cms.content.sources.SimpleXMLSource):
config_url = 'checkin-webhook-config'
default_filename = 'checkin-webhooks.xml'
@CONFIG_CACHE.cache_on_arguments()
def _values(self):
result = collections.OrderedDict()
tree = self._get_tree()
for node in tree.iterchildren('webhook'):
hook = Hook(node.get('url'))
for exclude in node.xpath('exclude/*'):
hook.add_exclude(exclude.tag, exclude.text)
result[hook.url] = hook
return result
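    # The configuration XML is expected to look roughly like this (URL, root element name and
    # exclude value are illustrative assumptions; only the <webhook>/<exclude> structure is read):
    #   <webhooks>
    #     <webhook url="https://example.invalid/notify">
    #       <exclude><type>image</type></exclude>
    #     </webhook>
    #   </webhooks>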
def getValues(self):
return self._values().values()
def find(self, url):
return self._values().get(url)
HOOKS = HookSource()
class TechnicalError(Exception):
def __init__(self, countdown=60):
self.countdown = countdown
|
py | 1a3d7e072b2c8c409301f64c870755837d979fbf | """ Unit tests for visibility operations
"""
import unittest
import astropy.units as u
import numpy
from astropy.coordinates import SkyCoord
from numpy.testing import assert_allclose
from rascil.data_models.memory_data_models import Skycomponent
from rascil.data_models.polarisation import PolarisationFrame
from rascil.processing_components.imaging import dft_skycomponent_visibility
from rascil.processing_components.simulation import create_named_configuration
from rascil.processing_components import create_flagtable_from_blockvisibility, qa_flagtable, \
create_blockvisibility, create_flagtable_from_rows
class TestFlagTableOperations(unittest.TestCase):
def setUp(self):
self.lowcore = create_named_configuration('LOWBD2-CORE')
self.times = (numpy.pi / 43200.0) * numpy.arange(0.0, 300.0, 30.0)
self.frequency = numpy.linspace(1.0e8, 1.1e8, 3)
self.channel_bandwidth = numpy.array([1e7, 1e7, 1e7])
# Define the component and give it some spectral behaviour
f = numpy.array([100.0, 20.0, -10.0, 1.0])
self.flux = numpy.array([f, 0.8 * f, 0.6 * f])
self.polarisation_frame = PolarisationFrame("linear")
# The phase centre is absolute and the component is specified relative (for now).
# This means that the component should end up at the position phasecentre+compredirection
self.phasecentre = SkyCoord(ra=+180.0 * u.deg, dec=-35.0 * u.deg, frame='icrs', equinox='J2000')
self.compabsdirection = SkyCoord(ra=+181.0 * u.deg, dec=-35.0 * u.deg, frame='icrs', equinox='J2000')
pcof = self.phasecentre.skyoffset_frame()
self.compreldirection = self.compabsdirection.transform_to(pcof)
self.comp = Skycomponent(direction=self.compreldirection, frequency=self.frequency, flux=self.flux)
def test_create_flagtable(self):
bvis = create_blockvisibility(self.lowcore, self.times, self.frequency,
channel_bandwidth=self.channel_bandwidth,
phasecentre=self.phasecentre,
polarisation_frame=self.polarisation_frame,
weight=1.0)
ft = create_flagtable_from_blockvisibility(bvis)
print(ft)
assert len(ft.data) == len(bvis.data)
def test_create_flagtable_from_rows(self):
bvis = create_blockvisibility(self.lowcore, self.times, self.frequency,
channel_bandwidth=self.channel_bandwidth,
polarisation_frame=self.polarisation_frame,
phasecentre=self.phasecentre, weight=1.0)
ft = create_flagtable_from_blockvisibility(bvis)
rows = ft.time > 150.0
ft = create_flagtable_from_blockvisibility(bvis)
selected_ft = create_flagtable_from_rows(ft, rows)
assert len(selected_ft.time) == numpy.sum(numpy.array(rows))
if __name__ == '__main__':
unittest.main()
|
py | 1a3d7e3423feb2b0c67d31b4e31f1ef03dc353d7 | import os
import autowig
import sys
import pickle
import itertools
import subprocess
asg = autowig.AbstractSemanticGraph()
asg = autowig.parser(asg, [os.path.join(sys.prefix, 'include', 'basic', 'binomial.h'),
os.path.join(sys.prefix, 'include', 'basic', 'overload.h')],
['-x', 'c++', '-std=c++11', '-ferror-limit=0', '-I' + os.path.join(sys.prefix, 'include')],
bootstrap=1)
asg = autowig.controller(asg)
autowig.generator.plugin = 'boost_python_internal'
wrappers = autowig.generator(asg, module='src/py/_basic.cpp',
decorator='src/py/basic/_basic.py',
closure=False)
wrappers.write() |
py | 1a3d7e7d2a763e2f5bf621528cb7c89736ce63f8 | #-------------------------------------#
# 对数据集进行训练
#   Train the model on the dataset
import os
import numpy as np
import time
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
from utils.dataloader import yolo_dataset_collate, YoloDataset
from nets.yolo_training import YOLOLoss,Generator
from nets.yolo4 import YoloBody
from tensorboardX import SummaryWriter
from tqdm import tqdm
#---------------------------------------------------#
#   Get the classes and anchor boxes
#---------------------------------------------------#
def get_classes(classes_path):
'''loads the classes'''
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def get_anchors(anchors_path):
'''loads the anchors from a file'''
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape([-1,3,2])[::-1,:,:]
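# The anchors file is expected to hold nine comma-separated width,height pairs; the reshape
# above groups them into 3 scales x 3 anchors x (w, h) and [::-1] reverses the scale order.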
def get_lr(optimizer):
for param_group in optimizer.param_groups:
return param_group['lr']
def fit_ont_epoch(net,yolo_losses,epoch,epoch_size,epoch_size_val,gen,genval,Epoch,cuda,writer):
global train_tensorboard_step, val_tensorboard_step
total_loss = 0
val_loss = 0
net.train()
with tqdm(total=epoch_size,desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as pbar:
for iteration, batch in enumerate(gen):
if iteration >= epoch_size:
break
images, targets = batch[0], batch[1]
with torch.no_grad():
if cuda:
images = Variable(torch.from_numpy(images).type(torch.FloatTensor)).cuda()
targets = [Variable(torch.from_numpy(ann).type(torch.FloatTensor)) for ann in targets]
else:
images = Variable(torch.from_numpy(images).type(torch.FloatTensor))
targets = [Variable(torch.from_numpy(ann).type(torch.FloatTensor)) for ann in targets]
#----------------------#
            #   Zero the gradients
#----------------------#
optimizer.zero_grad()
#----------------------#
            #   Forward pass
#----------------------#
outputs = net(images)
losses = []
num_pos_all = 0
#----------------------#
            #   Compute the loss
#----------------------#
for i in range(3):
loss_item, num_pos = yolo_losses[i](outputs[i], targets)
losses.append(loss_item)
num_pos_all += num_pos
loss = sum(losses) / num_pos_all
total_loss += loss.item()
#----------------------#
            #   Backpropagation
#----------------------#
loss.backward()
optimizer.step()
            # Write the training loss to TensorBoard at every step
writer.add_scalar('Train_loss', loss, train_tensorboard_step)
train_tensorboard_step += 1
pbar.set_postfix(**{'total_loss': total_loss / (iteration + 1),
'lr' : get_lr(optimizer)})
pbar.update(1)
    # To write the loss to TensorBoard once per epoch instead, use the commented line below
# writer.add_scalar('Train_loss', total_loss/(iteration+1), epoch)
net.eval()
print('Start Validation')
with tqdm(total=epoch_size_val, desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as pbar:
for iteration, batch in enumerate(genval):
if iteration >= epoch_size_val:
break
images_val, targets_val = batch[0], batch[1]
with torch.no_grad():
if cuda:
images_val = Variable(torch.from_numpy(images_val).type(torch.FloatTensor)).cuda()
targets_val = [Variable(torch.from_numpy(ann).type(torch.FloatTensor)) for ann in targets_val]
else:
images_val = Variable(torch.from_numpy(images_val).type(torch.FloatTensor))
targets_val = [Variable(torch.from_numpy(ann).type(torch.FloatTensor)) for ann in targets_val]
optimizer.zero_grad()
outputs = net(images_val)
losses = []
num_pos_all = 0
for i in range(3):
loss_item, num_pos = yolo_losses[i](outputs[i], targets_val)
losses.append(loss_item)
num_pos_all += num_pos
loss = sum(losses) / num_pos_all
val_loss += loss.item()
            # To write the validation loss to TensorBoard at every step instead, use the commented lines below
# writer.add_scalar('Val_loss', loss, val_tensorboard_step)
# val_tensorboard_step += 1
pbar.set_postfix(**{'total_loss': val_loss / (iteration + 1)})
pbar.update(1)
    # Write the validation loss to TensorBoard once per epoch
writer.add_scalar('Val_loss',val_loss / (epoch_size_val+1), epoch)
print('Finish Validation')
print('Epoch:'+ str(epoch+1) + '/' + str(Epoch))
print('Total Loss: %.4f || Val Loss: %.4f ' % (total_loss/(epoch_size+1),val_loss/(epoch_size_val+1)))
print('Saving state, iter:', str(epoch+1))
torch.save(model.state_dict(), 'logs/Epoch%d-Total_Loss%.4f-Val_Loss%.4f.pth'%((epoch+1),total_loss/(epoch_size+1),val_loss/(epoch_size_val+1)))
if __name__ == "__main__":
#-------------------------------#
    #   Whether to use CUDA
    #   Set this to False if no GPU is available
#-------------------------------#
Cuda = False
#-------------------------------#
    #   Whether to use the DataLoader
#-------------------------------#
Use_Data_Loader = True
#------------------------------------------------------#
    #   Whether to normalize the loss, which changes its magnitude
    #   Decides whether the final loss is divided by batch_size or by the number of positive samples
#------------------------------------------------------#
normalize = False
#-------------------------------#
    #   Input shape
    #   Use 416x416 if GPU memory is limited
    #   Use 608x608 if GPU memory is plentiful
#-------------------------------#
input_shape = (416,416)
#----------------------------------------------------#
    #   Paths to the classes and anchors files, very important
    #   Be sure to change classes_path to match your own dataset before training
#----------------------------------------------------#
anchors_path = 'model_data/yolo_anchors.txt'
classes_path = 'model_data/voc_classes.txt'
#----------------------------------------------------#
    #   Load the classes and anchors
#----------------------------------------------------#
class_names = get_classes(classes_path)
anchors = get_anchors(anchors_path)
num_classes = len(class_names)
#------------------------------------------------------#
    #   YOLOv4 training tricks
    #   mosaic: mosaic data augmentation, True or False
    #   In practice mosaic augmentation is not very stable, so it defaults to False
    #   Cosine_scheduler: cosine-annealing learning rate, True or False
    #   label_smoothing: label smoothing, usually below 0.01, e.g. 0.01 or 0.005
#------------------------------------------------------#
mosaic = False
Cosine_lr = False
smoooth_label = 0
#------------------------------------------------------#
    #   Build the YOLO model
    #   Be sure to change classes_path and the corresponding txt file before training
#------------------------------------------------------#
model = YoloBody(len(anchors[0]), num_classes)
#------------------------------------------------------#
    #   See the README for the pretrained weights file (Baidu Netdisk download)
#------------------------------------------------------#
model_path = "model_data/Epoch100-Total_Loss6.4410-Val_Loss8.7225.pth"
print('Loading weights into state dict...')
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model_dict = model.state_dict()
pretrained_dict = torch.load(model_path, map_location=device)
pretrained_dict = {k: v for k, v in pretrained_dict.items() if np.shape(model_dict[k]) == np.shape(v)}
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
print('Finished!')
net = model.train()
if Cuda:
net = torch.nn.DataParallel(model)
cudnn.benchmark = True
net = net.cuda()
    # Build the loss functions
yolo_losses = []
for i in range(3):
yolo_losses.append(YOLOLoss(np.reshape(anchors,[-1,2]),num_classes, \
(input_shape[1], input_shape[0]), smoooth_label, Cuda, normalize))
#----------------------------------------------------#
    #   Get the image paths and labels
#----------------------------------------------------#
annotation_path = '2007_train.txt'
#----------------------------------------------------------------------#
    #   The train/validation split is done here in train.py
    #   It is normal for 2007_test.txt and 2007_val.txt to be empty; training does not use them
    #   With the current split, the validation to training ratio is 1:9
#----------------------------------------------------------------------#
val_split = 0.1
with open(annotation_path) as f:
lines = f.readlines()
np.random.seed(10101)
np.random.shuffle(lines)
np.random.seed(None)
num_val = int(len(lines)*val_split)
num_train = len(lines) - num_val
writer = SummaryWriter(log_dir='logs',flush_secs=60)
if Cuda:
graph_inputs = torch.from_numpy(np.random.rand(1,3,input_shape[0],input_shape[1])).type(torch.FloatTensor).cuda()
else:
graph_inputs = torch.from_numpy(np.random.rand(1,3,input_shape[0],input_shape[1])).type(torch.FloatTensor)
writer.add_graph(model, (graph_inputs,))
#------------------------------------------------------#
    #   The backbone extracts generic features, so freezing it speeds up training
    #   and also protects the weights from being destroyed early in training.
    #   Init_Epoch is the starting epoch
    #   Freeze_Epoch is the last epoch of frozen training
    #   Epoch is the total number of training epochs
    #   If you get OOM or run out of GPU memory, reduce Batch_size
#------------------------------------------------------#
train_tensorboard_step = 1
val_tensorboard_step = 1
if True:
lr = 1e-3
Batch_size = 4
Init_Epoch = 0
Freeze_Epoch = 50
#----------------------------------------------------------------------------#
    #   In practice the optimizer's weight_decay turned out to be counterproductive,
    #   so it is removed here; feel free to enable it and experiment, typically weight_decay=5e-4
#----------------------------------------------------------------------------#
optimizer = optim.Adam(net.parameters(),lr)
if Cosine_lr:
lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=5, eta_min=1e-5)
else:
lr_scheduler = optim.lr_scheduler.StepLR(optimizer,step_size=1,gamma=0.92)
if Use_Data_Loader:
train_dataset = YoloDataset(lines[:num_train], (input_shape[0], input_shape[1]), mosaic=mosaic, is_train=True)
val_dataset = YoloDataset(lines[num_train:], (input_shape[0], input_shape[1]), mosaic=False, is_train=False)
gen = DataLoader(train_dataset, shuffle=True, batch_size=Batch_size, num_workers=4, pin_memory=True,
drop_last=True, collate_fn=yolo_dataset_collate)
gen_val = DataLoader(val_dataset, shuffle=True, batch_size=Batch_size, num_workers=4,pin_memory=True,
drop_last=True, collate_fn=yolo_dataset_collate)
else:
gen = Generator(Batch_size, lines[:num_train],
(input_shape[0], input_shape[1])).generate(train=True, mosaic = mosaic)
gen_val = Generator(Batch_size, lines[num_train:],
(input_shape[0], input_shape[1])).generate(train=False, mosaic = False)
epoch_size = max(1, num_train//Batch_size)
epoch_size_val = num_val//Batch_size
#------------------------------------#
    #   Freeze part of the network for this training phase
#------------------------------------#
for param in model.backbone.parameters():
param.requires_grad = False
for epoch in range(Init_Epoch,Freeze_Epoch):
fit_ont_epoch(net,yolo_losses,epoch,epoch_size,epoch_size_val,gen,gen_val,Freeze_Epoch,Cuda,writer)
lr_scheduler.step()
if True:
lr = 1e-4
Batch_size = 2
Freeze_Epoch = 50
Unfreeze_Epoch = 100
#----------------------------------------------------------------------------#
    #   In practice the optimizer's weight_decay turned out to be counterproductive,
    #   so it is removed here; feel free to enable it and experiment, typically weight_decay=5e-4
#----------------------------------------------------------------------------#
optimizer = optim.Adam(net.parameters(),lr)
if Cosine_lr:
lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=5, eta_min=1e-5)
else:
lr_scheduler = optim.lr_scheduler.StepLR(optimizer,step_size=1,gamma=0.92)
if Use_Data_Loader:
train_dataset = YoloDataset(lines[:num_train], (input_shape[0], input_shape[1]), mosaic=mosaic, is_train=True)
val_dataset = YoloDataset(lines[num_train:], (input_shape[0], input_shape[1]), mosaic=False, is_train=False)
gen = DataLoader(train_dataset, shuffle=True, batch_size=Batch_size, num_workers=4, pin_memory=True,
drop_last=True, collate_fn=yolo_dataset_collate)
gen_val = DataLoader(val_dataset, shuffle=True, batch_size=Batch_size, num_workers=4,pin_memory=True,
drop_last=True, collate_fn=yolo_dataset_collate)
else:
gen = Generator(Batch_size, lines[:num_train],
(input_shape[0], input_shape[1])).generate(train=True, mosaic = mosaic)
gen_val = Generator(Batch_size, lines[num_train:],
(input_shape[0], input_shape[1])).generate(train=False, mosaic = False)
epoch_size = max(1, num_train//Batch_size)
epoch_size_val = num_val//Batch_size
#------------------------------------#
    #   Train after unfreezing
#------------------------------------#
for param in model.backbone.parameters():
param.requires_grad = True
for epoch in range(Freeze_Epoch,Unfreeze_Epoch):
fit_ont_epoch(net,yolo_losses,epoch,epoch_size,epoch_size_val,gen,gen_val,Unfreeze_Epoch,Cuda,writer)
lr_scheduler.step()
|
py | 1a3d7ff315b3e4bbb296d5ff709a9ddfe7132646 | # -*- coding: utf-8 -*-
__version__ = '0.6.5'
__version_text__ = '''giterm version {0} -
Copyright © 2015-2017 Tim Legrand -
License BSD 2-Clause License -
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE
'''.format(__version__)
|
py | 1a3d8063674a8af6386f91c2f64b9da2d50b79b4 | import boto3
import os
import json
import datetime
from time import gmtime, strftime
from boto3.session import Session
region = boto3.session.Session().region_name
sagemaker = boto3.client('sagemaker')
code_pipeline = boto3.client('codepipeline')
def lambda_handler(event, context):
try:
print(event)
train_start = strftime("%Y-%m-%d-%H-%M-%S", gmtime())
train_start_calc = datetime.datetime.now()
codepipeline_job = event['CodePipeline.job']['id']
print('[INFO]CODEPIPELINE_JOB:', codepipeline_job)
print('[INFO]TRAIN_START:', train_start)
userParamText = event['CodePipeline.job']['data']['actionConfiguration']['configuration']['UserParameters']
user_param = json.loads(userParamText)
job_name = 'mlops-bia-xgboost-' + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
print('[INFO]TRAINING_JOB_NAME:', job_name)
event['job_name'] = job_name
event['stage'] = 'Training'
event['status'] = 'InProgress'
event['message'] = 'training job "{} started."'.format(job_name)
create_training_job(user_param, job_name)
write_job_info_s3(event)
put_job_success(event)
except Exception as e:
print(e)
print('[ERROR] Unable to create training job.')
event['message'] = str(e)
put_job_failure(event)
return event
def create_training_job(user_param, job_name):
try:
print("[INFO]CODEPIPELINE_USER_PARAMETERS:", user_param)
# Environment variable containing S3 bucket for storing the model artifact
model_artifact_bucket = os.environ['ModelArtifactBucket']
print("[INFO]MODEL_ARTIFACT_BUCKET:", model_artifact_bucket)
# Environment variable containing S3 bucket containing training data
data_bucket = os.environ['S3DataBucket']
print("[INFO]TRAINING_DATA_BUCKET:", data_bucket)
# Role to pass to SageMaker training job that has access to training data in S3, etc
SageMakerRole = os.environ['SageMakerExecutionRole']
        # Get ECR information for BIA
algo_version = user_param['Algorithm']
ecr_path = os.environ['AlgoECR']
container_path = ecr_path + '/' + algo_version
print('[INFO]Container Path', container_path)
train_instance_type = user_param['traincompute']
train_volume_size = user_param['traininstancevolumesize']
train_instance_count = user_param['traininstancecount']
maxdepth_in = user_param['MaxDepth']
eta_in = user_param['eta']
gamma_in = user_param['gamma']
min_child_weight_in = user_param['MinChildWeight']
subsample_in = user_param['SubSample']
silent_in = user_param['Silent']
objective_in = user_param['Objective']
num_round_in = user_param['NumRound']
print('[INFO]TRAIN_INSTANCE_TYPE:', train_instance_type)
print('[INFO]TRAIN_VOLUME_SIZE:', train_volume_size)
print('[INFO]TRAIN_INSTANCE_COUNT:', train_instance_count)
create_training_params = \
{
"AlgorithmSpecification": {
"TrainingImage": container_path,
"TrainingInputMode": "File"
},
"RoleArn": SageMakerRole,
"OutputDataConfig": {
"S3OutputPath": "s3://{}/{}/output".format(model_artifact_bucket, job_name)
},
"ResourceConfig": {
"InstanceCount": train_instance_count,
"InstanceType": train_instance_type,
"VolumeSizeInGB": train_volume_size
},
"TrainingJobName": job_name,
"HyperParameters": {
"max_depth": maxdepth_in,
"eta": eta_in,
"gamma": gamma_in,
"min_child_weight": min_child_weight_in,
"objective": objective_in,
"num_round": num_round_in
},
"StoppingCondition": {
"MaxRuntimeInSeconds": 3600
},
"InputDataConfig": [
{
"ChannelName": "train",
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": "s3://{}/train".format(data_bucket),
"S3DataDistributionType": "FullyReplicated"
}
},
"ContentType": "csv",
"CompressionType": "None"
}
],
"OutputDataConfig": {
"S3OutputPath": "s3://{}/{}/output".format(model_artifact_bucket, job_name)
},
"StoppingCondition": {
"MaxRuntimeInSeconds": 60 * 60
}
}
response = sagemaker.create_training_job(**create_training_params)
except Exception as e:
print(str(e))
raise(e)
def write_job_info_s3(event):
print(event)
objectKey = event['CodePipeline.job']['data']['outputArtifacts'][0]['location']['s3Location']['objectKey']
bucketname = event['CodePipeline.job']['data']['outputArtifacts'][0]['location']['s3Location']['bucketName']
artifactCredentials = event['CodePipeline.job']['data']['artifactCredentials']
artifactName = event['CodePipeline.job']['data']['outputArtifacts'][0]['name']
# S3 Managed Key for Encryption
S3SSEKey = os.environ['SSEKMSKeyIdIn']
json_data = json.dumps(event)
print(json_data)
session = Session(aws_access_key_id=artifactCredentials['accessKeyId'],
aws_secret_access_key=artifactCredentials['secretAccessKey'],
aws_session_token=artifactCredentials['sessionToken'])
s3 = session.resource("s3")
object = s3.Object(bucketname, objectKey)
print(object)
object.put(Body=json_data, ServerSideEncryption='aws:kms', SSEKMSKeyId=S3SSEKey)
print('[SUCCESS]Job Information Written to S3')
def put_job_success(event):
print('[SUCCESS]Training Job started - kicking off next stage in pipeline...')
print(event['message'])
code_pipeline.put_job_success_result(jobId=event['CodePipeline.job']['id'])
def put_job_failure(event):
print('[FAILURE]Putting job failure')
print(event['message'])
code_pipeline.put_job_failure_result(jobId=event['CodePipeline.job']['id'], failureDetails={'message': event['message'], 'type': 'JobFailed'})
return event
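# Hedged sketch of the CodePipeline UserParameters JSON this handler expects; the keys
# are the ones read in create_training_job, while the values below are hypothetical:
#   {
#     "Algorithm": "xgboost:latest", "traincompute": "ml.m4.xlarge",
#     "traininstancevolumesize": 10, "traininstancecount": 1,
#     "MaxDepth": "5", "eta": "0.2", "gamma": "4", "MinChildWeight": "6",
#     "SubSample": "0.8", "Silent": "0", "Objective": "binary:logistic", "NumRound": "100"
#   }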
|
py | 1a3d809929b5967a4b3f8edc69f5416f2455ea18 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@version: 1.0
@author: Evan
@time: 2019/12/4 15:16
"""
import tornado.web
import tornado.ioloop
class CookieHandler(tornado.web.RequestHandler):
def get(self):
"""
        The cookie is set in the Response Headers, for example:
Set-Cookie: hello="2|1:0|10:1575445821|5:hello|8:RXZhbg==|c6eb04740d9320d33053b28cb0ea8a799f17b26950869fe22309671ccec57513"; expires=Fri, 03 Jan 2020 07:50:21 GMT; Path=/
:return:
"""
# self.set_cookie('username', 'admin', expires_days=3)
self.set_secure_cookie('hello', 'Evan')
class GetCookieHandler(tornado.web.RequestHandler):
def get(self):
"""
        The cookie is sent in the Request Headers, for example:
Cookie: hello="2|1:0|10:1575445307|5:hello|8:RXZhbg==|b062aa734378e7a3177e8626d66acee4b52b4dc4df1293c20eb926d25824607e"
"""
# username = self.get_cookie('username')
username = self.get_secure_cookie('hello')
self.write(username)
settings = {
'cookie_secret': 'asd123fgh'
}
app = tornado.web.Application([
(r'^/$', CookieHandler),
(r'^/getCookie/$', GetCookieHandler)
], **settings)
app.listen(8000)
tornado.ioloop.IOLoop.instance().start()
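# Rough manual test (port and paths come from app.listen and the handler routes above):
#   GET http://localhost:8000/           -> sets the signed 'hello' cookie
#   GET http://localhost:8000/getCookie/ -> echoes it back (get_secure_cookie returns bytes)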
|
py | 1a3d81a562a6ec038af6567bb384755b33bb2595 | """
Precisely APIs
Enhance & enrich your data, applications, business processes, and workflows with rich location, information, and identify APIs. # noqa: E501
The version of the OpenAPI document: 11.9.3
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from com.precisely.apis.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from com.precisely.apis.exceptions import ApiAttributeError
class GeoLocationState(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'confidence': (str,), # noqa: E501
'value': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'confidence': 'confidence', # noqa: E501
'value': 'value', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""GeoLocationState - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
confidence (str): [optional] # noqa: E501
value (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""GeoLocationState - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
confidence (str): [optional] # noqa: E501
value (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
py | 1a3d8435134b5479c663da81ce1063d1d323363c | # encoding: utf-8
"""lxml custom element classes for DrawingML line-related XML elements."""
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
from pptx.enum.dml import MSO_LINE_DASH_STYLE
from pptx.oxml.xmlchemy import BaseOxmlElement, OptionalAttribute
class CT_PresetLineDashProperties(BaseOxmlElement):
"""`a:prstDash` custom element class"""
val = OptionalAttribute('val', MSO_LINE_DASH_STYLE)
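    # For reference, the wrapped element looks roughly like `<a:prstDash val="dash"/>`;
    # `val` is mapped to an MSO_LINE_DASH_STYLE member (e.g. MSO_LINE_DASH_STYLE.DASH).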
|
py | 1a3d8461726e2a92df9515b5f7f8a3fc8621d3de | import ctypes
import ctypes.util
import functools
import logging
import platform
import struct
import sys
import pymem.exception
import pymem.memory
import pymem.process
import pymem.ressources.kernel32
import pymem.ressources.structure
import pymem.thread
logger = logging.getLogger('pymem')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
class Pymem(object):
"""Initialize the Pymem class.
If process_name is given, will open the process and retrieve a handle over it.
Parameters
----------
process_name: str
The name of the process to be opened
"""
def __init__(self, process_name=None):
self.process_id = None
self.process_handle = None
self.thread_handle = None
self.is_WoW64 = None
self.py_run_simple_string = None
self._python_injected = None
if process_name:
self.open_process_from_name(process_name)
self.check_wow64()
def check_wow64(self):
"""Check if a process is running under WoW64.
"""
verdict = pymem.process.is_64_bit(self.process_handle)
self.is_WoW64 = bool(verdict)
def list_modules(self):
"""List a process loaded modules.
Returns
-------
list(MODULEINFO)
List of process loaded modules
"""
modules = pymem.process.enum_process_module(self.process_handle)
return modules
def inject_python_interpreter(self):
"""Inject python interpreter into target process and call Py_InitializeEx.
"""
def find_existing_interpreter(_python_version):
_local_handle = pymem.ressources.kernel32.GetModuleHandleW(_python_version)
module = pymem.process.module_from_name(self.process_handle, _python_version)
self.py_run_simple_string = (
module.lpBaseOfDll + (
pymem.ressources.kernel32.GetProcAddress(_local_handle, b'PyRun_SimpleString')
- _local_handle
)
)
self._python_injected = True
pymem.logger.debug('PyRun_SimpleString loc: 0x%08x' % self.py_run_simple_string)
return module.lpBaseOfDll
if self._python_injected:
return
# find the python library
python_version = "python{0}{1}.dll".format(sys.version_info.major, sys.version_info.minor)
python_lib = ctypes.util.find_library(python_version)
if not python_lib:
raise pymem.exception.PymemError('Could not find python library')
# Find or inject python module
python_module = pymem.process.module_from_name(self.process_handle, python_version)
if python_module:
python_lib_h = find_existing_interpreter(python_version)
else:
python_lib_h = pymem.process.inject_dll(self.process_handle, bytes(python_lib, 'ascii'))
if not python_lib_h:
raise pymem.exception.PymemError('Inject dll failed')
local_handle = pymem.ressources.kernel32.GetModuleHandleW(python_version)
py_initialize_ex = (
python_lib_h + (
pymem.ressources.kernel32.GetProcAddress(local_handle, b'Py_InitializeEx')
- local_handle
)
)
self.py_run_simple_string = (
python_lib_h + (
pymem.ressources.kernel32.GetProcAddress(local_handle, b'PyRun_SimpleString')
- local_handle
)
)
if not py_initialize_ex:
raise pymem.exception.PymemError('Empty py_initialize_ex')
if not self.py_run_simple_string:
raise pymem.exception.PymemError('Empty py_run_simple_string')
self.start_thread(py_initialize_ex)
self._python_injected = True
pymem.logger.debug('Py_InitializeEx loc: 0x%08x' % py_initialize_ex)
pymem.logger.debug('PyRun_SimpleString loc: 0x%08x' % self.py_run_simple_string)
def inject_python_shellcode(self, shellcode):
"""Inject a python shellcode into memory and execute it.
Parameters
----------
shellcode: str
A string with python instructions.
"""
shellcode = shellcode.encode('ascii')
shellcode_addr = pymem.ressources.kernel32.VirtualAllocEx(
self.process_handle,
0,
len(shellcode),
pymem.ressources.structure.MEMORY_STATE.MEM_COMMIT.value | pymem.ressources.structure.MEMORY_STATE.MEM_RESERVE.value,
pymem.ressources.structure.MEMORY_PROTECTION.PAGE_READWRITE.value
)
pymem.logger.debug('shellcode_addr loc: 0x%08x' % shellcode_addr)
written = ctypes.c_ulonglong(0) if '64bit' in platform.architecture() else ctypes.c_ulong(0)
pymem.ressources.kernel32.WriteProcessMemory(self.process_handle, shellcode_addr, shellcode, len(shellcode), ctypes.byref(written))
# check written
self.start_thread(self.py_run_simple_string, shellcode_addr)
def start_thread(self, address, params=None):
"""Create a new thread within the current debugged process.
Parameters
----------
address: int
An address from where the thread starts
params: int
An optional address with thread parameters
Returns
-------
int
The new thread identifier
"""
thread_id = ctypes.c_ulong(0)
thread_h = pymem.ressources.kernel32.CreateRemoteThread(
self.process_handle,
None,
0,
address,
params,
0,
None
)
pymem.ressources.kernel32.WaitForSingleObject(thread_h, -1)
pymem.logger.debug('New thread_id: 0x%08x' % thread_h)
return thread_h
def open_process_from_name(self, process_name):
"""Open process given it's name and stores the handle into process_handle
Parameters
----------
process_name: str
The name of the process to be opened
Raises
------
TypeError
If process name is not valid
ProcessNotFound
If process name is not found
CouldNotOpenProcess
If process cannot be opened
"""
if not process_name or not isinstance(process_name, str):
raise TypeError('Invalid argument: {}'.format(process_name))
process32 = pymem.process.process_from_name(process_name)
if not process32:
raise pymem.exception.ProcessNotFound(process_name)
self.process_id = process32.th32ProcessID
self.open_process_from_id(self.process_id)
def open_process_from_id(self, process_id):
"""Open process given it's name and stores the handle into `self.process_handle`.
Parameters
----------
process_id: int
The unique process identifier
Raises
------
TypeError
If process identifier is not an integer
CouldNotOpenProcess
If process cannot be opened
"""
if not process_id or not isinstance(process_id, int):
raise TypeError('Invalid argument: {}'.format(process_id))
self.process_id = process_id
self.process_handle = pymem.process.open(self.process_id)
if not self.process_handle:
raise pymem.exception.CouldNotOpenProcess(self.process_id)
pymem.logger.debug('Process {} is being debugged'.format(
process_id
))
def close_process(self):
"""Close the current opened process
Raises
------
ProcessError
If there is no process opened
"""
if not self.process_handle:
raise pymem.exception.ProcessError('You must open a process before calling this method')
pymem.process.close_handle(self.process_handle)
self.process_handle = None
self.process_id = None
self.is_WoW64 = None
self.py_run_simple_string = None
self._python_injected = None
if self.thread_handle:
pymem.process.close_handle(self.thread_handle)
def allocate(self, size):
"""Allocate memory into the current opened process.
Parameters
----------
size: int
The size of the region of memory to allocate, in bytes.
Raises
------
ProcessError
If there is no process opened
TypeError
If size is not an integer
Returns
-------
HANDLE
The base address of the current process.
"""
if not size or not isinstance(size, int):
raise TypeError('Invalid argument: {}'.format(size))
if not self.process_handle:
raise pymem.exception.ProcessError('You must open a process before calling this method')
address = pymem.memory.allocate_memory(self.process_handle, size)
return address
def free(self, address):
"""Free memory from the current opened process given an address.
Parameters
----------
address: int
An address of the region of memory to be freed.
Raises
------
ProcessError
If there is no process opened
TypeError
If address is not an integer
"""
if not address or not isinstance(address, int):
raise TypeError('Invalid argument: {}'.format(address))
if not self.process_handle:
raise pymem.exception.ProcessError('You must open a process before calling this method')
return pymem.memory.free_memory(self.process_handle, address)
@property
def process_base(self):
"""Lookup process base Module.
Raises
------
TypeError
If process_id is not an integer
ProcessError
If could not find process first module address
Returns
-------
MODULEINFO
Base module information
"""
if not self.process_id:
raise TypeError('You must open a process before calling this property')
base_module = pymem.process.base_module(self.process_handle)
if not base_module:
raise pymem.exception.ProcessError("Could not find process first module")
return base_module
@property
@functools.lru_cache(maxsize=1)
def main_thread(self):
"""Retrieve ThreadEntry32 of main thread given its creation time.
Raises
------
ProcessError
If there is no process opened or could not list process thread
Returns
-------
Thread
Process main thread
"""
if not self.process_id:
raise pymem.exception.ProcessError('You must open a process before calling this method')
threads = pymem.process.enum_process_thread(self.process_id)
threads = sorted(threads, key=lambda k: k.creation_time)
if not threads:
raise pymem.exception.ProcessError('Could not list process thread')
main_thread = threads[0]
main_thread = pymem.thread.Thread(self.process_handle, main_thread)
return main_thread
@property
@functools.lru_cache(maxsize=1)
def main_thread_id(self):
"""Retrieve th32ThreadID from main thread
Raises
------
ProcessError
If there is no process opened or could not list process thread
Returns
-------
int
Main thread identifier
"""
if not self.process_id:
raise pymem.exception.ProcessError('You must open a process before calling this method')
return self.main_thread.thread_id
def read_bytes(self, address, length):
"""Reads bytes from an area of memory in a specified process.
Parameters
----------
address: int
An address of the region of memory to be read.
length: int
Number of bytes to be read
Raises
------
ProcessError
            If there is no opened process
MemoryReadError
If ReadProcessMemory failed
Returns
-------
bytes
the raw value read
"""
if not self.process_handle:
raise pymem.exception.ProcessError('You must open a process before calling this method')
try:
value = pymem.memory.read_bytes(self.process_handle, address, length)
except pymem.exception.WinAPIError as e:
raise pymem.exception.MemoryReadError(address, length, e.error_code)
return value
def read_char(self, address):
"""Reads 1 byte from an area of memory in a specified process.
Parameters
----------
address: int
An address of the region of memory to be read.
Raises
------
ProcessError
            If there is no opened process
MemoryReadError
If ReadProcessMemory failed
TypeError
If address is not a valid integer
Returns
-------
str
returns the value read
"""
if not self.process_handle:
raise pymem.exception.ProcessError('You must open a process before calling this method')
try:
value = pymem.memory.read_char(self.process_handle, address)
except pymem.exception.WinAPIError as e:
raise pymem.exception.MemoryReadError(address, struct.calcsize('b'), e.error_code)
return value
def read_uchar(self, address):
"""Reads 1 byte from an area of memory in a specified process.
Parameters
----------
address: int
An address of the region of memory to be read.
Raises
------
ProcessError
            If there is no opened process
MemoryReadError
If ReadProcessMemory failed
TypeError
If address is not a valid integer
Returns
-------
str
returns the value read
"""
if not self.process_handle:
raise pymem.exception.ProcessError('You must open a process before calling this method')
try:
value = pymem.memory.read_uchar(self.process_handle, address)
except pymem.exception.WinAPIError as e:
raise pymem.exception.MemoryReadError(address, struct.calcsize('B'), e.error_code)
return value
def read_int(self, address):
"""Reads 4 byte from an area of memory in a specified process.
Parameters
----------
address: int
An address of the region of memory to be read.
Raises
------
ProcessError
            If there is no opened process
MemoryReadError
If ReadProcessMemory failed
TypeError
If address is not a valid integer
Returns
-------
int
returns the value read
"""
if not self.process_handle:
raise pymem.exception.ProcessError('You must open a process before calling this method')
try:
value = pymem.memory.read_int(self.process_handle, address)
except pymem.exception.WinAPIError as e:
raise pymem.exception.MemoryReadError(address, struct.calcsize('i'), e.error_code)
return value
def read_uint(self, address):
"""Reads 4 byte from an area of memory in a specified process.
Parameters
----------
address: int
An address of the region of memory to be read.
Raises
------
ProcessError
            If there is no opened process
MemoryReadError
If ReadProcessMemory failed
TypeError
If address is not a valid integer
Returns
-------
int
returns the value read
"""
if not self.process_handle:
raise pymem.exception.ProcessError('You must open a process before calling this method')
try:
value = pymem.memory.read_uint(self.process_handle, address)
except pymem.exception.WinAPIError as e:
raise pymem.exception.MemoryReadError(address, struct.calcsize('I'), e.error_code)
return value
def read_short(self, address):
"""Reads 2 byte from an area of memory in a specified process.
Parameters
----------
address: int
An address of the region of memory to be read.
Raises
------
ProcessError
            If there is no opened process
MemoryReadError
If ReadProcessMemory failed
TypeError
If address is not a valid integer
Returns
-------
int
returns the value read
"""
if not self.process_handle:
raise pymem.exception.ProcessError('You must open a process before calling this method')
try:
value = pymem.memory.read_short(self.process_handle, address)
except pymem.exception.WinAPIError as e:
raise pymem.exception.MemoryReadError(address, struct.calcsize('h'), e.error_code)
return value
def read_ushort(self, address):
"""Reads 2 byte from an area of memory in a specified process.
Parameters
----------
address: int
An address of the region of memory to be read.
Raises
------
ProcessError
            If there is no opened process
MemoryReadError
If ReadProcessMemory failed
TypeError
If address is not a valid integer
Returns
-------
int
returns the value read
"""
if not self.process_handle:
raise pymem.exception.ProcessError('You must open a process before calling this method')
try:
value = pymem.memory.read_ushort(self.process_handle, address)
except pymem.exception.WinAPIError as e:
raise pymem.exception.MemoryReadError(address, struct.calcsize('H'), e.error_code)
return value
def read_float(self, address):
"""Reads 4 byte from an area of memory in a specified process.
Parameters
----------
address: int
An address of the region of memory to be read.
Raises
------
ProcessError
            If there is no opened process
MemoryReadError
If ReadProcessMemory failed
TypeError
If address is not a valid integer
Returns
-------
float
returns the value read
"""
if not self.process_handle:
raise pymem.exception.ProcessError('You must open a process before calling this method')
try:
value = pymem.memory.read_float(self.process_handle, address)
except pymem.exception.WinAPIError as e:
raise pymem.exception.MemoryReadError(address, struct.calcsize('f'), e.error_code)
return value
def read_long(self, address):
"""Reads 4 byte from an area of memory in a specified process.
Parameters
----------
address: int
An address of the region of memory to be read.
Raises
------
ProcessError
            If there is no opened process
MemoryReadError
If ReadProcessMemory failed
TypeError
If address is not a valid integer
Returns
-------
int
returns the value read
"""
if not self.process_handle:
raise pymem.exception.ProcessError('You must open a process before calling this method')
try:
value = pymem.memory.read_long(self.process_handle, address)
except pymem.exception.WinAPIError as e:
raise pymem.exception.MemoryReadError(address, struct.calcsize('l'), e.error_code)
return value
def read_ulong(self, address):
"""Reads 4 byte from an area of memory in a specified process.
Parameters
----------
address: int
An address of the region of memory to be read.
Raises
------
ProcessError
            If there is no opened process
MemoryReadError
If ReadProcessMemory failed
TypeError
If address is not a valid integer
Returns
-------
int
returns the value read
"""
if not self.process_handle:
raise pymem.exception.ProcessError('You must open a process before calling this method')
try:
value = pymem.memory.read_ulong(self.process_handle, address)
except pymem.exception.WinAPIError as e:
raise pymem.exception.MemoryReadError(address, struct.calcsize('L'), e.error_code)
return value
def read_longlong(self, address):
"""Reads 8 byte from an area of memory in a specified process.
Parameters
----------
address: int
An address of the region of memory to be read.
Raises
------
ProcessError
            If there is no opened process
MemoryReadError
If ReadProcessMemory failed
TypeError
If address is not a valid integer
Returns
-------
int
returns the value read
"""
if not self.process_handle:
raise pymem.exception.ProcessError('You must open a process before calling this method')
try:
value = pymem.memory.read_longlong(self.process_handle, address)
except pymem.exception.WinAPIError as e:
raise pymem.exception.MemoryReadError(address, struct.calcsize('q'), e.error_code)
return value
def read_ulonglong(self, address):
"""Reads 8 byte from an area of memory in a specified process.
Parameters
----------
address: int
An address of the region of memory to be read.
Raises
------
ProcessError
            If there is no opened process
MemoryReadError
If ReadProcessMemory failed
TypeError
If address is not a valid integer
Returns
-------
int
returns the value read
"""
if not self.process_handle:
raise pymem.exception.ProcessError('You must open a process before calling this method')
try:
value = pymem.memory.read_ulonglong(self.process_handle, address)
except pymem.exception.WinAPIError as e:
raise pymem.exception.MemoryReadError(address, struct.calcsize('Q'), e.error_code)
return value
def read_double(self, address):
"""Reads 8 byte from an area of memory in a specified process.
Parameters
----------
address: int
An address of the region of memory to be read.
Raises
------
ProcessError
            If there is no opened process
MemoryReadError
If ReadProcessMemory failed
TypeError
If address is not a valid integer
Returns
-------
int
returns the value read
"""
if not self.process_handle:
raise pymem.exception.ProcessError('You must open a process before calling this method')
try:
value = pymem.memory.read_double(self.process_handle, address)
except pymem.exception.WinAPIError as e:
raise pymem.exception.MemoryReadError(address, struct.calcsize('d'), e.error_code)
return value
def read_string(self, address, byte=50):
"""Reads n `byte` from an area of memory in a specified process.
Parameters
----------
address: int
An address of the region of memory to be read.
byte: int
Amount of bytes to be read
Raises
------
ProcessError
            If there is no opened process
MemoryReadError
If ReadProcessMemory failed
TypeError
If address is not a valid integer
Returns
-------
str
returns the value read
"""
if not self.process_handle:
raise pymem.exception.ProcessError('You must open a process before calling this method')
if not byte or not isinstance(byte, int):
raise TypeError('Invalid argument: {}'.format(byte))
try:
value = pymem.memory.read_string(self.process_handle, address, byte)
except pymem.exception.WinAPIError as e:
raise pymem.exception.MemoryReadError(address, byte, e.error_code)
return value
def write_int(self, address, value):
"""Write `value` to the given `address` into the current opened process.
Parameters
----------
address: int
An address of the region of memory to be written.
value: int
the value to be written
Raises
------
ProcessError
            If there is no opened process
MemoryWriteError
If WriteProcessMemory failed
TypeError
If address is not a valid integer
"""
if not self.process_handle:
raise pymem.exception.ProcessError('You must open a process before calling this method')
if value is None or not isinstance(value, int):
raise TypeError('Invalid argument: {}'.format(value))
try:
pymem.memory.write_int(self.process_handle, address, value)
except pymem.exception.WinAPIError as e:
raise pymem.exception.MemoryWriteError(address, value, e.error_code)
def write_uint(self, address, value):
"""Write `value` to the given `address` into the current opened process.
Parameters
----------
address: int
An address of the region of memory to be written.
value: int
the value to be written
Raises
------
ProcessError
            If there is no opened process
MemoryWriteError
If WriteProcessMemory failed
TypeError
If address is not a valid integer
"""
if not self.process_handle:
raise pymem.exception.ProcessError('You must open a process before calling this method')
if value is None or not isinstance(value, int):
raise TypeError('Invalid argument: {}'.format(value))
try:
pymem.memory.write_uint(self.process_handle, address, value)
except pymem.exception.WinAPIError as e:
raise pymem.exception.MemoryWriteError(address, value, e.error_code)
def write_short(self, address, value):
"""Write `value` to the given `address` into the current opened process.
Parameters
----------
address: int
An address of the region of memory to be written.
value: int
the value to be written
Raises
------
ProcessError
            If there is no opened process
MemoryWriteError
If WriteProcessMemory failed
TypeError
If address is not a valid integer
"""
if not self.process_handle:
raise pymem.exception.ProcessError('You must open a process before calling this method')
if value is None or not isinstance(value, int):
raise TypeError('Invalid argument: {}'.format(value))
try:
pymem.memory.write_short(self.process_handle, address, value)
except pymem.exception.WinAPIError as e:
raise pymem.exception.MemoryWriteError(address, value, e.error_code)
def write_ushort(self, address, value):
"""Write `value` to the given `address` into the current opened process.
Parameters
----------
address: int
An address of the region of memory to be written.
value: int
the value to be written
Raises
------
ProcessError
            If there is no opened process
MemoryWriteError
If WriteProcessMemory failed
TypeError
If address is not a valid integer
"""
if not self.process_handle:
raise pymem.exception.ProcessError('You must open a process before calling this method')
if value is None or not isinstance(value, int):
raise TypeError('Invalid argument: {}'.format(value))
try:
pymem.memory.write_ushort(self.process_handle, address, value)
except pymem.exception.WinAPIError as e:
raise pymem.exception.MemoryWriteError(address, value, e.error_code)
def write_float(self, address, value):
"""Write `value` to the given `address` into the current opened process.
Parameters
----------
address: int
An address of the region of memory to be written.
value: float
the value to be written
Raises
------
ProcessError
            If there is no opened process
MemoryWriteError
If WriteProcessMemory failed
TypeError
If address is not a valid integer
"""
if not self.process_handle:
raise pymem.exception.ProcessError('You must open a process before calling this method')
if value is None or not isinstance(value, float):
raise TypeError('Invalid argument: {}'.format(value))
try:
pymem.memory.write_float(self.process_handle, address, value)
except pymem.exception.WinAPIError as e:
raise pymem.exception.MemoryWriteError(address, value, e.error_code)
def write_long(self, address, value):
"""Write `value` to the given `address` into the current opened process.
Parameters
----------
address: int
An address of the region of memory to be written.
value: int
the value to be written
Raises
------
ProcessError
            If there is no opened process
MemoryWriteError
If WriteProcessMemory failed
TypeError
If address is not a valid integer
"""
if not self.process_handle:
raise pymem.exception.ProcessError('You must open a process before calling this method')
if value is None or not isinstance(value, int):
raise TypeError('Invalid argument: {}'.format(value))
try:
pymem.memory.write_long(self.process_handle, address, value)
except pymem.exception.WinAPIError as e:
raise pymem.exception.MemoryWriteError(address, value, e.error_code)
def write_ulong(self, address, value):
"""Write `value` to the given `address` into the current opened process.
Parameters
----------
address: int
An address of the region of memory to be written.
value: int
the value to be written
Raises
------
ProcessError
            If there is no opened process
MemoryWriteError
If WriteProcessMemory failed
TypeError
If address is not a valid integer
"""
if not self.process_handle:
raise pymem.exception.ProcessError('You must open a process before calling this method')
if value is None or not isinstance(value, int):
raise TypeError('Invalid argument: {}'.format(value))
try:
pymem.memory.write_ulong(self.process_handle, address, value)
except pymem.exception.WinAPIError as e:
raise pymem.exception.MemoryWriteError(address, value, e.error_code)
def write_longlong(self, address, value):
"""Write `value` to the given `address` into the current opened process.
Parameters
----------
address: int
An address of the region of memory to be written.
value: int
the value to be written
Raises
------
ProcessError
            If there is no opened process
MemoryWriteError
If WriteProcessMemory failed
TypeError
If address is not a valid integer
"""
if not self.process_handle:
raise pymem.exception.ProcessError('You must open a process before calling this method')
if value is None or not isinstance(value, int):
raise TypeError('Invalid argument: {}'.format(value))
try:
pymem.memory.write_longlong(self.process_handle, address, value)
except pymem.exception.WinAPIError as e:
raise pymem.exception.MemoryWriteError(address, value, e.error_code)
def write_ulonglong(self, address, value):
"""Write `value` to the given `address` into the current opened process.
Parameters
----------
address: int
An address of the region of memory to be written.
value: int
the value to be written
Raises
------
ProcessError
            If there is no opened process
MemoryWriteError
If WriteProcessMemory failed
TypeError
If address is not a valid integer
"""
if not self.process_handle:
raise pymem.exception.ProcessError('You must open a process before calling this method')
if value is None or not isinstance(value, int):
raise TypeError('Invalid argument: {}'.format(value))
try:
pymem.memory.write_ulonglong(self.process_handle, address, value)
except pymem.exception.WinAPIError as e:
raise pymem.exception.MemoryWriteError(address, value, e.error_code)
def write_double(self, address, value):
"""Write `value` to the given `address` into the current opened process.
Parameters
----------
address: int
An address of the region of memory to be written.
value: float
the value to be written
Raises
------
ProcessError
            If there is no opened process
MemoryWriteError
If WriteProcessMemory failed
TypeError
If address is not a valid integer
"""
if not self.process_handle:
raise pymem.exception.ProcessError('You must open a process before calling this method')
if value is None or not isinstance(value, float):
raise TypeError('Invalid argument: {}'.format(value))
try:
pymem.memory.write_double(self.process_handle, address, value)
except pymem.exception.WinAPIError as e:
raise pymem.exception.MemoryWriteError(address, value, e.error_code)
def write_string(self, address, value):
"""Write `value` to the given `address` into the current opened process.
Parameters
----------
address: int
An address of the region of memory to be written.
value: str
the value to be written
Raises
------
ProcessError
            If there is no opened process
MemoryWriteError
If WriteProcessMemory failed
TypeError
If address is not a valid integer
"""
if not self.process_handle:
raise pymem.exception.ProcessError('You must open a process before calling this method')
if value is None or not isinstance(value, str):
raise TypeError('Invalid argument: {}'.format(value))
value = value.encode()
try:
pymem.memory.write_string(self.process_handle, address, value)
except pymem.exception.WinAPIError as e:
raise pymem.exception.MemoryWriteError(address, value, e.error_code)
def write_char(self, address, value):
"""Write `value` to the given `address` into the current opened process.
Parameters
----------
address: int
An address of the region of memory to be written.
value: str
the value to be written
Raises
------
ProcessError
            If there is no opened process
MemoryWriteError
If WriteProcessMemory failed
TypeError
If address is not a valid integer
"""
if not self.process_handle:
raise pymem.exception.ProcessError('You must open a process before calling this method')
if value is None or not isinstance(value, str):
raise TypeError('Invalid argument: {}'.format(value))
value = value.encode()
try:
pymem.memory.write_char(self.process_handle, address, value)
except pymem.exception.WinAPIError as e:
raise pymem.exception.MemoryWriteError(address, value, e.error_code)
def write_uchar(self, address, value):
"""Write `value` to the given `address` into the current opened process.
Parameters
----------
address: int
An address of the region of memory to be written.
value: int
the value to be written
Raises
------
ProcessError
            If there is no opened process
MemoryWriteError
If WriteProcessMemory failed
TypeError
If address is not a valid integer
"""
if not self.process_handle:
raise pymem.exception.ProcessError('You must open a process before calling this method')
if value is None or not isinstance(value, int):
raise TypeError('Invalid argument: {}'.format(value))
try:
pymem.memory.write_uchar(self.process_handle, address, value)
except pymem.exception.WinAPIError as e:
raise pymem.exception.MemoryWriteError(address, value, e.error_code) |
py | 1a3d84953b9b792a1918876e9184716f1e570dbd | from typing import Callable, Mapping
import pandas as pd
from starfish.core.intensity_table.intensity_table import IntensityTable
from starfish.core.types import (
Axes,
Features,
SpotAttributes,
SpotFindingResults,
TraceBuildingStrategies
)
from .util import _build_intensity_table, _match_spots, _merge_spots_by_round
def build_spot_traces_exact_match(spot_results: SpotFindingResults, **kwargs) -> IntensityTable:
"""
Combines spots found in matching x/y positions across rounds and channels of
an ImageStack into traces represented as an IntensityTable.
Parameters
-----------
spot_results: SpotFindingResults
Spots found across rounds/channels of an ImageStack
"""
    # create IntensityTable with same x/y/z info across all r/ch
spot_attributes = list(spot_results.values())[0].spot_attrs
intensity_table = IntensityTable.zeros(
spot_attributes=spot_attributes,
round_labels=spot_results.round_labels,
ch_labels=spot_results.ch_labels,
)
for r, c in spot_results.keys():
value = spot_results[{Axes.ROUND: r, Axes.CH: c}].spot_attrs.data[Features.INTENSITY]
# if no exact match set value to 0
value = 0 if value.empty else value
intensity_table.loc[dict(c=c, r=r)] = value
return intensity_table
def build_traces_sequential(spot_results: SpotFindingResults, **kwargs) -> IntensityTable:
"""
Build spot traces without merging across channels and imaging rounds. Used for sequential
    methods like smFISH.
Parameters
----------
spot_results: SpotFindingResults
Spots found across rounds/channels of an ImageStack
Returns
-------
IntensityTable :
concatenated input SpotAttributes, converted to an IntensityTable object
"""
all_spots = pd.concat([sa.spot_attrs.data for sa in spot_results.values()],
ignore_index=True, sort=True)
# reassign spot_ids to index number so they are unique
all_spots['spot_id'] = all_spots.index
intensity_table = IntensityTable.zeros(
spot_attributes=SpotAttributes(all_spots),
ch_labels=spot_results.ch_labels,
round_labels=spot_results.round_labels,
)
i = 0
for (r, c), spot_attrs in spot_results.items():
for _, row in spot_attrs.spot_attrs.data.iterrows():
selector = dict(features=i, c=c, r=r)
intensity_table.loc[selector] = row[Features.INTENSITY]
i += 1
return intensity_table
def build_traces_nearest_neighbors(spot_results: SpotFindingResults, anchor_round: int=0,
search_radius: int=3):
"""
Combine spots found across round and channels of an ImageStack using a nearest neighbors
strategy
Parameters
-----------
spot_results : SpotFindingResults
Spots found across rounds/channels of an ImageStack
anchor_round : int
The imaging round against which other rounds will be checked for spots in the same
approximate pixel location.
search_radius : int
Number of pixels over which to search for spots in other rounds and channels.
"""
per_round_spot_results = _merge_spots_by_round(spot_results)
distances, indices = _match_spots(
per_round_spot_results,
anchor_round=anchor_round
)
intensity_table = _build_intensity_table(
per_round_spot_results, distances, indices,
rounds=spot_results.round_labels,
channels=spot_results.ch_labels,
search_radius=search_radius,
anchor_round=anchor_round
)
return intensity_table
TRACE_BUILDERS: Mapping[TraceBuildingStrategies, Callable] = {
TraceBuildingStrategies.EXACT_MATCH: build_spot_traces_exact_match,
TraceBuildingStrategies.NEAREST_NEIGHBOR: build_traces_nearest_neighbors,
TraceBuildingStrategies.SEQUENTIAL: build_traces_sequential,
}
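# Usage sketch: pick a builder from the registry above and call it with a populated
# SpotFindingResults (the `spot_results` variable is illustrative only):
#   build = TRACE_BUILDERS[TraceBuildingStrategies.NEAREST_NEIGHBOR]
#   intensity_table = build(spot_results, anchor_round=0, search_radius=3)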
|
py | 1a3d84ed4c663524a5091c7bfb0b31fa15300ea8 | """Useful utilities for interacting with Evergreen."""
from datetime import datetime, date
from typing import Any, Iterable, Optional
from dateutil.parser import parse
EVG_DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ"
EVG_SHORT_DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
EVG_DATE_FORMAT = "%Y-%m-%d"
EVG_DATE_INPUT_FORMAT = '"%Y-%m-%dT%H:%M:%S.000Z"'
def parse_evergreen_datetime(evg_date: Optional[Any]) -> Optional[datetime]:
"""
Convert an evergreen datetime string into a datetime object.
:param evg_date: String to convert to a datetime.
:return: datetime version of date.
"""
if not evg_date:
return None
if type(evg_date) in [int, float]:
return datetime.fromtimestamp(evg_date)
return parse(evg_date)
def parse_evergreen_short_datetime(evg_date: Optional[str]) -> Optional[datetime]:
"""
Convert an evergreen datetime string into a datetime object.
:param evg_date: String to convert to a datetime.
:return: datetime version of date.
"""
if not evg_date:
return None
return datetime.strptime(evg_date, EVG_SHORT_DATETIME_FORMAT)
def format_evergreen_datetime(when: datetime) -> str:
"""
Convert a datetime object into an evergreen consumable string.
:param when: datetime to convert.
:return: string evergreen can understand.
"""
return when.strftime(EVG_DATE_INPUT_FORMAT)
def evergreen_input_to_output(input_date: str) -> str:
"""
Convert a date from evergreen to a date to send back to evergreen.
:param input_date: date to convert.
:return: date to send to evergreen.
"""
intermediate = parse_evergreen_datetime(input_date)
if intermediate:
return format_evergreen_datetime(intermediate)
return input_date
def parse_evergreen_date(evg_date: Optional[str]) -> Optional[date]:
"""
Convert an evergreen date string into a date object.
:param evg_date: String to convert to a date.
:return: date version of date.
"""
if not evg_date:
return None
return datetime.strptime(evg_date, EVG_DATE_FORMAT).date()
def iterate_by_time_window(
iterator: Iterable, before: datetime, after: datetime, time_attr: str
) -> Iterable:
"""
Iterate over a window of time.
For a given iterator, generate the items that are within the specified time window.
Note: Since most evergreen iterators start with the most recent items and then look backwards
    in time, `before` and `after` bound when items will be seen (i.e. `before`
    should be later in time than `after`, since the most recent items are seen first).
:param iterator: Iterator to window.
:param before: Return items earlier than this timestamp.
:param after: Return items later than this timestamp.
:param time_attr: Attribute of items in the iterator containing timestamp to check.
:return: Iterator for items in the given time window.
"""
for item in iterator:
item_time = getattr(item, time_attr)
if item_time > before:
continue
if item_time < after:
break
yield item
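# Usage sketch (the iterator and timestamps are hypothetical):
#   window = iterate_by_time_window(
#       some_evergreen_iterator,
#       before=datetime(2020, 2, 1),
#       after=datetime(2020, 1, 1),
#       time_attr="create_time",
#   )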
|
py | 1a3d852c8290ac9eb598b9f4865c3b59e0f9cfba | import argparse
from datetime import datetime, timedelta
import praw
from prettytable import PrettyTable
from psaw import PushshiftAPI
import config_terminal as cfg
from helper_funcs import check_positive
# -------------------------------------------------------------------------------------------------------------------
def due_diligence(l_args, s_ticker):
parser = argparse.ArgumentParser(prog='red',
description="""Print top stock's due diligence from other users. [Source: Reddit] """)
parser.add_argument('-l', "--limit", action="store", dest="n_limit", type=check_positive, default=5,
help='limit of posts to retrieve.')
parser.add_argument('-d', "--days", action="store", dest="n_days", type=check_positive, default=3,
help="number of prior days to look for.")
parser.add_argument('-a', "--all", action="store_true", dest="b_all", default=False,
help="""search through all flairs (apart from Yolo and Meme), otherwise we focus on
specific flairs: DD, technical analysis, Catalyst, News, Advice, Chart """)
(ns_parser, l_unknown_args) = parser.parse_known_args(l_args)
if l_unknown_args:
print(f"The following args couldn't be interpreted: {l_unknown_args}\n")
return
praw_api = praw.Reddit(client_id=cfg.API_REDDIT_CLIENT_ID,
client_secret=cfg.API_REDDIT_CLIENT_SECRET,
username=cfg.API_REDDIT_USERNAME,
user_agent=cfg.API_REDDIT_USER_AGENT,
password=cfg.API_REDDIT_PASSWORD)
psaw_api = PushshiftAPI()
n_ts_after = int((datetime.today() - timedelta(days=ns_parser.n_days)).timestamp())
l_flair_text = ['DD', 'technical analysis', 'Catalyst', 'News', 'Advice', 'Chart']
l_sub_reddits = ['pennystocks', 'RobinHoodPennyStocks', 'Daytrading', 'StockMarket', 'stocks', 'investing', 'wallstreetbets']
submissions = psaw_api.search_submissions(after=int(n_ts_after),
subreddit=l_sub_reddits,
q=s_ticker,
filter=['id'])
d_submission = {}
n_flair_posts_found = 0
while True:
submission = next(submissions, None)
if submission:
# Get more information about post using PRAW api
submission = praw_api.submission(id=submission.id)
# Ensure that the post hasn't been removed in the meanwhile
if not submission.removed_by_category:
                # Either focus on the specific DD-style flairs (default) or, with --all,
                # only filter out the Yolo and Meme flairs, based on the b_all flag
if (submission.link_flair_text in l_flair_text,
submission.link_flair_text not in ['Yolo', 'Meme'])[ns_parser.b_all]:
# Refactor data
s_datetime = datetime.utcfromtimestamp(submission.created_utc).strftime("%d/%m/%Y %H:%M:%S")
s_link = f"https://www.reddit.com{submission.permalink}"
s_all_awards = ""
for award in submission.all_awardings:
s_all_awards += f"{award['count']} {award['name']}\n"
s_all_awards = s_all_awards[:-2]
# Create dictionary with data to construct dataframe allows to save data
d_submission[submission.id] = {
'created_utc': s_datetime,
'subreddit': submission.subreddit,
'link_flair_text': submission.link_flair_text,
'title':submission.title,
'score': submission.score,
'link': s_link,
'num_comments': submission.num_comments,
'upvote_ratio': submission.upvote_ratio,
'awards': s_all_awards
}
# Print post data collected so far
print(f"{s_datetime} - {submission.title}")
print(f"{s_link}")
t_post = PrettyTable(['Subreddit', 'Flair', 'Score', '# Comments', 'Upvote %', "Awards"])
t_post.add_row([submission.subreddit, submission.link_flair_text, submission.score,
submission.num_comments,f"{round(100*submission.upvote_ratio)}%", s_all_awards])
print(t_post)
print("\n")
# If needed, submission.comments could give us the top comments
# Increment count of valid posts found
n_flair_posts_found += 1
# Check if number of wanted posts found has been reached
if n_flair_posts_found > ns_parser.n_limit-1:
break
# Check if search_submissions didn't get anymore posts
else:
break
print(f"{('No more posts with specified requirements found.', '')[n_flair_posts_found > ns_parser.n_limit-1]}")
# Create df with found data. Useful for saving all info in excel file.
#df_submissions = pd.DataFrame.from_dict(d_submission, orient='index', columns=list(d_submission[next(iter(d_submission.keys()))].keys()))
#df_submissions.sort_values(by=['created_utc'], inplace=True, ascending=True)
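# Example invocation from a terminal menu (the ticker and argument values are hypothetical):
#   due_diligence(['-l', '5', '-d', '3', '--all'], 'GME')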
|
py | 1a3d855ab9a9c9068f0cc5fae5807febd687cffe | from office365.runtime.paths.service_operation import ServiceOperationPath
from office365.sharepoint.base_entity import BaseEntity
class RoleDefinition(BaseEntity):
"""Defines a single role definition, including a name, description, and set of rights."""
@property
def id(self):
"""Specifies the identifier of the role definition.
Its value MUST be equal to or greater than 1073741824."""
return self.properties.get('Id', None)
@property
def role_type_kind(self):
"""Specifies the type of the role definition.
Its value MUST be equal to or greater than 0. Its value MUST be equal to or less than 5."""
return self.properties.get('RoleTypeKind', None)
@property
def name(self):
"""Gets a value that specifies the role definition name."""
return self.properties.get('Name', None)
@name.setter
def name(self, value):
"""Sets a value that specifies the role definition name."""
self.set_property('Name', value)
@property
def description(self):
"""Gets or sets a value that specifies the description of the role definition."""
return self.properties.get('Description', None)
@description.setter
def description(self, value):
"""Gets or sets a value that specifies the description of the role definition."""
self.set_property('Description', value)
def set_property(self, name, value, persist_changes=True):
if self.resource_path is None:
if name == "Id":
self._resource_path = ServiceOperationPath(
"GetById", [value], self._parent_collection.resource_path)
return super(RoleDefinition, self).set_property(name, value, persist_changes)
|
py | 1a3d8755c2ec33402b7c7403b998e168b193c3e8 | """
Pay to delegated puzzle
In this puzzle program, the solution must be a signed delegated puzzle, along with
its (unsigned) solution. The delegated puzzle is executed, passing in the solution.
This obviously could be done recursively, arbitrarily deep (as long as the maximum
cost is not exceeded).
If you want to specify the conditions directly (thus terminating the potential recursion),
you can use p2_conditions.
This roughly corresponds to bitcoin's graftroot.
"""
from tst.types.blockchain_format.program import Program
from . import p2_conditions
from .load_clvm import load_clvm
MOD = load_clvm("p2_delegated_puzzle.clvm")
def puzzle_for_pk(public_key: bytes) -> Program:
return MOD.curry(public_key)
def solution_for_conditions(conditions) -> Program:
delegated_puzzle = p2_conditions.puzzle_for_conditions(conditions)
return solution_for_delegated_puzzle(delegated_puzzle, Program.to(0))
def solution_for_delegated_puzzle(delegated_puzzle: Program, delegated_solution: Program) -> Program:
return delegated_puzzle.to([delegated_puzzle, delegated_solution])
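# Sketch of how these helpers fit together (public_key and conditions are placeholders):
#   puzzle = puzzle_for_pk(public_key)
#   solution = solution_for_conditions(conditions)  # wraps a p2_conditions puzzle in a delegated solution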
|
py | 1a3d895d6f682d2bec10b92bc81ebf7a7e61b42c | import socket
from flask import Flask
app = Flask(__name__)
@app.route("/")
def home():
return "ahoj"
if __name__ == "__main__":
# resolving machine IP address for correct web publishing
hostname = socket.gethostname()
ip_here = socket.gethostbyname(hostname)
app.run(debug=True, host=ip_here)
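    # Flask listens on port 5000 by default, so the page should be reachable at
    # http://<resolved-ip>:5000/ once app.run() starts.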
|
py | 1a3d8bc4e156f24c402444699e2c74fd48b57ee3 | #a10
import sys,re
def main():
# Get arguments from standard input
# If no file is specified generate the error and exit
if len(sys.argv) == 1:
print("Ciphertext file must be specified!")
sys.exit()
# If the file is specified open the file and use 'wells.txt'
# as the corpus file
c = getFile(sys.argv[1])
corpus = getFile('wells.txt')
# If n is specified in the command line use that or just use 3 as the default value
if len(sys.argv) > 2:
n = int(sys.argv[2])
else:
n = 3 # default value
# get bigrams from the corpus and the ciphertext file
bigramsRef = getCorpusBigrams(corpus)
ctbigrams = getCTBigrams(c)
# map the n most frequent ciphertext bigrams to the most frequent corpus bigrams
mapping = mapNBigrams(bigramsRef, ctbigrams, n)
while True:
# loops infinitely
# get input ciphertext from user
enciphered = input()
if enciphered == 'quit': # for testing
break
# for each word in the input:
for word in enciphered.split():
# construct regular expressions for known bigrams
regexp = subBigrams(word, mapping)
regexList = searchWords(regexp)
finalList = corpusFreq(corpus,regexList)
finalwords = ' '.join(finalList)
print(finalwords.lower())
return
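# Command-line sketch (the script and file names are hypothetical):
#   python a10.py ciphertext.txt 5
# then type enciphered text on stdin, or 'quit' to stop.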
def getFile(filename):
# returns a string of the contents of a file
f = open(filename, 'r')
s = f.read()
f.close()
return s
def preproc(aString):
# convert to uppercase, remove non-alpha chars (keep spaces), split into words
s = aString.upper()
for c in s:
if c not in ' ABCDEFGHIJKLMNOPQRSTUVWXYZ':
s = s.replace(c, ' ')
s = s.split()
return s
def dictToSortedList(dictionary, n):
# returns a list of n keys in order of their values, highest to lowest
# for tiesbreaking, goes in alphabetical order
result = []
for i in range(len(dictionary)):
if n == 0:
break
# get the maximum
maxkey = ''
maxval = -1
for item in dictionary:
if dictionary[item] > maxval:
# found max
maxkey = item
maxval = dictionary[item]
elif dictionary[item] == maxval:
# found something equal to the current max
# tie breaking by alphabetical order
if item < maxkey:
maxkey = item
# so that this key will no longer be considered for max
dictionary[maxkey] = -2
result.append(maxkey)
n -= 1
# return the keys in order
return result
def getCTBigrams(ciphertext):
# different than corpus bigrams bc these must not overlap
bigrams = {}
# remove non-alpha chars, convert to uppercase etc
ciphertext = preproc(ciphertext)
for word in ciphertext:
# get bigrams and their counts
if len(word) ==1:
# word is too small to have bigrams
continue
# word is large enough to have bigrams
i = 0
while i < len(word)-1:
bigram = word[i]+word[i+1]
if bigram in bigrams:
# increment its count
bigrams[bigram] +=1
else:
# set count to 1
bigrams[bigram] = 1
# go to the next 2 chars
i += 2
# return the dict of bigram counts
return bigrams
def getCorpusBigrams(corpus):
# counts the bigrams in the corpus file
# DOES overlap bigrams
bigrams = {}
# remove non-alpha chars, convert to uppercase, etc
corpus = preproc(corpus)
for word in corpus:
# get bigrams and their count
if len(word) ==1:
# word is too short to have bigrams
continue
for i in range(len(word)-1):
bigram = word[i]+word[i+1]
if bigram in bigrams:
# increment its count
bigrams[bigram] +=1
else:
# set its count to 1
bigrams[bigram] = 1
# return dict of bigrams and their counts
return bigrams
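# Quick illustration of the two counters above (added for clarity; "HELLO" is an
# arbitrary example word): the ciphertext counter pairs characters without overlap,
# while the corpus counter counts every adjacent pair.
#   getCTBigrams("HELLO")     -> {'HE': 1, 'LL': 1}
#   getCorpusBigrams("HELLO") -> {'HE': 1, 'EL': 1, 'LL': 1, 'LO': 1}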
def mapNBigrams(reference, cipherBGs, n):
# maps the n most frequent ciphertext bigrams to the n most frequent corpus bigrams
# get the lists of n most frequent bigrams
reference = dictToSortedList(reference, n)
cipherBGs = dictToSortedList(cipherBGs, n)
mapping = {}
# map the bigrams
for i in range(n):
mapping[cipherBGs[i]] = reference[i]
return mapping
def subBigrams(ciphertext, mapping):
    # returns a LIST of regexes
    # it is a list because a ciphertext word always has an even length, even when
    # the original plaintext word had an odd length (its last character is padding),
    # so we need 2 regular expressions, one for each case
# note: will not replace if there are two bigrams overlapping (ex. ABC where AB and BC both have mappings)
# assumes no whitespace
# to keep track of what characters have been changed
changed = [False]* len(ciphertext)
ciphertext = list(ciphertext)
for i in range(len(ciphertext)-1):
# if this has already been replaced, go to the next char
if changed[i]:
continue
# for each character that has a neighbor to the right
bigram = ciphertext[i] + ciphertext[i+1]
if bigram in mapping:
# this character has not been changed yet
changed[i] = True
changed[i+1] = True
# replace with the mapping
ciphertext[i], ciphertext[i+1] = mapping[bigram][0], mapping[bigram][1]
# replace characters that have not been changed with .
for i in range(len(ciphertext)):
if changed[i] == False:
ciphertext[i] = '.'
regexp = ['^'+ ''.join(ciphertext) + '$'] # this is for the even case
# for the odd case - if the end of the ciphertext is a known bigram, there
# is no odd case
if ciphertext[len(ciphertext)-1] == '.':
odd = '^'+ ''.join(ciphertext)[:-1] + '$'
regexp.append(odd)
return regexp
def searchWords(regexp):
# regexp is a list of the regular expressions generated.
# regexList is a list of all the words found in the dictionary
# after compiling the regular expression
# This method returns the list with all the possible words for all the
# regular expressions i.e for both odd and even cases if any
regexList = []
words = getFile("dictionary.txt")
words = words.split()
for item in regexp:
for word in words:
item = re.compile(item)
if re.match(item,word):
regexList.append(word)
return regexList
def corpusFreq(corpus,regexList):
# This method first processes the corpus file and returns
# a list of the words
# The frequencyDict is a dictionary with word as the key
# and the frequency (i.e the count) as the value
# After getting the frequencyDict we use the dicToSortedList
    # method to sort it in the order of decreasing frequency and
# in case of same frequency list them alphabetically
# The sorted list is then returned
newCorpus = preproc(corpus)
frequencyDict = {}
for word in regexList:
count = 0
for item in newCorpus:
if word == item:
count = count + 1
frequencyDict[word]=count
frequencyDict = dictToSortedList(frequencyDict,len(frequencyDict))
return frequencyDict
main()
|
py | 1a3d8c24b4c6fdfa0d4bed4569b87190f68d7510 | n = int(input('Enter the first term: '))
r = int(input('Enter the common difference: '))
for c in range(0,10):
print(n+r*c) |
py | 1a3d8c7f478d93eab824de00ce875f1cbb148331 | from typing import Any
import typing
from conda_forge_tick.xonsh_utils import indir
from .core import MiniMigrator
from conda_forge_tick.utils import as_iterable
if typing.TYPE_CHECKING:
from ..migrators_types import AttrsTypedDict
class PipMigrator(MiniMigrator):
bad_install = (
"python setup.py install",
"python -m pip install --no-deps --ignore-installed .",
)
def filter(self, attrs: "AttrsTypedDict", not_bad_str_start: str = "") -> bool:
scripts = as_iterable(
attrs.get("meta_yaml", {}).get("build", {}).get("script", []),
)
return not bool(set(self.bad_install) & set(scripts))
def migrate(self, recipe_dir: str, attrs: "AttrsTypedDict", **kwargs: Any) -> None:
with indir(recipe_dir):
with open("meta.yaml") as fp:
lines = fp.readlines()
new_lines = []
for line in lines:
for b in self.bad_install:
tst_str = "script: %s" % b
if tst_str in line:
line = line.replace(
tst_str,
"script: {{ PYTHON }} -m pip install . --no-deps -vv",
)
break
new_lines.append(line)
with open("meta.yaml", "w") as fp:
for line in new_lines:
fp.write(line)
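# Example of the rewrite performed by migrate() above (illustrative meta.yaml line):
#   before: script: python setup.py install
#   after:  script: {{ PYTHON }} -m pip install . --no-deps -vv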
|
py | 1a3d8d83f99e39e19cb7aacae511b23a8f63181a | import warnings
from torchvision.datasets import *
from .base import *
from .coco import COCOSegmentation
from .ade20k import ADE20KSegmentation
from .pascal_voc import VOCSegmentation
from .pascal_aug import VOCAugSegmentation
from .pcontext import ContextSegmentation
from .cityscapes import CitySegmentation
from .imagenet import ImageNetDataset
from .minc import MINCDataset
from .steel import SteelSegmentation
from ..utils import EncodingDeprecationWarning
datasets = {
'coco': COCOSegmentation,
'ade20k': ADE20KSegmentation,
'pascal_voc': VOCSegmentation,
'pascal_aug': VOCAugSegmentation,
'pcontext': ContextSegmentation,
'citys': CitySegmentation,
'imagenet': ImageNetDataset,
'minc': MINCDataset,
'cifar10': CIFAR10,
'steel': SteelSegmentation,
}
acronyms = {
'coco': 'coco',
'pascal_voc': 'voc',
'pascal_aug': 'voc',
'pcontext': 'pcontext',
'ade20k': 'ade',
'citys': 'citys',
'minc': 'minc',
'cifar10': 'cifar10',
}
def get_dataset(name, **kwargs):
return datasets[name.lower()](**kwargs)
def _make_deprecate(meth, old_name):
new_name = meth.__name__
def deprecated_init(*args, **kwargs):
warnings.warn("encoding.dataset.{} is now deprecated in favor of encoding.dataset.{}."
.format(old_name, new_name), EncodingDeprecationWarning)
return meth(*args, **kwargs)
deprecated_init.__doc__ = r"""
{old_name}(...)
.. warning::
        This method is now deprecated in favor of :func:`encoding.datasets.{new_name}`.
    See :func:`~encoding.datasets.{new_name}` for details.""".format(
old_name=old_name, new_name=new_name)
deprecated_init.__name__ = old_name
return deprecated_init
get_segmentation_dataset = _make_deprecate(get_dataset, 'get_segmentation_dataset')
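# Minimal usage sketch (added for illustration; the CIFAR10 keyword arguments come
# from torchvision and the root path is a placeholder):
#   train_set = get_dataset('cifar10', root='./data', train=True, download=True)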
|
py | 1a3d8d8f7fc97bdc4cca831b84c6bb46eb78c3e7 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class Article(object):
def __init__(self):
self._action_name = None
self._desc = None
self._image_url = None
self._title = None
self._url = None
@property
def action_name(self):
return self._action_name
@action_name.setter
def action_name(self, value):
self._action_name = value
@property
def desc(self):
return self._desc
@desc.setter
def desc(self, value):
self._desc = value
@property
def image_url(self):
return self._image_url
@image_url.setter
def image_url(self, value):
self._image_url = value
@property
def title(self):
return self._title
@title.setter
def title(self, value):
self._title = value
@property
def url(self):
return self._url
@url.setter
def url(self, value):
self._url = value
def to_alipay_dict(self):
params = dict()
if self.action_name:
if hasattr(self.action_name, 'to_alipay_dict'):
params['action_name'] = self.action_name.to_alipay_dict()
else:
params['action_name'] = self.action_name
if self.desc:
if hasattr(self.desc, 'to_alipay_dict'):
params['desc'] = self.desc.to_alipay_dict()
else:
params['desc'] = self.desc
if self.image_url:
if hasattr(self.image_url, 'to_alipay_dict'):
params['image_url'] = self.image_url.to_alipay_dict()
else:
params['image_url'] = self.image_url
if self.title:
if hasattr(self.title, 'to_alipay_dict'):
params['title'] = self.title.to_alipay_dict()
else:
params['title'] = self.title
if self.url:
if hasattr(self.url, 'to_alipay_dict'):
params['url'] = self.url.to_alipay_dict()
else:
params['url'] = self.url
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = Article()
if 'action_name' in d:
o.action_name = d['action_name']
if 'desc' in d:
o.desc = d['desc']
if 'image_url' in d:
o.image_url = d['image_url']
if 'title' in d:
o.title = d['title']
if 'url' in d:
o.url = d['url']
return o
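# Round-trip sketch (added for illustration; the field values are placeholders):
#   a = Article()
#   a.title = 'Hello'
#   a.url = 'http://example.com'
#   d = a.to_alipay_dict()                  # {'title': 'Hello', 'url': 'http://example.com'}
#   restored = Article.from_alipay_dict(d)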
|
py | 1a3d8ed0f90d278f4c11c2560c6d0f74efe9b83b | # Copyright 2015 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_utils import uuidutils
from neutron.common import constants
from neutron import context
from neutron.objects.qos import policy
from neutron.objects.qos import rule
from neutron.plugins.ml2.drivers.mech_sriov.agent.common import exceptions
from neutron.plugins.ml2.drivers.mech_sriov.agent.extension_drivers import (
qos_driver)
from neutron.services.qos import qos_consts
from neutron.tests import base
class QosSRIOVAgentDriverTestCase(base.BaseTestCase):
ASSIGNED_MAC = '00:00:00:00:00:66'
PCI_SLOT = '0000:06:00.1'
def setUp(self):
super(QosSRIOVAgentDriverTestCase, self).setUp()
self.context = context.get_admin_context()
self.qos_driver = qos_driver.QosSRIOVAgentDriver()
self.qos_driver.initialize()
self.qos_driver.eswitch_mgr = mock.Mock()
self.qos_driver.eswitch_mgr.set_device_max_rate = mock.Mock()
self.qos_driver.eswitch_mgr.set_device_min_tx_rate = mock.Mock()
self.qos_driver.eswitch_mgr.clear_max_rate = mock.Mock()
self.qos_driver.eswitch_mgr.clear_min_tx_rate = mock.Mock()
self.max_rate_mock = self.qos_driver.eswitch_mgr.set_device_max_rate
self.min_tx_rate_mock = \
self.qos_driver.eswitch_mgr.set_device_min_tx_rate
self.clear_max_rate_mock = self.qos_driver.eswitch_mgr.clear_max_rate
self.clear_min_tx_rate_mock = \
self.qos_driver.eswitch_mgr.clear_min_tx_rate
self.rule = self._create_bw_limit_rule_obj()
self.rule_min_tx_rate = self._create_minimum_bandwidth_rule_obj()
self.qos_policy = self._create_qos_policy_obj([self.rule])
self.qos_policy_min_tx_rate = self._create_qos_policy_obj(
[self.rule_min_tx_rate])
self.port = self._create_fake_port(self.qos_policy.id)
self.port_min = self._create_fake_port(self.qos_policy_min_tx_rate.id)
def _create_bw_limit_rule_obj(self):
rule_obj = rule.QosBandwidthLimitRule()
rule_obj.id = uuidutils.generate_uuid()
rule_obj.max_kbps = 2
rule_obj.max_burst_kbps = 200
rule_obj.obj_reset_changes()
return rule_obj
def _create_minimum_bandwidth_rule_obj(self):
rule_obj = rule.QosMinimumBandwidthRule()
rule_obj.id = uuidutils.generate_uuid()
rule_obj.min_kbps = 200
rule_obj.direction = constants.EGRESS_DIRECTION
rule_obj.obj_reset_changes()
return rule_obj
def _create_qos_policy_obj(self, rules):
policy_dict = {'id': uuidutils.generate_uuid(),
'project_id': uuidutils.generate_uuid(),
'name': 'test',
'description': 'test',
'shared': False,
'rules': rules}
policy_obj = policy.QosPolicy(self.context, **policy_dict)
policy_obj.obj_reset_changes()
for policy_rule in policy_obj.rules:
policy_rule.qos_policy_id = policy_obj.id
policy_rule.obj_reset_changes()
return policy_obj
def _create_fake_port(self, qos_policy_id):
return {'port_id': uuidutils.generate_uuid(),
'profile': {'pci_slot': self.PCI_SLOT},
'device': self.ASSIGNED_MAC,
qos_consts.QOS_POLICY_ID: qos_policy_id,
'device_owner': uuidutils.generate_uuid()}
def test_create_rule(self):
self.qos_driver.create(self.port, self.qos_policy)
self.max_rate_mock.assert_called_once_with(
self.ASSIGNED_MAC, self.PCI_SLOT, self.rule.max_kbps)
def test_update_rule(self):
self.qos_driver.update(self.port, self.qos_policy)
self.max_rate_mock.assert_called_once_with(
self.ASSIGNED_MAC, self.PCI_SLOT, self.rule.max_kbps)
def test_delete_rules_on_assigned_vf(self):
self.qos_driver.delete(self.port, self.qos_policy)
self.max_rate_mock.assert_called_once_with(
self.ASSIGNED_MAC, self.PCI_SLOT, 0)
def test_delete_rules_on_released_vf(self):
del self.port['device_owner']
self.qos_driver.delete(self.port, self.qos_policy)
self.clear_max_rate_mock.assert_called_once_with(self.PCI_SLOT)
def test__set_vf_max_rate_captures_sriov_failure(self):
self.max_rate_mock.side_effect = exceptions.SriovNicError()
self.qos_driver._set_vf_max_rate(self.ASSIGNED_MAC, self.PCI_SLOT)
def test__set_vf_max_rate_unknown_device(self):
with mock.patch.object(self.qos_driver.eswitch_mgr, 'device_exists',
return_value=False):
self.qos_driver._set_vf_max_rate(self.ASSIGNED_MAC, self.PCI_SLOT)
self.assertFalse(self.max_rate_mock.called)
def test_create_minimum_bandwidth(self):
self.qos_driver.create(self.port_min, self.qos_policy_min_tx_rate)
self.min_tx_rate_mock.assert_called_once_with(
self.ASSIGNED_MAC, self.PCI_SLOT, self.rule_min_tx_rate.min_kbps)
def test_update_minimum_bandwidth(self):
self.qos_driver.update(self.port_min, self.qos_policy_min_tx_rate)
self.min_tx_rate_mock.assert_called_once_with(
self.ASSIGNED_MAC, self.PCI_SLOT, self.rule_min_tx_rate.min_kbps)
def test_delete_minimum_bandwidth_on_assigned_vf(self):
self.qos_driver.delete(self.port_min, self.qos_policy_min_tx_rate)
self.min_tx_rate_mock.assert_called_once_with(
self.ASSIGNED_MAC, self.PCI_SLOT, 0)
def test_delete_minimum_bandwidth_on_released_vf(self):
del self.port_min['device_owner']
self.qos_driver.delete(self.port_min, self.qos_policy_min_tx_rate)
self.clear_min_tx_rate_mock.assert_called_once_with(self.PCI_SLOT)
|
py | 1a3d8f5e8deb44415895fdc58697ee783ed4ed88 | from os import urandom
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
class AESCipher:
""" Wrapper for cryptography aes cipher.
    :attr str padding_value: padding character appended before encryption.
"""
padding_value = "\0"
def __init__(self, key, iv_length):
"""
Cipher constructor.
:param str key: AES key
        :param int iv_length: length in bytes of the random initialization vector
"""
self._key = key
self._iv_length = iv_length
self._cipher = Cipher(
algorithms.AES(key),
modes.CBC(urandom(iv_length)),
backend=default_backend())
def encrypt(self, content):
"""
Encrypt string using (key, iv) pair.
Uses padding_value if content has wrong padding.
:param str content: unencrypted string.
:returns: Encrypted string.
"""
padding = len(content) % 16
if padding != 0:
content += "".join(self.padding_value for i in range(16 - padding))
iv = urandom(self._iv_length)
self._cipher.mode = modes.CBC(iv)
encryptor = self._cipher.encryptor()
ct = encryptor.update(content.encode('utf-8')) + encryptor.finalize()
return iv + ct
def decrypt(self, content):
"""
Decrypt string using (key, iv) pair.
Removes padding_value from the end.
:param str content: encrypted string.
:returns: Unencrypted string.
"""
iv = content[:self._iv_length]
self._cipher.mode = modes.CBC(iv)
decryptor = self._cipher.decryptor()
content = decryptor.update(content[self._iv_length:]) + decryptor.finalize()
content = content.decode('utf-8')
return content.rstrip(self.padding_value)
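    # Usage sketch (added for illustration; the 32-byte random key and the 16-byte
    # IV length are arbitrary choices):
    #   cipher = AESCipher(urandom(32), 16)
    #   blob = cipher.encrypt("secret text")   # bytes: iv followed by ciphertext
    #   assert cipher.decrypt(blob) == "secret text"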
def encrypt_file(self, in_filename):
"""
Encrypt file content using (key, iv) pair.
Uses padding_value if content has wrong padding.
:param str in_filename(in_filename): unencrypted data file name.
:returns: Encrypted string.
"""
with open(in_filename, "rb") as file:
content = file.read()
return self.encrypt(content)
def decrypt_file(self, in_filename):
"""
Decrypt file using (key, iv) pair.
Removes padding_value from the end.
:param str out_filename(out_filename): encrypted data file name.
:returns: Unencrypted string.
"""
with open(in_filename, "rb") as file:
content = file.read()
return self.decrypt(content)
def encrypt_file_save_file(self, in_filename, out_filename):
"""
Encrypt file using (key, iv) pair and save result in a file.
Uses padding_value if content has wrong padding.
:param str in_filename(in_filename): unencrypted data file name.
:param str out_filename(out_filename): encrypted data file name.
"""
content = self.encrypt_file(in_filename)
with open(out_filename, "wb+") as out:
out.write(content)
def decrypt_file_save_file(self, in_filename, out_filename):
"""
Decrypt file using (key, iv) pair and save result in a file.
Removes padding_value from the end.
:param str in_filename(in_filename): encrypted data file name.
:param str out_filename(out_filename): unencrypted data file name.
"""
content = self.decrypt_file(in_filename)
with open(out_filename, "wb+") as out:
out.write(content) |
py | 1a3d90078569922c65caa1eb3dcf61efdff8d73a | import os
import gmplot
import requests
from requests import RequestException
from numpy import random
class CoordinatesPlotter:
@staticmethod
def plot_coordinates_on_map():
apikey = ''
try:
response = requests.get("")
response.raise_for_status()
print(response)
response_json = response.json()
feeds = response_json['feeds']
lat_list = []
lon_list = []
for feed in feeds:
lat = float(feed['field2'])
lon = float(feed['field1'])
lat_list.append(lat)
lon_list.append(lon)
curr_lat = lat_list[-1]
curr_lon = lon_list[-1]
origin_lat = lat_list[0]
origin_lon = lon_list[0]
zoom_lvl = 16
gmap = gmplot.GoogleMapPlotter(origin_lat, origin_lon, zoom_lvl, apikey=apikey)
for i in range(100):
curr_lat += (random.rand() - 0.5) / 10000.0
lat_list.append(curr_lat)
curr_lon += (random.rand() - 0.5) / 10000.0
lon_list.append(curr_lon)
gmap.plot(lat_list, lon_list, edge_width=7, color='blue')
print(lat_list[0:5])
print(lon_list[0:5])
gmap.draw('map.html')
os.system('map.html')
except RequestException:
print('Request not satisfied!')
|
py | 1a3d91e4072e96765b182fa37e2953dd5ed3fd18 | from rest_framework.routers import DefaultRouter
from django.urls import path, include
from .views import LostViewSet
router = DefaultRouter()
router.register(r"", LostViewSet, basename="lost")
urlpatterns = [
path("", include(router.urls)),
] |
py | 1a3d921b3e6c4fb26fc8b4bcb1e7e6e11ba28570 | #
# Copyright 2020 Intellivoid Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ziproto import Headmap
from ziproto.ValueType import ValueType
import struct
class MapIter:
def __init__(self, decoder_obj):
if not decoder_obj.is_map():
raise TypeError('is not map')
self.bytedata = decoder_obj.bytedata
def __iter__(self):
head = self.bytedata[0]
t, value = Headmap.HEAD_MAP[head]
count, offset = value(self.bytedata[1:])
body = self.bytedata[1+offset:]
x = 0
current = None
while x<count:
if current is None:
current = Decoder(body)
else:
current = v.next()
v = current.next()
yield current, v
x+=1
class Decoder:
__slots__ = ('bytedata', 'filled')
def __init__(self, bytedata, filled=True):
self.bytedata = memoryview(bytedata)
self.filled = filled
def get_type(self):
head = self.bytedata[0]
t, _ = Headmap.HEAD_MAP[head]
return t
def get(self):
head = self.bytedata[0]
t, value = Headmap.HEAD_MAP[head]
if t == ValueType.ARRAY:
return [x.get() for x in self]
elif t == ValueType.MAP:
return {k.get(): v.get() for k, v in self.items()}
else:
x, _ = value(self.bytedata[1:])
if t == ValueType.STR:
return x.tobytes().decode('utf-8')
else:
return x
def is_nil(self):
head = self.bytedata[0]
t, _ = Headmap.HEAD_MAP[head]
if t == ValueType.NIL:
return True
return False
def is_array(self):
head = self.bytedata[0]
t, _ = Headmap.HEAD_MAP[head]
return t == ValueType.ARRAY
def is_map(self):
head = self.bytedata[0]
t, _ = Headmap.HEAD_MAP[head]
return t == ValueType.MAP
def get_bool(self):
head = self.bytedata[0]
t, value = Headmap.HEAD_MAP[head]
if t == ValueType.BOOL:
x, _ = value(self.bytedata[1:])
return x
raise ValueError('is not bool. %s' % t)
def get_int(self):
head = self.bytedata[0]
t, value = Headmap.HEAD_MAP[head]
if t == ValueType.INT:
x, _ = value(self.bytedata[1:])
return x
raise ValueError('is not int. %s' % t)
def get_float(self):
head = self.bytedata[0]
t, value = Headmap.HEAD_MAP[head]
if t == ValueType.FLOAT:
x, _ = value(self.bytedata[1:])
return x
raise ValueError('is not float. %s' % t)
def get_number(self):
head = self.bytedata[0]
t, value = Headmap.HEAD_MAP[head]
if t == ValueType.INT or t == ValueType.FLOAT:
x, _ = value(self.bytedata[1:])
return x
raise ValueError('is not number. %s' % t)
def get_str(self):
head = self.bytedata[0]
t, value = Headmap.HEAD_MAP[head]
if t == ValueType.STR:
x, _ = value(self.bytedata[1:])
return x.tobytes().decode('utf-8')
raise ValueError('is not str. %s' % t)
def get_bin(self):
head = self.bytedata[0]
t, value = Headmap.HEAD_MAP[head]
if t == ValueType.BIN:
x, _ = value(self.bytedata[1:])
return x
raise ValueError('is not bin. %s' % t)
def __len__(self):
head = self.bytedata[0]
t, value = Headmap.HEAD_MAP[head]
if t == ValueType.ARRAY or t == ValueType.MAP:
count, _ = value(self.bytedata[1:])
return count
raise ValueError('is not array or map. %s' % t)
def __getitem__(self, index):
if isinstance(index, int):
for i, x in enumerate(self):
if i == index:
return x
else:
for k, v in self.items():
if k.get() == index:
return v
def __iter__(self):
head = self.bytedata[0]
t, value = Headmap.HEAD_MAP[head]
if t == ValueType.ARRAY:
count, offset = value(self.bytedata[1:])
x = 0
current = None
while x<count:
if current is None:
current = Decoder(self.bytedata[1+offset:])
else:
current = current.next()
yield current
x += 1
else:
raise ValueError('is not array. %s' % t)
def get_bytes(self):
if self.filled:
return self.bytedata
head = self.bytedata[0]
t, value = Headmap.HEAD_MAP[head]
count, offset = value(self.bytedata[1:])
if t == ValueType.ARRAY:
x = 0
pos = 1+offset
body = self.bytedata[pos:]
current = Decoder(body)
while x<count:
pos += len(current.get_bytes())
current = current.next()
x+=1
return self.bytedata[0:pos]
elif t == ValueType.MAP:
x = 0
pos = 1+offset
body = self.bytedata[pos:]
current = Decoder(body)
while x<count:
v = current.next()
pos += len(current.get_bytes())
pos += len(v.get_bytes())
current = v.next()
x+=1
return self.bytedata[0:pos]
else:
return self.bytedata[0:1+offset]
def next(self):
head = self.bytedata[0]
t, value = Headmap.HEAD_MAP[head]
count, offset = value(self.bytedata[1:])
body = self.bytedata[1+offset:]
if t == ValueType.ARRAY:
x = 0
current = Decoder(body)
while x<count:
current
current = current.next()
x+=1
return current
elif t == ValueType.MAP:
x = 0
current = Decoder(body)
while x<count:
v = current.next()
current, v
current = v.next()
x+=1
return current
else:
return Decoder(body)
def items(self):
return MapIter(self)
def decode(bytes):
Decoder_proto = Decoder(bytes, True)
return(Decoder_proto.get())
|
py | 1a3d92aea6a3dc70a07dfe66c555209d498ac3ea | from django import forms
from django.contrib import admin
from django.contrib.admin.utils import unquote
from django.http import (
JsonResponse, HttpResponse, HttpResponseBadRequest, HttpResponseForbidden
)
from django.utils import timezone
from django.urls import re_path
from experiments import conf
from experiments.admin_utils import get_result_context
from experiments.models import Experiment
from experiments.utils import participant
class ExperimentAdmin(admin.ModelAdmin):
list_display = ('name', 'start_date', 'end_date', 'state')
list_filter = ('state', 'start_date', 'end_date')
ordering = ('-start_date',)
search_fields = ('=name',)
actions = None
readonly_fields = ['start_date', 'end_date']
def get_fieldsets(self, request, obj=None):
"""
Slightly different fields are shown for Add and Change:
- default_alternative can only be changed
- name can only be set on Add
"""
main_fields = ('description', 'start_date', 'end_date', 'state')
if obj:
main_fields += ('default_alternative',)
else:
main_fields = ('name',) + main_fields
return (
(None, {
'fields': main_fields,
}),
('Relevant Goals', {
'classes': ('collapse', 'hidden-relevant-goals'),
'fields': ('relevant_chi2_goals', 'relevant_mwu_goals'),
})
)
# --------------------------------------- Default alternative
def get_form(self, request, obj=None, **kwargs):
"""
Add the default alternative dropdown with appropriate choices
"""
if obj:
if obj.alternatives:
choices = [(alternative, alternative) for alternative in obj.alternatives.keys()]
else:
choices = [(conf.CONTROL_GROUP, conf.CONTROL_GROUP)]
class ExperimentModelForm(forms.ModelForm):
default_alternative = forms.ChoiceField(choices=choices,
initial=obj.default_alternative,
required=False)
kwargs['form'] = ExperimentModelForm
return super(ExperimentAdmin, self).get_form(request, obj=obj, **kwargs)
def save_model(self, request, obj, form, change):
if change:
obj.set_default_alternative(form.cleaned_data['default_alternative'])
obj.save()
# --------------------------------------- Overriding admin views
class Media:
css = {
"all": (
'experiments/dashboard/css/admin.css',
),
}
js = (
'https://www.google.com/jsapi', # used for charts
'experiments/dashboard/js/csrf.js',
'experiments/dashboard/js/admin.js',
)
def _admin_view_context(self, extra_context=None):
context = {}
if extra_context:
context.update(extra_context)
context.update({
'all_goals': conf.ALL_GOALS,
'control_group': conf.CONTROL_GROUP,
})
return context
def add_view(self, request, form_url='', extra_context=None):
return super(ExperimentAdmin, self).add_view(request,
form_url=form_url,
extra_context=self._admin_view_context(extra_context=extra_context))
def change_view(self, request, object_id, form_url='', extra_context=None):
experiment = self.get_object(request, unquote(object_id))
context = self._admin_view_context(extra_context=extra_context)
context.update(get_result_context(request, experiment))
return super(ExperimentAdmin, self).change_view(request, object_id, form_url=form_url, extra_context=context)
# --------------------------------------- Views for ajax functionality
def get_urls(self):
experiment_urls = [
re_path(r'^set-alternative/$', self.admin_site.admin_view(self.set_alternative_view), name='experiment_admin_set_alternative'),
re_path(r'^set-state/$', self.admin_site.admin_view(self.set_state_view), name='experiment_admin_set_state'),
]
return experiment_urls + super(ExperimentAdmin, self).get_urls()
def set_alternative_view(self, request):
"""
Allows the admin user to change their assigned alternative
"""
if not request.user.has_perm('experiments.change_experiment'):
return HttpResponseForbidden()
experiment_name = request.POST.get("experiment")
alternative_name = request.POST.get("alternative")
if not (experiment_name and alternative_name):
return HttpResponseBadRequest()
participant(request).set_alternative(experiment_name, alternative_name)
return JsonResponse({
'success': True,
'alternative': participant(request).get_alternative(experiment_name)
})
def set_state_view(self, request):
"""
Changes the experiment state
"""
if not request.user.has_perm('experiments.change_experiment'):
return HttpResponseForbidden()
try:
state = int(request.POST.get("state", ""))
except ValueError:
return HttpResponseBadRequest()
try:
experiment = Experiment.objects.get(name=request.POST.get("experiment"))
except Experiment.DoesNotExist:
return HttpResponseBadRequest()
experiment.state = state
if state == 0:
experiment.end_date = timezone.now()
else:
experiment.end_date = None
experiment.save()
return HttpResponse()
admin.site.register(Experiment, ExperimentAdmin)
|
py | 1a3d9357aa0a887c80902d17cc1c12e26b93607e |
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
"""Base Widget class. Allows user to create widgets in the back-end that render
in the IPython notebook front-end.
"""
from contextlib import contextmanager
from collections.abc import Iterable
from IPython.core.getipython import get_ipython
from ipykernel.comm import Comm
from traitlets import (
HasTraits, Unicode, Dict, Instance, List, Int, Set, Bytes, observe, default, Container,
Undefined)
from IPython.display import display
from json import loads as jsonloads, dumps as jsondumps
from base64 import standard_b64encode
from .._version import __protocol_version__, __jupyter_widgets_base_version__
PROTOCOL_VERSION_MAJOR = __protocol_version__.split('.')[0]
def _widget_to_json(x, obj):
if isinstance(x, dict):
return {k: _widget_to_json(v, obj) for k, v in x.items()}
elif isinstance(x, (list, tuple)):
return [_widget_to_json(v, obj) for v in x]
elif isinstance(x, Widget):
return "IPY_MODEL_" + x.model_id
else:
return x
def _json_to_widget(x, obj):
if isinstance(x, dict):
return {k: _json_to_widget(v, obj) for k, v in x.items()}
elif isinstance(x, (list, tuple)):
return [_json_to_widget(v, obj) for v in x]
elif isinstance(x, str) and x.startswith('IPY_MODEL_') and x[10:] in Widget.widgets:
return Widget.widgets[x[10:]]
else:
return x
widget_serialization = {
'from_json': _json_to_widget,
'to_json': _widget_to_json
}
_binary_types = (memoryview, bytearray, bytes)
def _put_buffers(state, buffer_paths, buffers):
"""The inverse of _remove_buffers, except here we modify the existing dict/lists.
Modifying should be fine, since this is used when state comes from the wire.
"""
for buffer_path, buffer in zip(buffer_paths, buffers):
# we'd like to set say sync_data['x'][0]['y'] = buffer
# where buffer_path in this example would be ['x', 0, 'y']
obj = state
for key in buffer_path[:-1]:
obj = obj[key]
obj[buffer_path[-1]] = buffer
def _separate_buffers(substate, path, buffer_paths, buffers):
"""For internal, see _remove_buffers"""
# remove binary types from dicts and lists, but keep track of their paths
# any part of the dict/list that needs modification will be cloned, so the original stays untouched
# e.g. {'x': {'ar': ar}, 'y': [ar2, ar3]}, where ar/ar2/ar3 are binary types
# will result in {'x': {}, 'y': [None, None]}, [ar, ar2, ar3], [['x', 'ar'], ['y', 0], ['y', 1]]
# instead of removing elements from the list, this will make replacing the buffers on the js side much easier
if isinstance(substate, (list, tuple)):
is_cloned = False
for i, v in enumerate(substate):
if isinstance(v, _binary_types):
if not is_cloned:
substate = list(substate) # shallow clone list/tuple
is_cloned = True
substate[i] = None
buffers.append(v)
buffer_paths.append(path + [i])
elif isinstance(v, (dict, list, tuple)):
vnew = _separate_buffers(v, path + [i], buffer_paths, buffers)
if v is not vnew: # only assign when value changed
if not is_cloned:
substate = list(substate) # clone list/tuple
is_cloned = True
substate[i] = vnew
elif isinstance(substate, dict):
is_cloned = False
for k, v in substate.items():
if isinstance(v, _binary_types):
if not is_cloned:
substate = dict(substate) # shallow clone dict
is_cloned = True
del substate[k]
buffers.append(v)
buffer_paths.append(path + [k])
elif isinstance(v, (dict, list, tuple)):
vnew = _separate_buffers(v, path + [k], buffer_paths, buffers)
if v is not vnew: # only assign when value changed
if not is_cloned:
substate = dict(substate) # clone list/tuple
is_cloned = True
substate[k] = vnew
else:
raise ValueError("expected state to be a list or dict, not %r" % substate)
return substate
def _remove_buffers(state):
"""Return (state_without_buffers, buffer_paths, buffers) for binary message parts
A binary message part is a memoryview, bytearray, or python 3 bytes object.
As an example:
>>> state = {'plain': [0, 'text'], 'x': {'ar': memoryview(ar1)}, 'y': {'shape': (10,10), 'data': memoryview(ar2)}}
>>> _remove_buffers(state)
({'plain': [0, 'text']}, {'x': {}, 'y': {'shape': (10, 10)}}, [['x', 'ar'], ['y', 'data']],
[<memory at 0x107ffec48>, <memory at 0x107ffed08>])
"""
buffer_paths, buffers = [], []
state = _separate_buffers(state, [], buffer_paths, buffers)
return state, buffer_paths, buffers
def _buffer_list_equal(a, b):
"""Compare two lists of buffers for equality.
Used to decide whether two sequences of buffers (memoryviews,
bytearrays, or python 3 bytes) differ, such that a sync is needed.
Returns True if equal, False if unequal
"""
if len(a) != len(b):
return False
if a == b:
return True
for ia, ib in zip(a, b):
# Check byte equality, since bytes are what is actually synced
# NOTE: Simple ia != ib does not always work as intended, as
# e.g. memoryview(np.frombuffer(ia, dtype='float32')) !=
# memoryview(np.frombuffer(b)), since the format info differs.
# Compare without copying.
if memoryview(ia).cast('B') != memoryview(ib).cast('B'):
return False
return True
class LoggingHasTraits(HasTraits):
"""A parent class for HasTraits that log.
Subclasses have a log trait, and the default behavior
is to get the logger from the currently running Application.
"""
log = Instance('logging.Logger')
@default('log')
def _log_default(self):
from traitlets import log
return log.get_logger()
class CallbackDispatcher(LoggingHasTraits):
"""A structure for registering and running callbacks"""
callbacks = List()
def __call__(self, *args, **kwargs):
"""Call all of the registered callbacks."""
value = None
for callback in self.callbacks:
try:
local_value = callback(*args, **kwargs)
except Exception as e:
ip = get_ipython()
if ip is None:
self.log.warning("Exception in callback %s: %s", callback, e, exc_info=True)
else:
ip.showtraceback()
else:
value = local_value if local_value is not None else value
return value
def register_callback(self, callback, remove=False):
"""(Un)Register a callback
Parameters
----------
callback: method handle
Method to be registered or unregistered.
remove=False: bool
Whether to unregister the callback."""
# (Un)Register the callback.
if remove and callback in self.callbacks:
self.callbacks.remove(callback)
elif not remove and callback not in self.callbacks:
self.callbacks.append(callback)
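# Small sketch of CallbackDispatcher (added for illustration):
#   dispatcher = CallbackDispatcher()
#   dispatcher.register_callback(lambda *args, **kwargs: print('fired'))
#   dispatcher()   # runs every registered callback and returns the last non-None result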
def _show_traceback(method):
"""decorator for showing tracebacks in IPython"""
def m(self, *args, **kwargs):
try:
return(method(self, *args, **kwargs))
except Exception as e:
ip = get_ipython()
if ip is None:
self.log.warning("Exception in widget method %s: %s", method, e, exc_info=True)
else:
ip.showtraceback()
return m
class WidgetRegistry:
def __init__(self):
self._registry = {}
def register(self, model_module, model_module_version_range, model_name, view_module, view_module_version_range, view_name, klass):
"""Register a value"""
model_module = self._registry.setdefault(model_module, {})
model_version = model_module.setdefault(model_module_version_range, {})
model_name = model_version.setdefault(model_name, {})
view_module = model_name.setdefault(view_module, {})
view_version = view_module.setdefault(view_module_version_range, {})
view_version[view_name] = klass
def get(self, model_module, model_module_version, model_name, view_module, view_module_version, view_name):
"""Get a value"""
module_versions = self._registry[model_module]
# The python semver module doesn't work well, for example, it can't do match('3', '*')
# so we just take the first model module version.
#model_names = next(v for k, v in module_versions.items()
# if semver.match(model_module_version, k))
model_names = list(module_versions.values())[0]
view_modules = model_names[model_name]
view_versions = view_modules[view_module]
# The python semver module doesn't work well, so we just take the first view module version
#view_names = next(v for k, v in view_versions.items()
# if semver.match(view_module_version, k))
view_names = list(view_versions.values())[0]
widget_class = view_names[view_name]
return widget_class
def items(self):
for model_module, mm in sorted(self._registry.items()):
for model_version, mv in sorted(mm.items()):
for model_name, vm in sorted(mv.items()):
for view_module, vv in sorted(vm.items()):
for view_version, vn in sorted(vv.items()):
for view_name, widget in sorted(vn.items()):
yield (model_module, model_version, model_name, view_module, view_version, view_name), widget
def register(name=''):
"For backwards compatibility, we support @register(name) syntax."
def reg(widget):
"""A decorator registering a widget class in the widget registry."""
w = widget.class_traits()
Widget.widget_types.register(w['_model_module'].default_value,
w['_model_module_version'].default_value,
w['_model_name'].default_value,
w['_view_module'].default_value,
w['_view_module_version'].default_value,
w['_view_name'].default_value,
widget)
return widget
if isinstance(name, str):
import warnings
warnings.warn("Widget registration using a string name has been deprecated. Widget registration now uses a plain `@register` decorator.", DeprecationWarning)
return reg
else:
return reg(name)
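# Typical use of the decorator above (added for illustration; the class, module
# name and version shown are made up):
#   @register
#   class EmailWidget(Widget):
#       _view_name = Unicode('EmailView').tag(sync=True)
#       _view_module = Unicode('email_widget').tag(sync=True)
#       _view_module_version = Unicode('0.1.0').tag(sync=True)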
class Widget(LoggingHasTraits):
#-------------------------------------------------------------------------
# Class attributes
#-------------------------------------------------------------------------
_widget_construction_callback = None
# widgets is a dictionary of all active widget objects
widgets = {}
# widget_types is a registry of widgets by module, version, and name:
widget_types = WidgetRegistry()
@classmethod
def close_all(cls):
for widget in list(cls.widgets.values()):
widget.close()
@staticmethod
def on_widget_constructed(callback):
"""Registers a callback to be called when a widget is constructed.
The callback must have the following signature:
callback(widget)"""
Widget._widget_construction_callback = callback
@staticmethod
def _call_widget_constructed(widget):
"""Static method, called when a widget is constructed."""
if Widget._widget_construction_callback is not None and callable(Widget._widget_construction_callback):
Widget._widget_construction_callback(widget)
@staticmethod
def handle_comm_opened(comm, msg):
"""Static method, called when a widget is constructed."""
version = msg.get('metadata', {}).get('version', '')
if version.split('.')[0] != PROTOCOL_VERSION_MAJOR:
raise ValueError("Incompatible widget protocol versions: received version %r, expected version %r"%(version, __protocol_version__))
data = msg['content']['data']
state = data['state']
# Find the widget class to instantiate in the registered widgets
widget_class = Widget.widget_types.get(state['_model_module'],
state['_model_module_version'],
state['_model_name'],
state['_view_module'],
state['_view_module_version'],
state['_view_name'])
widget = widget_class(comm=comm)
if 'buffer_paths' in data:
_put_buffers(state, data['buffer_paths'], msg['buffers'])
widget.set_state(state)
@staticmethod
def get_manager_state(drop_defaults=False, widgets=None):
"""Returns the full state for a widget manager for embedding
:param drop_defaults: when True, it will not include default value
:param widgets: list with widgets to include in the state (or all widgets when None)
:return:
"""
state = {}
if widgets is None:
widgets = Widget.widgets.values()
for widget in widgets:
state[widget.model_id] = widget._get_embed_state(drop_defaults=drop_defaults)
return {'version_major': 2, 'version_minor': 0, 'state': state}
def _get_embed_state(self, drop_defaults=False):
state = {
'model_name': self._model_name,
'model_module': self._model_module,
'model_module_version': self._model_module_version
}
model_state, buffer_paths, buffers = _remove_buffers(self.get_state(drop_defaults=drop_defaults))
state['state'] = model_state
if len(buffers) > 0:
state['buffers'] = [{'encoding': 'base64',
'path': p,
'data': standard_b64encode(d).decode('ascii')}
for p, d in zip(buffer_paths, buffers)]
return state
def get_view_spec(self):
return dict(version_major=2, version_minor=0, model_id=self._model_id)
#-------------------------------------------------------------------------
# Traits
#-------------------------------------------------------------------------
_model_name = Unicode('WidgetModel',
help="Name of the model.", read_only=True).tag(sync=True)
_model_module = Unicode('@jupyter-widgets/base',
help="The namespace for the model.", read_only=True).tag(sync=True)
_model_module_version = Unicode(__jupyter_widgets_base_version__,
help="A semver requirement for namespace version containing the model.", read_only=True).tag(sync=True)
_view_name = Unicode(None, allow_none=True,
help="Name of the view.").tag(sync=True)
_view_module = Unicode(None, allow_none=True,
help="The namespace for the view.").tag(sync=True)
_view_module_version = Unicode('',
help="A semver requirement for the namespace version containing the view.").tag(sync=True)
_view_count = Int(None, allow_none=True,
help="EXPERIMENTAL: The number of views of the model displayed in the frontend. This attribute is experimental and may change or be removed in the future. None signifies that views will not be tracked. Set this to 0 to start tracking view creation/deletion.").tag(sync=True)
comm = Instance('ipykernel.comm.Comm', allow_none=True)
keys = List(help="The traits which are synced.")
@default('keys')
def _default_keys(self):
return [name for name in self.traits(sync=True)]
_property_lock = Dict()
_holding_sync = False
_states_to_send = Set()
_display_callbacks = Instance(CallbackDispatcher, ())
_msg_callbacks = Instance(CallbackDispatcher, ())
#-------------------------------------------------------------------------
# (Con/de)structor
#-------------------------------------------------------------------------
def __init__(self, **kwargs):
"""Public constructor"""
self._model_id = kwargs.pop('model_id', None)
super().__init__(**kwargs)
Widget._call_widget_constructed(self)
self.open()
def __del__(self):
"""Object disposal"""
self.close()
#-------------------------------------------------------------------------
# Properties
#-------------------------------------------------------------------------
def open(self):
"""Open a comm to the frontend if one isn't already open."""
if self.comm is None:
state, buffer_paths, buffers = _remove_buffers(self.get_state())
args = dict(target_name='jupyter.widget',
data={'state': state, 'buffer_paths': buffer_paths},
buffers=buffers,
metadata={'version': __protocol_version__}
)
if self._model_id is not None:
args['comm_id'] = self._model_id
self.comm = Comm(**args)
@observe('comm')
def _comm_changed(self, change):
"""Called when the comm is changed."""
if change['new'] is None:
return
self._model_id = self.model_id
self.comm.on_msg(self._handle_msg)
Widget.widgets[self.model_id] = self
@property
def model_id(self):
"""Gets the model id of this widget.
If a Comm doesn't exist yet, a Comm will be created automagically."""
return self.comm.comm_id
#-------------------------------------------------------------------------
# Methods
#-------------------------------------------------------------------------
def close(self):
"""Close method.
Closes the underlying comm.
When the comm is closed, all of the widget views are automatically
removed from the front-end."""
if self.comm is not None:
Widget.widgets.pop(self.model_id, None)
self.comm.close()
self.comm = None
self._ipython_display_ = None
def send_state(self, key=None):
"""Sends the widget state, or a piece of it, to the front-end, if it exists.
Parameters
----------
key : unicode, or iterable (optional)
A single property's name or iterable of property names to sync with the front-end.
"""
state = self.get_state(key=key)
if len(state) > 0:
if self._property_lock: # we need to keep this dict up to date with the front-end values
for name, value in state.items():
if name in self._property_lock:
self._property_lock[name] = value
state, buffer_paths, buffers = _remove_buffers(state)
msg = {'method': 'update', 'state': state, 'buffer_paths': buffer_paths}
self._send(msg, buffers=buffers)
def get_state(self, key=None, drop_defaults=False):
"""Gets the widget state, or a piece of it.
Parameters
----------
key : unicode or iterable (optional)
A single property's name or iterable of property names to get.
Returns
-------
state : dict of states
metadata : dict
metadata for each field: {key: metadata}
"""
if key is None:
keys = self.keys
elif isinstance(key, str):
keys = [key]
elif isinstance(key, Iterable):
keys = key
else:
raise ValueError("key must be a string, an iterable of keys, or None")
state = {}
traits = self.traits()
for k in keys:
to_json = self.trait_metadata(k, 'to_json', self._trait_to_json)
value = to_json(getattr(self, k), self)
if not drop_defaults or not self._compare(value, traits[k].default_value):
state[k] = value
return state
def _is_numpy(self, x):
return x.__class__.__name__ == 'ndarray' and x.__class__.__module__ == 'numpy'
def _compare(self, a, b):
if self._is_numpy(a) or self._is_numpy(b):
import numpy as np
return np.array_equal(a, b)
else:
return a == b
def set_state(self, sync_data):
"""Called when a state is received from the front-end."""
# The order of these context managers is important. Properties must
# be locked when the hold_trait_notification context manager is
# released and notifications are fired.
with self._lock_property(**sync_data), self.hold_trait_notifications():
for name in sync_data:
if name in self.keys:
from_json = self.trait_metadata(name, 'from_json',
self._trait_from_json)
self.set_trait(name, from_json(sync_data[name], self))
def send(self, content, buffers=None):
"""Sends a custom msg to the widget model in the front-end.
Parameters
----------
content : dict
Content of the message to send.
buffers : list of binary buffers
Binary buffers to send with message
"""
self._send({"method": "custom", "content": content}, buffers=buffers)
def on_msg(self, callback, remove=False):
"""(Un)Register a custom msg receive callback.
Parameters
----------
callback: callable
callback will be passed three arguments when a message arrives::
callback(widget, content, buffers)
remove: bool
True if the callback should be unregistered."""
self._msg_callbacks.register_callback(callback, remove=remove)
def on_displayed(self, callback, remove=False):
"""(Un)Register a widget displayed callback.
Parameters
----------
callback: method handler
Must have a signature of::
callback(widget, **kwargs)
kwargs from display are passed through without modification.
remove: bool
True if the callback should be unregistered."""
self._display_callbacks.register_callback(callback, remove=remove)
def add_traits(self, **traits):
"""Dynamically add trait attributes to the Widget."""
super().add_traits(**traits)
for name, trait in traits.items():
if trait.get_metadata('sync'):
self.keys.append(name)
self.send_state(name)
def notify_change(self, change):
"""Called when a property has changed."""
# Send the state to the frontend before the user-registered callbacks
# are called.
name = change['name']
if self.comm is not None and self.comm.kernel is not None:
# Make sure this isn't information that the front-end just sent us.
if name in self.keys and self._should_send_property(name, getattr(self, name)):
# Send new state to front-end
self.send_state(key=name)
super().notify_change(change)
def __repr__(self):
return self._gen_repr_from_keys(self._repr_keys())
#-------------------------------------------------------------------------
# Support methods
#-------------------------------------------------------------------------
@contextmanager
def _lock_property(self, **properties):
"""Lock a property-value pair.
The value should be the JSON state of the property.
NOTE: This, in addition to the single lock for all state changes, is
flawed. In the future we may want to look into buffering state changes
back to the front-end."""
self._property_lock = properties
try:
yield
finally:
self._property_lock = {}
@contextmanager
def hold_sync(self):
"""Hold syncing any state until the outermost context manager exits"""
if self._holding_sync is True:
yield
else:
try:
self._holding_sync = True
yield
finally:
self._holding_sync = False
self.send_state(self._states_to_send)
self._states_to_send.clear()
def _should_send_property(self, key, value):
"""Check the property lock (property_lock)"""
to_json = self.trait_metadata(key, 'to_json', self._trait_to_json)
if key in self._property_lock:
# model_state, buffer_paths, buffers
split_value = _remove_buffers({ key: to_json(value, self)})
split_lock = _remove_buffers({ key: self._property_lock[key]})
# A roundtrip conversion through json in the comparison takes care of
            # idiosyncrasies of how python data structures map to json, for example
# tuples get converted to lists.
if (jsonloads(jsondumps(split_value[0])) == split_lock[0]
and split_value[1] == split_lock[1]
and _buffer_list_equal(split_value[2], split_lock[2])):
return False
if self._holding_sync:
self._states_to_send.add(key)
return False
else:
return True
# Event handlers
@_show_traceback
def _handle_msg(self, msg):
"""Called when a msg is received from the front-end"""
data = msg['content']['data']
method = data['method']
if method == 'update':
if 'state' in data:
state = data['state']
if 'buffer_paths' in data:
_put_buffers(state, data['buffer_paths'], msg['buffers'])
self.set_state(state)
# Handle a state request.
elif method == 'request_state':
self.send_state()
# Handle a custom msg from the front-end.
elif method == 'custom':
if 'content' in data:
self._handle_custom_msg(data['content'], msg['buffers'])
# Catch remainder.
else:
self.log.error('Unknown front-end to back-end widget msg with method "%s"' % method)
def _handle_custom_msg(self, content, buffers):
"""Called when a custom msg is received."""
self._msg_callbacks(self, content, buffers)
def _handle_displayed(self, **kwargs):
"""Called when a view has been displayed for this widget instance"""
self._display_callbacks(self, **kwargs)
@staticmethod
def _trait_to_json(x, self):
"""Convert a trait value to json."""
return x
@staticmethod
def _trait_from_json(x, self):
"""Convert json values to objects."""
return x
def _ipython_display_(self, **kwargs):
"""Called when `IPython.display.display` is called on the widget."""
plaintext = repr(self)
if len(plaintext) > 110:
plaintext = plaintext[:110] + '…'
data = {
'text/plain': plaintext,
}
if self._view_name is not None:
# The 'application/vnd.jupyter.widget-view+json' mimetype has not been registered yet.
# See the registration process and naming convention at
# http://tools.ietf.org/html/rfc6838
# and the currently registered mimetypes at
# http://www.iana.org/assignments/media-types/media-types.xhtml.
data['application/vnd.jupyter.widget-view+json'] = {
'version_major': 2,
'version_minor': 0,
'model_id': self._model_id
}
display(data, raw=True)
if self._view_name is not None:
self._handle_displayed(**kwargs)
def _send(self, msg, buffers=None):
"""Sends a message to the model in the front-end."""
if self.comm is not None and self.comm.kernel is not None:
self.comm.send(data=msg, buffers=buffers)
def _repr_keys(self):
traits = self.traits()
for key in sorted(self.keys):
# Exclude traits that start with an underscore
if key[0] == '_':
continue
# Exclude traits who are equal to their default value
value = getattr(self, key)
trait = traits[key]
if self._compare(value, trait.default_value):
continue
elif (isinstance(trait, (Container, Dict)) and
trait.default_value == Undefined and
(value is None or len(value) == 0)):
# Empty container, and dynamic default will be empty
continue
yield key
def _gen_repr_from_keys(self, keys):
class_name = self.__class__.__name__
signature = ', '.join(
'{}={!r}'.format(key, getattr(self, key))
for key in keys
)
return '{}({})'.format(class_name, signature)
|
py | 1a3d936b95aa025eeb775b67083d939c0c0004e4 | import os
import requests
from typing import Optional, Union, Iterable, Mapping, Sequence
from platypush.plugins import Plugin, action
from platypush.schemas.mastodon import MastodonSchema, MastodonSearchSchema, MastodonAccountCreationSchema, \
MastodonAccountSchema, MastodonStatusSchema, MastodonFeaturedHashtagSchema, MastodonAccountListSchema, \
MastodonFilterSchema, MastodonMediaSchema, MastodonConversationSchema, MastodonListSchema, \
MastodonNotificationSchema
from platypush.utils import get_mime_type
class MastodonPlugin(Plugin):
"""
Plugin to interact with `Mastodon <https://mastodon.social/about>`_ instances.
It requires an active API token associated to an app registered on the instance.
In order to get one:
- Open ``https://<mastodon-base-url>/settings/applications/``
- Create a new application
- Select the scopes relevant for your specific usage.
- Take note of the token reported on the *Your access token* row.
The notifications subscription service requires the ``ngrok`` plugin and the
    ``http`` backend to be enabled, since we need to expose an external URL that
the Mastodon instance can call when new events occur.
"""
class SubscriptionConfig:
tunnel_url: str
local_port: int
auth_secret: str
private_key: str
public_key: str
server_key: str
def __init__(self, base_url: str, access_token: Optional[str] = None, **kwargs):
"""
:param base_url: Base URL of the Mastodon web server, in the form of ``https://<domain-name>``.
:param access_token: Access token as reported on ``https://<base_url>/settings/applications/<app_id>``.
"""
super().__init__(**kwargs)
self._base_url = base_url
self._access_token = access_token
self._subscription_config = self.SubscriptionConfig()
def base_url(self, version: str, base_url: Optional[str] = None) -> str:
return f'{base_url or self._base_url}/api/{version}'
def _run(
self, path: str, method: str = 'get', version: str = 'v2', headers: Optional[dict] = None,
base_url: Optional[str] = None, access_token: Optional[str] = None,
schema: Optional[MastodonSchema] = None, **kwargs
) -> Optional[Union[dict, list]]:
headers = {
'Authorization': f'Bearer {access_token or self._access_token}',
'Accept': 'application/json',
**(headers or {}),
}
method = getattr(requests, method.lower())
rs = method(self.base_url(base_url=base_url, version=version) + '/' + path, headers=headers, **kwargs)
rs.raise_for_status()
rs = rs.json()
if schema:
rs = schema.dump(rs)
return rs
# noinspection PyShadowingBuiltins
@action
def search(
self, query: str, type: Optional[str] = None, min_id: Optional[str] = None,
max_id: Optional[str] = None, limit: int = 20, offset: int = 0, following: bool = False,
**kwargs
) -> Mapping[str, Iterable[dict]]:
"""
Perform a search.
:param query: Search query.
:param type: Filter by type. Supported types:
- ``accounts``
- ``hashtags``
- ``statuses``
:param min_id: Return results newer than this ID.
:param max_id: Return results older than this ID.
:param limit: Maximum number of results (default: 20).
:param offset: Return results from this offset (default: 0).
:param following: Only return results from accounts followed by the user (default: False).
:param kwargs: ``base_url``/``access_token`` override.
:return: .. schema:: mastodon.MastodonSearchSchema
"""
return self._run(
'search',
version='v2',
schema=MastodonSearchSchema(),
params={
'q': query,
**({'type': type} if type else {}),
**({'min_id': min_id} if min_id else {}),
**({'max_id': max_id} if max_id else {}),
**({'limit': limit} if limit else {}),
**({'offset': offset} if offset else {}),
**({'following': following} if following else {}),
}, **kwargs
)
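    # Example invocation (added for illustration; ``get_plugin`` is platypush's usual
    # entry point for calling a plugin action, and the query is a placeholder):
    #   from platypush.context import get_plugin
    #   get_plugin('mastodon').search(query='#python', type='hashtags', limit=5)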
@action
def register_account(
self, username: str, email: str, password: str, locale: str = 'en',
reason: Optional[str] = None, **kwargs
) -> dict:
"""
Register a new account.
It requires the specified API token to have ``write:accounts`` permissions.
:param username: User name.
:param email: User's email address (must be a valid address).
:param password: The password used for the first login.
:param locale: Language/encoding for the confirmation email.
:param reason: Text that will be reviewed by moderators if registrations require manual approval.
:param kwargs: ``base_url``/``access_token`` override.
:return: .. schema:: mastodon.MastodonAccountCreationSchema
"""
return self._run(
'accounts',
method='post',
version='v1',
schema=MastodonAccountCreationSchema(),
json={
'username': username,
'email': email,
'password': password,
'locale': locale,
'reason': reason,
'agreement': True,
}, **kwargs
)
@action
def update_account(
self, discoverable: Optional[bool] = None, bot: Optional[bool] = None,
display_name: Optional[str] = None, note: Optional[str] = None,
avatar: Optional[str] = None, header: Optional[str] = None,
locked: Optional[bool] = None, privacy: Optional[str] = None,
sensitive: Optional[bool] = None, language: Optional[str] = None,
metadata: Optional[Iterable[Mapping]] = None, **kwargs
) -> dict:
"""
Updates the properties of the account associated to the access token.
It requires the specified API token to have ``write:accounts`` permissions.
:param discoverable: Whether the account should be shown in the profile directory.
:param bot: Whether the account is a bot.
:param display_name: The display name to use for the profile.
:param note: The account bio (HTML is supported).
:param avatar: Path to an avatar image.
:param header: Path to a header image.
:param locked: Whether manual approval of follow requests is required.
:param privacy: Default post privacy for authored statuses.
:param sensitive: Whether to mark authored statuses as sensitive by default.
        :param language: Default language to use for authored statuses (ISO 639-1 code).
:param metadata: Profile metadata items with ``name`` and ``value``.
:param kwargs: ``base_url``/``access_token`` override.
:return: .. schema:: mastodon.MastodonAccountSchema
"""
avatar = os.path.expanduser(avatar) if avatar else None
header = os.path.expanduser(header) if header else None
return self._run(
'accounts/update_credentials',
method='patch',
version='v1',
schema=MastodonAccountSchema(),
data={
**({'discoverable': discoverable} if discoverable is not None else {}),
**({'bot': bot} if bot is not None else {}),
**({'display_name': display_name} if display_name is not None else {}),
**({'note': note} if note is not None else {}),
**({'locked': locked} if locked is not None else {}),
**({'source[privacy]': privacy} if privacy is not None else {}),
**({'source[sensitive]': sensitive} if sensitive is not None else {}),
**({'source[language]': language} if language is not None else {}),
**({'fields_attributes': metadata} if metadata is not None else {}),
},
files={
**({'avatar': (
os.path.basename(avatar), open(avatar, 'rb'), get_mime_type(avatar)
)} if avatar is not None else {}),
**({'header': (
os.path.basename(header), open(header, 'rb'), get_mime_type(header)
)} if header is not None else {}),
},
**kwargs
)
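    # Sketch of the ``metadata`` format consumed above (an assumption inferred from
    # the ``fields_attributes`` payload and the docstring; values are made up):
    #
    #   plugin.update_account(
    #       display_name='My Bot',
    #       metadata=[
    #           {'name': 'Website', 'value': 'https://example.org'},
    #       ],
    #   )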
@action
def get_account(self, account_id: str, **kwargs) -> dict:
"""
Retrieve an account by ID.
It requires the specified API token to have ``read:accounts`` permissions.
:param account_id: Account ID to retrieve.
:param kwargs: ``base_url``/``access_token`` override.
:return: .. schema:: mastodon.MastodonAccountSchema
"""
return self._run(
f'accounts/{account_id}',
version='v1',
schema=MastodonAccountSchema(),
**kwargs
)
@action
def get_statuses(self, account_id: str, min_id: Optional[str] = None, max_id: Optional[str] = None,
limit: int = 20, offset: int = 0, **kwargs) -> Iterable[dict]:
"""
Retrieve statuses by account ID.
It requires the specified API token to have the ``read:statuses`` permission.
:param account_id: Account ID.
:param min_id: Return results newer than this ID.
:param max_id: Return results older than this ID.
:param limit: Maximum number of results (default: 20).
:param offset: Return results from this offset (default: 0).
:param kwargs: ``base_url``/``access_token`` override.
:return: .. schema:: mastodon.MastodonStatusSchema(many=True)
"""
return self._run(
f'accounts/{account_id}/statuses',
version='v1',
schema=MastodonStatusSchema(many=True),
params={
**({'min_id': min_id} if min_id else {}),
**({'max_id': max_id} if max_id else {}),
**({'limit': limit} if limit else {}),
**({'offset': offset} if offset else {}),
},
**kwargs
)
@action
def get_followers(self, account_id: str, max_id: Optional[str] = None,
since_id: Optional[str] = None, limit: int = 20, offset: int = 0,
**kwargs) -> Iterable[dict]:
"""
Retrieve the list of followers of an account.
It requires the specified API token to have the ``read:accounts`` permission.
:param account_id: Account ID.
:param max_id: Return results older than this ID.
:param since_id: Return results newer than this ID.
:param limit: Maximum number of results (default: 20).
:param offset: Return results from this offset (default: 0).
:param kwargs: ``base_url``/``access_token`` override.
:return: .. schema:: mastodon.MastodonAccountSchema(many=True)
"""
return self._run(
f'accounts/{account_id}/followers',
version='v1',
schema=MastodonAccountSchema(many=True),
params={
**({'since_id': since_id} if since_id else {}),
**({'max_id': max_id} if max_id else {}),
**({'limit': limit} if limit else {}),
**({'offset': offset} if offset else {}),
},
**kwargs
)
@action
def get_following(self, account_id: str, max_id: Optional[str] = None,
since_id: Optional[str] = None, limit: int = 20, offset: int = 0,
**kwargs) -> Iterable[dict]:
"""
Retrieve the list of accounts followed by a specified account.
It requires the specified API token to have the ``read:accounts`` permission.
:param account_id: Account ID.
:param max_id: Return results older than this ID.
:param since_id: Return results newer than this ID.
:param limit: Maximum number of results (default: 20).
:param offset: Return results from this offset (default: 0).
:param kwargs: ``base_url``/``access_token`` override.
:return: .. schema:: mastodon.MastodonAccountSchema(many=True)
"""
return self._run(
f'accounts/{account_id}/following',
version='v1',
schema=MastodonAccountSchema(many=True),
params={
**({'since_id': since_id} if since_id else {}),
**({'max_id': max_id} if max_id else {}),
**({'limit': limit} if limit else {}),
**({'offset': offset} if offset else {}),
},
**kwargs
)
@action
def get_featured_tags(self, account_id: Optional[str] = None, max_id: Optional[str] = None,
since_id: Optional[str] = None, limit: int = 20, offset: int = 0,
**kwargs) -> Iterable[dict]:
"""
Retrieve the list of featured hashtags of an account.
It requires the specified API token to have the ``read:accounts`` permission.
:param account_id: Account ID (if not specified then retrieve the featured tags of the current account).
:param max_id: Return results older than this ID.
:param since_id: Return results newer than this ID.
:param limit: Maximum number of results (default: 20).
:param offset: Return results from this offset (default: 0).
:param kwargs: ``base_url``/``access_token`` override.
:return: .. schema:: mastodon.MastodonFeaturedHashtagSchema(many=True)
"""
return self._run(
f'accounts/{account_id}/featured_tags' if account_id else 'featured_tags',
version='v1',
schema=MastodonFeaturedHashtagSchema(many=True),
params={
**({'since_id': since_id} if since_id else {}),
**({'max_id': max_id} if max_id else {}),
**({'limit': limit} if limit else {}),
**({'offset': offset} if offset else {}),
},
**kwargs
)
@action
def get_featured_lists(self, account_id: str, max_id: Optional[str] = None,
since_id: Optional[str] = None, limit: int = 20, offset: int = 0,
**kwargs) -> Iterable[dict]:
"""
        Retrieve the lists that you have added a certain account to.
It requires the specified API token to have the ``read:lists`` permission.
:param account_id: Account ID.
:param max_id: Return results older than this ID.
:param since_id: Return results newer than this ID.
:param limit: Maximum number of results (default: 20).
:param offset: Return results from this offset (default: 0).
:param kwargs: ``base_url``/``access_token`` override.
:return: .. schema:: mastodon.MastodonAccountListSchema(many=True)
"""
return self._run(
f'accounts/{account_id}/lists',
version='v1',
schema=MastodonAccountListSchema(many=True),
params={
**({'since_id': since_id} if since_id else {}),
**({'max_id': max_id} if max_id else {}),
**({'limit': limit} if limit else {}),
**({'offset': offset} if offset else {}),
},
**kwargs
)
@action
def follow_account(self, account_id: str, notify: bool = False, reblogs: bool = True, **kwargs):
"""
Follow a given account ID.
It requires the specified API token to have the ``write:follows`` permission.
:param account_id: Account ID.
:param notify: Receive notifications when this account posts a new status (default: False).
:param reblogs: Receive this account's reblogs on your timeline (default: True).
:param kwargs: ``base_url``/``access_token`` override.
"""
self._run(
f'accounts/{account_id}/follow',
version='v1',
method='post',
json={'notify': notify, 'reblogs': reblogs},
**kwargs
)
@action
def unfollow_account(self, account_id: str, **kwargs):
"""
Unfollow a given account ID.
It requires the specified API token to have the ``write:follows`` permission.
:param account_id: Account ID.
:param kwargs: ``base_url``/``access_token`` override.
"""
self._run(
f'accounts/{account_id}/unfollow',
version='v1',
method='post',
**kwargs
)
@action
def block_account(self, account_id: str, **kwargs):
"""
Block a given account ID.
It requires the specified API token to have the ``write:blocks`` permission.
:param account_id: Account ID.
:param kwargs: ``base_url``/``access_token`` override.
"""
self._run(
f'accounts/{account_id}/block',
version='v1',
method='post',
**kwargs
)
@action
def unblock_account(self, account_id: str, **kwargs):
"""
Unblock a given account ID.
It requires the specified API token to have the ``write:blocks`` permission.
:param account_id: Account ID.
:param kwargs: ``base_url``/``access_token`` override.
"""
self._run(
f'accounts/{account_id}/unblock',
version='v1',
method='post',
**kwargs
)
@action
def mute_account(self, account_id: str, **kwargs):
"""
Mute a given account ID.
It requires the specified API token to have the ``write:mutes`` permission.
:param account_id: Account ID.
:param kwargs: ``base_url``/``access_token`` override.
"""
self._run(
f'accounts/{account_id}/mute',
version='v1',
method='post',
**kwargs
)
@action
def unmute_account(self, account_id: str, **kwargs):
"""
Unmute a given account ID.
It requires the specified API token to have the ``write:mutes`` permission.
:param account_id: Account ID.
:param kwargs: ``base_url``/``access_token`` override.
"""
self._run(
f'accounts/{account_id}/unmute',
version='v1',
method='post',
**kwargs
)
@action
def pin_account(self, account_id: str, **kwargs):
"""
Pin a given account ID to your profile.
It requires the specified API token to have the ``write:accounts`` permission.
:param account_id: Account ID.
:param kwargs: ``base_url``/``access_token`` override.
"""
self._run(
f'accounts/{account_id}/pin',
version='v1',
method='post',
**kwargs
)
@action
def unpin_account(self, account_id: str, **kwargs):
"""
Unpin a given account ID from your profile.
It requires the specified API token to have the ``write:accounts`` permission.
:param account_id: Account ID.
:param kwargs: ``base_url``/``access_token`` override.
"""
self._run(
f'accounts/{account_id}/unpin',
version='v1',
method='post',
**kwargs
)
@action
def set_account_note(self, account_id: str, note: str, **kwargs):
"""
Set a private note for an account.
It requires the specified API token to have the ``write:accounts`` permission.
:param account_id: Account ID.
:param note: Note content (HTML is supported).
:param kwargs: ``base_url``/``access_token`` override.
"""
self._run(
f'accounts/{account_id}/note',
version='v1',
method='post',
json={'comment': note},
**kwargs
)
@action
def get_bookmarked_statuses(self, min_id: Optional[str] = None,
max_id: Optional[str] = None, limit: int = 20, **kwargs) -> Iterable[dict]:
"""
Retrieve the list of statuses bookmarked by the user.
It requires the specified API token to have the ``read:bookmarks`` permission.
:param min_id: Return results newer than this ID.
:param max_id: Return results older than this ID.
:param limit: Maximum number of results (default: 20).
:param kwargs: ``base_url``/``access_token`` override.
:return: .. schema:: mastodon.MastodonStatusSchema(many=True)
"""
return self._run(
'bookmarks',
version='v1',
schema=MastodonStatusSchema(many=True),
params={
**({'min_id': min_id} if min_id else {}),
**({'max_id': max_id} if max_id else {}),
**({'limit': limit} if limit else {}),
},
**kwargs
)
@action
def get_favourited_statuses(self, min_id: Optional[str] = None,
max_id: Optional[str] = None, limit: int = 20, **kwargs) -> Iterable[dict]:
"""
Retrieve the list of statuses favourited by the account.
It requires the specified API token to have the ``read:favourites`` permission.
:param min_id: Return results newer than this ID.
:param max_id: Return results older than this ID.
:param limit: Maximum number of results (default: 20).
:param kwargs: ``base_url``/``access_token`` override.
:return: .. schema:: mastodon.MastodonStatusSchema(many=True)
"""
return self._run(
'favourites',
version='v1',
schema=MastodonStatusSchema(many=True),
params={
**({'min_id': min_id} if min_id else {}),
**({'max_id': max_id} if max_id else {}),
**({'limit': limit} if limit else {}),
},
**kwargs
)
@action
def get_muted_accounts(self, max_id: Optional[str] = None,
since_id: Optional[str] = None, limit: int = 20,
**kwargs) -> Iterable[dict]:
"""
Retrieve the list of muted accounts.
It requires the specified API token to have the ``read:mutes`` permission.
:param max_id: Return results older than this ID.
:param since_id: Return results newer than this ID.
:param limit: Maximum number of results (default: 20).
:param kwargs: ``base_url``/``access_token`` override.
:return: .. schema:: mastodon.MastodonAccountSchema(many=True)
"""
return self._run(
'mutes',
version='v1',
schema=MastodonAccountSchema(many=True),
params={
**({'since_id': since_id} if since_id else {}),
**({'max_id': max_id} if max_id else {}),
**({'limit': limit} if limit else {}),
},
**kwargs
)
@action
def get_blocked_accounts(self, max_id: Optional[str] = None,
since_id: Optional[str] = None, limit: int = 20,
**kwargs) -> Iterable[dict]:
"""
Retrieve the list of blocked accounts.
It requires the specified API token to have the ``read:blocks`` permission.
:param max_id: Return results older than this ID.
:param since_id: Return results newer than this ID.
:param limit: Maximum number of results (default: 20).
:param kwargs: ``base_url``/``access_token`` override.
:return: .. schema:: mastodon.MastodonAccountSchema(many=True)
"""
return self._run(
'blocks',
version='v1',
schema=MastodonAccountSchema(many=True),
params={
**({'since_id': since_id} if since_id else {}),
**({'max_id': max_id} if max_id else {}),
**({'limit': limit} if limit else {}),
},
**kwargs
)
@action
def get_filters(self, **kwargs) -> Iterable[dict]:
"""
Retrieve the list of filters created by the account.
It requires the specified API token to have the ``read:filters`` permission.
:param kwargs: ``base_url``/``access_token`` override.
:return: .. schema:: mastodon.MastodonFilterSchema(many=True)
"""
return self._run(
'filters',
version='v1',
schema=MastodonFilterSchema(many=True),
**kwargs
)
@action
def create_filter(self, phrase: str, context: Iterable[str],
irreversible: Optional[bool] = None,
whole_word: Optional[bool] = None,
expires_in: Optional[int] = None,
**kwargs) -> dict:
"""
Create a new filter.
It requires the specified API token to have the ``write:filters`` permission.
:param phrase: Text to be filtered.
:param context: Array of enumerable strings: ``home``, ``notifications``, ``public``, ``thread``.
At least one context must be specified.
:param irreversible: Should the server irreversibly drop matching entities from home and notifications?
:param whole_word: Consider word boundaries?
:param expires_in: Expires in the specified number of seconds.
:param kwargs: ``base_url``/``access_token`` override.
:return: .. schema:: mastodon.MastodonFilterSchema
"""
return self._run(
'filters',
version='v1',
method='post',
schema=MastodonFilterSchema(),
json={
'phrase': phrase,
'context': context,
**({'irreversible': irreversible} if irreversible is not None else {}),
**({'whole_word': whole_word} if whole_word is not None else {}),
**({'expires_in': expires_in} if expires_in is not None else {}),
},
**kwargs
)
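    # Example sketch (illustrative only): hide statuses containing "spoilers" from
    # the home timeline for one day:
    #
    #   plugin.create_filter(
    #       phrase='spoilers',
    #       context=['home'],
    #       whole_word=True,
    #       expires_in=86400,
    #   )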
@action
def update_filter(self, filter_id: int,
phrase: Optional[str] = None,
context: Optional[Iterable[str]] = None,
irreversible: Optional[bool] = None,
whole_word: Optional[bool] = None,
expires_in: Optional[int] = None,
**kwargs) -> dict:
"""
Update a filter.
It requires the specified API token to have the ``write:filters`` permission.
:param filter_id: Filter ID.
:param phrase: Text to be filtered.
:param context: Array of enumerable strings: ``home``, ``notifications``, ``public``, ``thread``.
At least one context must be specified.
:param irreversible: Should the server irreversibly drop matching entities from home and notifications?
:param whole_word: Consider word boundaries?
:param expires_in: Expires in the specified number of seconds.
:param kwargs: ``base_url``/``access_token`` override.
:return: .. schema:: mastodon.MastodonFilterSchema
"""
return self._run(
f'filters/{filter_id}',
version='v1',
method='put',
schema=MastodonFilterSchema(),
json={
**({'phrase': phrase} if phrase is not None else {}),
**({'context': context} if context is not None else {}),
**({'irreversible': irreversible} if irreversible is not None else {}),
**({'whole_word': whole_word} if whole_word is not None else {}),
**({'expires_in': expires_in} if expires_in is not None else {}),
},
**kwargs
)
@action
def remove_filter(self, filter_id: int, **kwargs):
"""
Remove a filter.
It requires the specified API token to have the ``write:filters`` permission.
:param filter_id: Filter ID.
:param kwargs: ``base_url``/``access_token`` override.
"""
return self._run(
f'filters/{filter_id}',
version='v1',
method='delete',
**kwargs
)
@action
def add_featured_tag(self, name: str, **kwargs) -> dict:
"""
Add a featured tag to the current account.
It requires the specified API token to have the ``write:accounts`` permission.
:param name: Hashtag name.
:param kwargs: ``base_url``/``access_token`` override.
:return: .. schema:: mastodon.MastodonFeaturedHashtagSchema
"""
return self._run(
'featured_tags',
version='v1',
method='post',
schema=MastodonFeaturedHashtagSchema(),
json={'name': name},
**kwargs
)
@action
def remove_featured_tag(self, tag_id: int, **kwargs):
"""
Remove a featured tag from the current account.
It requires the specified API token to have the ``write:accounts`` permission.
:param tag_id: Hashtag ID.
:param kwargs: ``base_url``/``access_token`` override.
"""
return self._run(
f'featured_tags/{tag_id}',
version='v1',
method='delete',
**kwargs
)
@action
def publish_status(self, status: str, in_reply_to_id: Optional[str] = None,
media_ids: Optional[Iterable[str]] = None,
sensitive: Optional[bool] = None, spoiler_text: Optional[str] = None,
visibility: Optional[str] = None, **kwargs) -> dict:
"""
Publish a new status.
It requires the specified API token to have the ``write:statuses`` permission.
:param status: Content of the status to publish.
:param in_reply_to_id: Post the status in reply to this status ID.
:param media_ids: Optional list of media IDs to add as attachments.
:param sensitive: Set to true if sensitive.
:param spoiler_text: Set for optional spoiler text.
:param visibility: Supported values: ``public``, ``unlisted``, ``private`` and ``direct``.
:param kwargs: ``base_url``/``access_token`` override.
:return: .. schema:: mastodon.MastodonStatusSchema
"""
return self._run(
'statuses',
version='v1',
method='post',
schema=MastodonStatusSchema(),
json={
'status': status,
                **({'in_reply_to_id': in_reply_to_id} if in_reply_to_id is not None else {}),
                **({'media_ids': media_ids} if media_ids else {}),
                **({'sensitive': sensitive} if sensitive is not None else {}),
                **({'spoiler_text': spoiler_text} if spoiler_text is not None else {}),
                **({'visibility': visibility} if visibility is not None else {}),
},
**kwargs
)
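    # Example sketch (illustrative only; values are made up): publish an unlisted
    # status behind a content warning:
    #
    #   plugin.publish_status(
    #       status='Hello, fediverse!',
    #       visibility='unlisted',
    #       spoiler_text='test post',
    #   )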
@action
def get_status(self, status_id: str, **kwargs) -> dict:
"""
Get a status by ID.
It requires the specified API token to have the ``read:statuses`` permission.
:param status_id: Status ID.
:param kwargs: ``base_url``/``access_token`` override.
:return: .. schema:: mastodon.MastodonStatusSchema
"""
return self._run(
f'statuses/{status_id}',
version='v1',
schema=MastodonStatusSchema(),
**kwargs
)
@action
def remove_status(self, status_id: str, **kwargs):
"""
Remove a status by ID.
        It requires the specified API token to have the ``write:statuses`` permission.
:param status_id: Status ID.
:param kwargs: ``base_url``/``access_token`` override.
"""
return self._run(
f'statuses/{status_id}',
version='v1',
method='delete',
**kwargs
)
@action
def add_favourite_status(self, status_id: str, **kwargs):
"""
Favourite a status.
It requires the specified API token to have the ``write:favourites`` permission.
:param status_id: Status ID.
:param kwargs: ``base_url``/``access_token`` override.
"""
return self._run(
f'statuses/{status_id}/favourite',
version='v1',
method='post',
**kwargs
)
@action
def remove_favourite_status(self, status_id: str, **kwargs):
"""
Undo a status favourite action.
It requires the specified API token to have the ``write:favourites`` permission.
:param status_id: Status ID.
:param kwargs: ``base_url``/``access_token`` override.
"""
return self._run(
f'statuses/{status_id}/favourite',
version='v1',
method='delete',
**kwargs
)
@action
def reblog_status(self, status_id: str, **kwargs):
"""
Reblog (a.k.a. reshare/boost) a status.
It requires the specified API token to have the ``write:statuses`` permission.
:param status_id: Status ID.
:param kwargs: ``base_url``/``access_token`` override.
"""
return self._run(
f'statuses/{status_id}/reblog',
version='v1',
method='post',
**kwargs
)
@action
def undo_reblog_status(self, status_id: str, **kwargs):
"""
Undo a status reblog.
It requires the specified API token to have the ``write:statuses`` permission.
:param status_id: Status ID.
:param kwargs: ``base_url``/``access_token`` override.
"""
return self._run(
f'statuses/{status_id}/unreblog',
version='v1',
method='post',
**kwargs
)
@action
def bookmark_status(self, status_id: str, **kwargs):
"""
Add a status to the bookmarks.
It requires the specified API token to have the ``write:bookmarks`` permission.
:param status_id: Status ID.
:param kwargs: ``base_url``/``access_token`` override.
"""
return self._run(
f'statuses/{status_id}/bookmark',
version='v1',
method='post',
**kwargs
)
@action
def undo_bookmark_status(self, status_id: str, **kwargs):
"""
Remove a status from the bookmarks.
It requires the specified API token to have the ``write:bookmarks`` permission.
:param status_id: Status ID.
:param kwargs: ``base_url``/``access_token`` override.
"""
return self._run(
f'statuses/{status_id}/unbookmark',
version='v1',
method='post',
**kwargs
)
@action
def mute_status(self, status_id: str, **kwargs):
"""
Mute updates on a status.
It requires the specified API token to have the ``write:mutes`` permission.
:param status_id: Status ID.
:param kwargs: ``base_url``/``access_token`` override.
"""
return self._run(
f'statuses/{status_id}/mute',
version='v1',
method='post',
**kwargs
)
@action
def unmute_status(self, status_id: str, **kwargs):
"""
Restore updates on a status.
It requires the specified API token to have the ``write:mutes`` permission.
:param status_id: Status ID.
:param kwargs: ``base_url``/``access_token`` override.
"""
return self._run(
f'statuses/{status_id}/unmute',
version='v1',
method='post',
**kwargs
)
@action
def pin_status(self, status_id: str, **kwargs):
"""
Pin a status to the profile.
It requires the specified API token to have the ``write:accounts`` permission.
:param status_id: Status ID.
:param kwargs: ``base_url``/``access_token`` override.
"""
return self._run(
f'statuses/{status_id}/pin',
version='v1',
method='post',
**kwargs
)
@action
def unpin_status(self, status_id: str, **kwargs):
"""
Remove a pinned status.
It requires the specified API token to have the ``write:accounts`` permission.
:param status_id: Status ID.
:param kwargs: ``base_url``/``access_token`` override.
"""
return self._run(
f'statuses/{status_id}/unpin',
version='v1',
method='post',
**kwargs
)
@action
def upload_media(self, file: str, description: Optional[str] = None,
thumbnail: Optional[str] = None, **kwargs) -> dict:
"""
Upload media that can be used as attachments.
It requires the specified API token to have the ``write:media`` permission.
:param file: Path to the file to upload.
:param thumbnail: Path to the file thumbnail.
:param description: Optional attachment description.
:param kwargs: ``base_url``/``access_token`` override.
:return: .. schema:: mastodon.MastodonMediaSchema
"""
file_path = os.path.expanduser(file)
thumbnail_path = os.path.expanduser(thumbnail) if thumbnail else None
return self._run(
'media',
version='v1',
method='post',
schema=MastodonMediaSchema(),
data={
**({'description': description} if description else {}),
},
files={
'file': (
os.path.basename(file_path), open(file_path, 'rb'), get_mime_type(file_path)
),
**(
{
'thumbnail': (
os.path.basename(thumbnail_path),
                            open(thumbnail_path, 'rb'),
get_mime_type(thumbnail_path)
),
} if thumbnail_path else {}
),
},
**kwargs
)
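    # Example sketch (illustrative only) chaining media uploads and statuses. The
    # ``id`` key of the upload response is an assumption based on
    # ``MastodonMediaSchema``:
    #
    #   media = plugin.upload_media(file='~/pictures/cat.jpg', description='A cat')
    #   plugin.publish_status(status='Look at this cat', media_ids=[media['id']])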
@action
def update_media(self, media_id: str, file: Optional[str] = None, description: Optional[str] = None,
thumbnail: Optional[str] = None, **kwargs) -> dict:
"""
Update a media attachment.
It requires the specified API token to have the ``write:media`` permission.
:param media_id: Media ID to update.
:param file: Path to the new file.
:param description: New description.
:param thumbnail: Path to the new thumbnail.
:param kwargs: ``base_url``/``access_token`` override.
:return: .. schema:: mastodon.MastodonMediaSchema
"""
        file = os.path.expanduser(file) if file else None
        thumbnail = os.path.expanduser(thumbnail) if thumbnail else None
return self._run(
f'media/{media_id}',
version='v1',
method='put',
schema=MastodonMediaSchema(),
data={
**({'description': description} if description else {}),
},
            files={
                **({
                    'file': (
                        os.path.basename(file), open(file, 'rb'), get_mime_type(file)
                    ),
                } if file else {}),
                **(
                    {
                        'thumbnail': (
                            os.path.basename(thumbnail),
                            open(thumbnail, 'rb'),
                            get_mime_type(thumbnail)
                        ),
                    } if thumbnail else {}
                ),
            },
**kwargs
)
@action
def get_public_timeline(
self, local: bool = False, remote: bool = False, only_media: bool = False,
min_id: Optional[str] = None, max_id: Optional[str] = None, limit: int = 20,
offset: int = 0, **kwargs
) -> Iterable[dict]:
"""
Get a list of statuses from the public timeline.
It requires the specified API token to have the ``read:statuses`` permission.
:param local: Retrieve only local statuses (default: ``False``).
:param remote: Retrieve only remote statuses (default: ``False``).
:param only_media: Retrieve only statuses with media attached (default: ``False``).
:param min_id: Return results newer than this ID.
:param max_id: Return results older than this ID.
:param limit: Maximum number of results (default: 20).
:param offset: Return results from this offset (default: 0).
:param kwargs: ``base_url``/``access_token`` override.
:return: .. schema:: mastodon.MastodonStatusSchema(many=True)
"""
return self._run(
'timelines/public',
version='v1',
schema=MastodonStatusSchema(many=True),
params={
**({'local': local} if local else {}),
**({'remote': remote} if remote else {}),
**({'only_media': only_media} if only_media else {}),
**({'min_id': min_id} if min_id else {}),
**({'max_id': max_id} if max_id else {}),
**({'limit': limit} if limit else {}),
**({'offset': offset} if offset else {}),
}, **kwargs
)
@action
def get_hashtag_timeline(
self, hashtag: str, local: bool = False, only_media: bool = False,
min_id: Optional[str] = None, max_id: Optional[str] = None, limit: int = 20,
offset: int = 0, **kwargs
) -> Iterable[dict]:
"""
        Get a list of statuses associated with a hashtag.
It requires the specified API token to have the ``read:statuses`` permission.
:param hashtag: Hashtag to search.
:param local: Retrieve only local statuses (default: ``False``).
:param only_media: Retrieve only statuses with media attached (default: ``False``).
:param min_id: Return results newer than this ID.
:param max_id: Return results older than this ID.
:param limit: Maximum number of results (default: 20).
:param offset: Return results from this offset (default: 0).
:param kwargs: ``base_url``/``access_token`` override.
:return: .. schema:: mastodon.MastodonStatusSchema(many=True)
"""
return self._run(
f'timelines/tag/{hashtag}',
version='v1',
schema=MastodonStatusSchema(many=True),
params={
**({'local': local} if local else {}),
**({'only_media': only_media} if only_media else {}),
**({'min_id': min_id} if min_id else {}),
**({'max_id': max_id} if max_id else {}),
**({'limit': limit} if limit else {}),
**({'offset': offset} if offset else {}),
}, **kwargs
)
@action
def get_home_timeline(
self, local: bool = False, only_media: bool = False,
min_id: Optional[str] = None, max_id: Optional[str] = None, limit: int = 20,
offset: int = 0, **kwargs
) -> Iterable[dict]:
"""
Get a list of statuses from the followed users.
It requires the specified API token to have the ``read:statuses`` permission.
:param local: Retrieve only local statuses (default: ``False``).
:param only_media: Retrieve only statuses with media attached (default: ``False``).
:param min_id: Return results newer than this ID.
:param max_id: Return results older than this ID.
:param limit: Maximum number of results (default: 20).
:param offset: Return results from this offset (default: 0).
:param kwargs: ``base_url``/``access_token`` override.
:return: .. schema:: mastodon.MastodonStatusSchema(many=True)
"""
return self._run(
            'timelines/home',
version='v1',
schema=MastodonStatusSchema(many=True),
params={
**({'local': local} if local else {}),
**({'only_media': only_media} if only_media else {}),
**({'min_id': min_id} if min_id else {}),
**({'max_id': max_id} if max_id else {}),
**({'limit': limit} if limit else {}),
**({'offset': offset} if offset else {}),
}, **kwargs
)
@action
def get_list_timeline(
self, list_id: str,
min_id: Optional[str] = None, max_id: Optional[str] = None, limit: int = 20,
offset: int = 0, **kwargs
) -> Iterable[dict]:
"""
Get a list of statuses from a list timeline.
It requires the specified API token to have the ``read:lists`` permission.
:param list_id: List ID.
:param min_id: Return results newer than this ID.
:param max_id: Return results older than this ID.
:param limit: Maximum number of results (default: 20).
:param offset: Return results from this offset (default: 0).
:param kwargs: ``base_url``/``access_token`` override.
:return: .. schema:: mastodon.MastodonStatusSchema(many=True)
"""
return self._run(
f'timelines/list/{list_id}',
version='v1',
schema=MastodonStatusSchema(many=True),
params={
**({'min_id': min_id} if min_id else {}),
**({'max_id': max_id} if max_id else {}),
**({'limit': limit} if limit else {}),
**({'offset': offset} if offset else {}),
}, **kwargs
)
@action
def get_conversations(
self, min_id: Optional[str] = None, max_id: Optional[str] = None,
limit: int = 20, **kwargs
) -> Iterable[dict]:
"""
Get a list of user conversations.
It requires the specified API token to have the ``read:statuses`` permission.
:param min_id: Return results newer than this ID.
:param max_id: Return results older than this ID.
:param limit: Maximum number of results (default: 20).
:param kwargs: ``base_url``/``access_token`` override.
:return: .. schema:: mastodon.MastodonConversationSchema(many=True)
"""
return self._run(
'conversations',
version='v1',
schema=MastodonConversationSchema(many=True),
params={
**({'min_id': min_id} if min_id else {}),
**({'max_id': max_id} if max_id else {}),
**({'limit': limit} if limit else {}),
}, **kwargs
)
@action
def remove_conversation(self, conversation_id: int, **kwargs):
"""
Remove a conversation by ID.
        It requires the specified API token to have the ``write:conversations`` permission.
:param conversation_id: Conversation ID.
:param kwargs: ``base_url``/``access_token`` override.
"""
return self._run(
f'conversations/{conversation_id}',
version='v1',
method='delete',
**kwargs
)
@action
def mark_conversation_as_read(self, conversation_id: int, **kwargs):
"""
Mark a conversation as read.
        It requires the specified API token to have the ``write:conversations`` permission.
:param conversation_id: Conversation ID.
:param kwargs: ``base_url``/``access_token`` override.
"""
return self._run(
f'conversations/{conversation_id}/read',
version='v1',
method='post',
**kwargs
)
@action
def get_lists(self, list_id: Optional[int] = None, **kwargs) -> Union[dict, Iterable[dict]]:
"""
        Get the lists owned by the logged-in user.
It requires the specified API token to have the ``read:lists`` permission.
:param list_id: Retrieve a specific list ID (default: retrieve all).
:param kwargs: ``base_url``/``access_token`` override.
:return: .. schema:: mastodon.MastodonListSchema(many=True)
"""
return self._run(
'lists' + (f'/{list_id}' if list_id else ''),
version='v1',
method='get',
schema=MastodonListSchema(many=list_id is None),
**kwargs
)
@action
def create_list(self, title: str, replies_policy: str = 'list', **kwargs) -> dict:
"""
Create a new list.
It requires the specified API token to have the ``write:lists`` permission.
:param title: List title.
:param replies_policy: Possible values: ``none``, ``following`` or ``list``. Default: ``list``.
:param kwargs: ``base_url``/``access_token`` override.
:return: .. schema:: mastodon.MastodonListSchema
"""
return self._run(
'lists',
version='v1',
method='post',
schema=MastodonListSchema(),
data={'title': title, 'replies_policy': replies_policy},
**kwargs
)
@action
def update_list(
self, list_id: int, title: Optional[str], replies_policy: Optional[str] = None, **kwargs
) -> dict:
"""
Update a list.
It requires the specified API token to have the ``write:lists`` permission.
:param list_id: List ID.
:param title: New list title.
:param replies_policy: New replies policy.
Possible values: ``none``, ``following`` or ``list``. Default: ``list``.
:param kwargs: ``base_url``/``access_token`` override.
:return: .. schema:: mastodon.MastodonListSchema
"""
return self._run(
f'lists/{list_id}',
version='v1',
method='put',
schema=MastodonListSchema(),
data={
**({'title': title} if title else {}),
**({'replies_policy': replies_policy} if replies_policy else {}),
},
**kwargs
)
@action
def delete_list(self, list_id: int, **kwargs):
"""
Delete a list.
It requires the specified API token to have the ``write:lists`` permission.
:param list_id: List ID.
:param kwargs: ``base_url``/``access_token`` override.
"""
return self._run(
f'lists/{list_id}',
version='v1',
method='delete',
**kwargs
)
@action
def get_list_accounts(
self, list_id: Optional[int] = None, min_id: Optional[str] = None,
max_id: Optional[str] = None, limit: int = 20, **kwargs
) -> Iterable[dict]:
"""
Get the accounts in a list.
It requires the specified API token to have the ``read:lists`` permission.
:param list_id: List ID.
:param min_id: Return results newer than this ID.
:param max_id: Return results older than this ID.
:param limit: Maximum number of results (default: 20).
:param kwargs: ``base_url``/``access_token`` override.
:return: .. schema:: mastodon.MastodonAccountSchema(many=True)
"""
return self._run(
f'lists/{list_id}/accounts',
version='v1',
method='get',
schema=MastodonAccountSchema(many=True),
params={
**({'min_id': min_id} if min_id else {}),
**({'max_id': max_id} if max_id else {}),
**({'limit': limit} if limit else {}),
},
**kwargs
)
@action
def add_accounts_to_list(self, list_id: int, account_ids: Sequence[str], **kwargs):
"""
Add accounts to a list.
It requires the specified API token to have the ``write:lists`` permission.
:param list_id: List ID.
:param account_ids: Accounts that should be added.
:param kwargs: ``base_url``/``access_token`` override.
:return: .. schema:: mastodon.MastodonListSchema
"""
return self._run(
f'lists/{list_id}/accounts',
version='v1',
method='post',
data={'account_ids': account_ids},
**kwargs
)
@action
def remove_accounts_from_list(self, list_id: int, account_ids: Sequence[str], **kwargs):
"""
        Remove accounts from a list.
It requires the specified API token to have the ``write:lists`` permission.
:param list_id: List ID.
:param account_ids: Accounts that should be removed.
:param kwargs: ``base_url``/``access_token`` override.
:return: .. schema:: mastodon.MastodonListSchema
"""
return self._run(
f'lists/{list_id}/accounts',
version='v1',
method='delete',
data={'account_ids': account_ids},
**kwargs
)
@action
def get_notifications(
self, notification_id: Optional[str] = None, min_id: Optional[str] = None,
max_id: Optional[str] = None, limit: int = 20, **kwargs
) -> Union[dict, Iterable[dict]]:
"""
Get the list of notifications of the user.
It requires the specified API token to have the ``read:notifications`` permission.
        :param notification_id: If specified, retrieve only the notification associated with this ID.
:param min_id: Return results newer than this ID.
:param max_id: Return results older than this ID.
:param limit: Maximum number of results (default: 20).
:param kwargs: ``base_url``/``access_token`` override.
:return: .. schema:: mastodon.MastodonNotificationSchema(many=True)
"""
rs = self._run(
'notifications' + (f'/{notification_id}' if notification_id else ''),
version='v1',
method='get',
schema=MastodonNotificationSchema(many=notification_id is None),
params={
**({'min_id': min_id} if min_id else {}),
**({'max_id': max_id} if max_id else {}),
**({'limit': limit} if limit else {}),
},
**kwargs
)
return rs
@action
def dismiss_notifications(self, notification_id: Optional[str] = None, **kwargs):
"""
Dismiss notifications.
It requires the specified API token to have the ``write:notifications`` permission.
:param notification_id: Dismiss only this notification.
:param kwargs: ``base_url``/``access_token`` override.
"""
return self._run(
'notifications/' + (
f'{notification_id}/dismiss' if notification_id else 'clear'
),
version='v1',
method='post',
**kwargs
)
# vim:sw=4:ts=4:et:
|
py | 1a3d93af3dfd5f6f809884118e7aa0d8e05e3575 | #
# Copyright 2017-2018 Stanislav Pidhorskyi. All rights reserved.
# License: https://raw.githubusercontent.com/podgorskiy/impy/master/LICENSE.txt
#
from setuptools import setup, Extension, find_packages
from distutils.errors import DistutilsSetupError
from distutils.dep_util import newer_group
from distutils import log
from distutils.command.build_ext import build_ext
from codecs import open
import os
import sys
import platform
import re
sys._argv = sys.argv[:]
sys.argv = [sys.argv[0], '--root', 'gl3w/']
try:
from gl3w import gl3w_gen
except ImportError:
sys.path.insert(0, './gl3w')
import gl3w_gen
sys.argv = sys._argv
target_os = 'none'
if sys.platform == 'darwin':
target_os = 'darwin'
elif os.name == 'posix':
target_os = 'posix'
elif platform.system() == 'Windows':
target_os = 'win32'
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
def filter_sources(sources):
"""Filters sources into c, cpp and objc"""
cpp_ext_match = re.compile(r'.*[.](cpp|cxx|cc)\Z', re.I).match
c_ext_match = re.compile(r'.*[.](c|C)\Z', re.I).match
objc_ext_match = re.compile(r'.*[.]m\Z', re.I).match
c_sources = []
cpp_sources = []
objc_sources = []
other_sources = []
for source in sources:
if c_ext_match(source):
c_sources.append(source)
elif cpp_ext_match(source):
cpp_sources.append(source)
elif objc_ext_match(source):
objc_sources.append(source)
else:
other_sources.append(source)
return c_sources, cpp_sources, objc_sources, other_sources
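# Illustrative example (not executed): given
#   ['gl3w/src/gl3w.c', 'imgui/imgui.cpp', 'glfw/src/cocoa_init.m', 'README.rst']
# filter_sources() returns
#   (['gl3w/src/gl3w.c'], ['imgui/imgui.cpp'], ['glfw/src/cocoa_init.m'], ['README.rst'])
# so each group can later be compiled with its own extra flags.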
def build_extension(self, ext):
"""Modified version of build_extension method from distutils.
Can handle compiler args for different files"""
sources = ext.sources
if sources is None or not isinstance(sources, (list, tuple)):
raise DistutilsSetupError(
"in 'ext_modules' option (extension '%s'), "
"'sources' must be present and must be "
"a list of source filenames" % ext.name)
sources = list(sources)
ext_path = self.get_ext_fullpath(ext.name)
depends = sources + ext.depends
if not (self.force or newer_group(depends, ext_path, 'newer')):
log.debug("skipping '%s' extension (up-to-date)", ext.name)
return
else:
log.info("building '%s' extension", ext.name)
sources = self.swig_sources(sources, ext)
extra_args = ext.extra_compile_args or []
extra_c_args = getattr(ext, "extra_compile_c_args", [])
extra_cpp_args = getattr(ext, "extra_compile_cpp_args", [])
extra_objc_args = getattr(ext, "extra_compile_objc_args", [])
macros = ext.define_macros[:]
for undef in ext.undef_macros:
macros.append((undef,))
c_sources, cpp_sources, objc_sources, other_sources = filter_sources(sources)
def _compile(src, args):
return self.compiler.compile(src,
output_dir=self.build_temp,
macros=macros,
include_dirs=ext.include_dirs,
debug=self.debug,
extra_postargs=extra_args + args,
depends=ext.depends)
objects = []
objects += _compile(c_sources, extra_c_args)
objects += _compile(cpp_sources, extra_cpp_args)
objects += _compile(objc_sources, extra_objc_args)
objects += _compile(other_sources, [])
self._built_objects = objects[:]
if ext.extra_objects:
objects.extend(ext.extra_objects)
extra_args = ext.extra_link_args or []
language = ext.language or self.compiler.detect_language(sources)
self.compiler.link_shared_object(
objects, ext_path,
libraries=self.get_libraries(ext),
library_dirs=ext.library_dirs,
runtime_library_dirs=ext.runtime_library_dirs,
extra_postargs=extra_args,
export_symbols=self.get_export_symbols(ext),
debug=self.debug,
build_temp=self.build_temp,
target_lang=language)
# patching
build_ext.build_extension = build_extension
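# Rationale (comment added for clarity): stock distutils applies a single
# ``extra_compile_args`` list to every source file. The patched method above also
# honours the non-standard ``extra_compile_c_args``/``extra_compile_cpp_args``/
# ``extra_compile_objc_args`` attributes on the Extension, so a flag such as
# ``-std=c++11`` can be limited to the C++ translation units (see
# ``extension.extra_compile_cpp_args`` further below).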
glfw = [
"glfw/src/context.c"
,"glfw/src/init.c"
,"glfw/src/input.c"
,"glfw/src/monitor.c"
,"glfw/src/vulkan.c"
,"glfw/src/window.c"
]
glfw_platform = {
'darwin': [
"glfw/src/cocoa_init.m"
,"glfw/src/cocoa_joystick.m"
,"glfw/src/cocoa_monitor.m"
,"glfw/src/cocoa_window.m"
,"glfw/src/cocoa_time.c"
,"glfw/src/posix_thread.c"
,"glfw/src/nsgl_context.m"
,"glfw/src/egl_context.c"
,"glfw/src/osmesa_context.c"
],
'posix': [
"glfw/src/x11_init.c"
,"glfw/src/x11_monitor.c"
,"glfw/src/x11_window.c"
,"glfw/src/xkb_unicode.c"
,"glfw/src/posix_time.c"
,"glfw/src/posix_thread.c"
,"glfw/src/glx_context.c"
,"glfw/src/egl_context.c"
,"glfw/src/osmesa_context.c"
,"glfw/src/linux_joystick.c"
],
'win32': [
"glfw/src/win32_init.c"
,"glfw/src/win32_joystick.c"
,"glfw/src/win32_monitor.c"
,"glfw/src/win32_time.c"
,"glfw/src/win32_thread.c"
,"glfw/src/win32_window.c"
,"glfw/src/wgl_context.c"
,"glfw/src/egl_context.c"
,"glfw/src/osmesa_context.c"
]
}
imgui = [
"imgui/imgui.cpp"
,"imgui/imgui_demo.cpp"
,"imgui/imgui_draw.cpp"
,"imgui/imgui_widgets.cpp"
]
definitions = {
'darwin': [("_GLFW_COCOA", 1)],
'posix': [("GLFW_USE_OSMESA", 0), ("GLFW_USE_WAYLAND", 0), ("GLFW_USE_MIR", 0), ("_GLFW_X11", 1)],
'win32': [("GLFW_USE_HYBRID_HPG", 0), ("_GLFW_WIN32", 1), ("_CRT_SECURE_NO_WARNINGS", 1), ("NOMINMAX", 1)],
}
libs = {
'darwin': [],
'posix': ["rt", "m", "X11"],
'win32': ["gdi32", "opengl32", "Shell32"],
}
extra_link = {
'darwin': ["-framework", "Cocoa","-framework", "IOKit","-framework", "Cocoa","-framework", "CoreFoundation","-framework", "CoreVideo"],
'posix': [],
'win32': [],
}
extra_compile_args = {
'darwin': [],
'posix': [],
'win32': ['/MT', '/fp:fast', '/GL', '/GR-'],
}
extra_compile_cpp_args = {
'darwin': ['-std=c++11'],
'posix': ['-std=c++11'],
'win32': [],
}
extension = Extension("_bimpy",
imgui + glfw + glfw_platform[target_os] + ['bimpy.cpp', "imgui_glfw.cpp", "gl3w/src/gl3w.c"],
define_macros = definitions[target_os],
include_dirs=["glfw/include", "imgui", "pybind11/include", "gl3w/include"],
extra_compile_args=extra_compile_args[target_os],
extra_link_args=extra_link[target_os],
libraries = libs[target_os])
extension.extra_compile_cpp_args = extra_compile_cpp_args[target_os]
setup(
name='bimpy',
version='0.0.11',
description='bimpy - bundled imgui for python',
long_description=long_description,
url='https://github.com/podgorskiy/bimpy',
author='Stanislav Pidhorskyi',
author_email='[email protected]',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
],
keywords='imgui ui',
packages=['bimpy'],
ext_modules=[extension],
)
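# Build sketch (assumption: the glfw/imgui/pybind11/gl3w sources referenced above
# are available as git submodules next to this file):
#
#   git clone --recursive https://github.com/podgorskiy/bimpy.git
#   cd bimpy && pip install .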
|
py | 1a3d93b6c971fe2f0d91509497eb8280e45944ed | # -*- coding: utf-8 -*-
"""Test suite for axonius_api_client.wizard.wizard"""
import pytest
from axonius_api_client.api.wizards import Wizard
from axonius_api_client.constants.fields import ALL_NAME, Operators
from axonius_api_client.constants.wizards import Entry, Flags, Results, Types
from axonius_api_client.exceptions import NotFoundError, WizardError
from axonius_api_client.parsers.wizards import WizardParser
from ...utils import get_schema
class TestWizard:
@pytest.fixture(params=["api_devices", "api_users"])
def wizard(self, request):
apiobj = request.getfixturevalue(request.param)
obj = Wizard(apiobj=apiobj)
assert obj.APIOBJ == apiobj
assert isinstance(obj.PARSER, WizardParser)
return obj
class TestData:
@pytest.fixture
def test_data1(self, wizard):
simple = wizard.APIOBJ.FIELD_SIMPLE
cplex = wizard.APIOBJ.FIELD_COMPLEX
sub = wizard.APIOBJ.FIELD_COMPLEX_SUB
get_schema(apiobj=wizard.APIOBJ, field=cplex)
entries = [
{Entry.TYPE: Types.SIMPLE, Entry.VALUE: f"{simple} exists"},
{Entry.TYPE: Types.SIMPLE, Entry.VALUE: f"{simple} contains test"},
{Entry.TYPE: Types.SIMPLE, Entry.VALUE: f"|{simple} contains dev"},
{Entry.TYPE: Types.SIMPLE, Entry.VALUE: f"{cplex} exists"},
{
Entry.TYPE: Types.COMPLEX,
Entry.VALUE: f"!{cplex} // {sub} contains boom",
},
]
exp_exprs = [
{
"bracketWeight": 0,
"children": [
{
"condition": "",
"expression": {
"compOp": "",
"field": "",
"filteredAdapters": None,
"value": None,
},
"i": 0,
}
],
"compOp": "exists",
"field": f"{simple}",
"fieldType": "axonius",
"filter": f'(({simple} == ({{"$exists":true,"$ne":""}})))',
"filteredAdapters": None,
"leftBracket": False,
"logicOp": "",
"not": False,
"rightBracket": False,
"value": None,
},
{
"bracketWeight": 0,
"children": [
{
"condition": "",
"expression": {
"compOp": "",
"field": "",
"filteredAdapters": None,
"value": None,
},
"i": 0,
}
],
"compOp": "contains",
"field": f"{simple}",
"fieldType": "axonius",
"filter": f'and ({simple} == regex("test", "i"))',
"filteredAdapters": None,
"leftBracket": False,
"logicOp": "and",
"not": False,
"rightBracket": False,
"value": "test",
},
{
"bracketWeight": 0,
"children": [
{
"condition": "",
"expression": {
"compOp": "",
"field": "",
"filteredAdapters": None,
"value": None,
},
"i": 0,
}
],
"compOp": "contains",
"field": f"{simple}",
"fieldType": "axonius",
"filter": f'or ({simple} == regex("dev", "i"))',
"filteredAdapters": None,
"leftBracket": False,
"logicOp": "or",
"not": False,
"rightBracket": False,
"value": "dev",
},
{
"bracketWeight": 0,
"children": [
{
"condition": "",
"expression": {
"compOp": "",
"field": "",
"filteredAdapters": None,
"value": None,
},
"i": 0,
}
],
"compOp": "exists",
"field": f"{cplex}",
"fieldType": "axonius",
"filter": (
f'and (({cplex} == ({{"$exists":true,"$ne":[]}})) and ' f"{cplex} != [])"
),
"filteredAdapters": None,
"leftBracket": False,
"logicOp": "and",
"not": False,
"rightBracket": False,
"value": None,
},
{
"bracketWeight": 0,
"children": [
{
"condition": f'({sub} == regex("boom", "i"))',
"expression": {
"compOp": "contains",
"field": f"{sub}",
"filteredAdapters": None,
"value": "boom",
},
"i": 0,
}
],
"compOp": "",
"field": f"{cplex}",
"fieldType": "axonius",
"filter": (f'and not ({cplex} == match([({sub} == regex("boom", "i"))]))'),
"filteredAdapters": None,
"leftBracket": False,
"logicOp": "and",
"not": True,
"rightBracket": False,
"value": None,
"context": "OBJ",
},
]
exp_query = (
f'(({simple} == ({{"$exists":true,"$ne":""}}))) and ({simple} == '
f'regex("test", "i")) or ({simple} == regex("dev", "i")) and (({cplex} '
f'== ({{"$exists":true,"$ne":[]}})) and {cplex} != []) and not ({cplex} '
f'== match([({sub} == regex("boom", "i"))]))'
)
return entries, exp_exprs, exp_query
class TestCheckEntryType(TestWizard):
def test_invalid(self, wizard):
with pytest.raises(WizardError) as exc:
wizard._check_entry_type(etype="badwolf", types=Types.DICT)
assert "Invalid type" in str(exc.value)
def test_valid(self, wizard):
ret = wizard._check_entry_type(etype=Types.DICT[0].upper(), types=Types.DICT)
assert ret == Types.DICT[0]
class TestGetField(TestWizard):
@pytest.mark.parametrize(
"field, value_raw, exc_str",
[
[ALL_NAME, f"{ALL_NAME} blah blah", "Can not use"],
["badwolf", "badwolf blah blah", "Unable to find FIELD"],
],
)
def test_invalid(self, wizard, field, value_raw, exc_str):
with pytest.raises(WizardError) as exc:
wizard._get_field(value=field, value_raw=value_raw)
assert exc_str in str(exc)
def test_valid(self, wizard):
field = wizard.APIOBJ.FIELD_SIMPLE
ret = wizard._get_field(value=field, value_raw=f"{field} blah blah")
assert ret["name_qual"] == field
class TestGetFieldComplex(TestWizard):
@pytest.mark.parametrize(
"field, value_raw, exc_str",
[
[ALL_NAME, f"{ALL_NAME} blah blah", "Can not use"],
["badwolf", "badwolf blah blah", "Unable to find COMPLEX-FIELD"],
["internal_axon_id", "internal_axon_id blah blah", "Invalid COMPLEX-FIELD"],
],
)
def test_invalid(self, wizard, field, value_raw, exc_str):
with pytest.raises(WizardError) as exc:
wizard._get_field_complex(value=field, value_raw=value_raw)
assert exc_str in str(exc)
def test_valid(self, wizard):
field = wizard.APIOBJ.FIELD_COMPLEX
get_schema(apiobj=wizard.APIOBJ, field=field)
ret = wizard._get_field_complex(value=field, value_raw=f"{field} blah blah")
assert ret["name_qual"] == field
class TestGetOperator(TestWizard):
def test_valid(self, wizard):
field = {
"type": "string",
"name_qual": "badwolf",
"name": "badwolf",
"parent": "moo",
}
ret = wizard._get_operator(field=field, operator="equals", value_raw="boom")
assert ret == Operators.equals_str
def test_invalid(self, wizard):
field = {
"type": "string",
"name_qual": "badwolf",
"name": "badwolf",
"parent": "moo",
}
with pytest.raises(NotFoundError) as exc:
wizard._get_operator(field=field, operator="xx", value_raw="boom")
assert "Invalid OPERATOR name" in str(exc.value)
class TestCheckEntryKeys(TestWizard):
@pytest.mark.parametrize(
"entry, keys, exc_str",
[
[
{Entry.TYPE: "badwolf"},
Entry.REQ,
f"Missing required key {Entry.VALUE!r}",
],
[
{Entry.TYPE: "", Entry.VALUE: "y"},
Entry.REQ,
f"Empty required key {Entry.TYPE!r}",
],
[{Entry.TYPE: 1, Entry.VALUE: "y"}, Entry.REQ, "Invalid type "],
],
)
def test_invalid(self, wizard, entry, keys, exc_str):
with pytest.raises(WizardError) as exc:
wizard._check_entry_keys(entry=entry, keys=keys)
assert exc_str in str(exc.value)
@pytest.mark.parametrize(
"entry, keys",
[
[{Entry.TYPE: "xxx", Entry.VALUE: "y"}, Entry.REQ],
],
)
def test_valid(self, wizard, entry, keys):
wizard._check_entry_keys(entry=entry, keys=keys)
class TestSplitFlags(TestWizard):
@pytest.mark.parametrize(
"value_raw, exp",
[
[
f"{Flags.NOT} @ $ hostname contains blah {Flags.RIGHTB}",
([Flags.NOT, "@", "$", Flags.RIGHTB], "hostname contains blah "),
],
[
f" {Flags.NOT} {Flags.AND} hostname contains blah {Flags.RIGHTB}",
([Flags.NOT, Flags.AND, Flags.RIGHTB], "hostname contains blah"),
],
[
f"{Flags.NOT}{Flags.OR}hostname contains blah{Flags.RIGHTB}",
([Flags.NOT, Flags.OR, Flags.RIGHTB], "hostname contains blah"),
],
[
"hostname contains blah",
([], "hostname contains blah"),
],
[
"hostname contains blah ",
([], "hostname contains blah "),
],
[
"hostname contains blah s2904829 50(#*)$(!*&_)(@!",
([], "hostname contains blah s2904829 50(#*)$(!*&_)(@!"),
],
],
)
def test_valid(self, wizard, value_raw, exp):
ret = wizard._split_flags(value_raw=value_raw)
assert ret == exp
@pytest.mark.parametrize(
"value_raw",
["#@#$"],
)
def test_invalid(self, wizard, value_raw):
with pytest.raises(WizardError):
wizard._split_flags(value_raw=value_raw)
class TestSplitSimple(TestWizard):
@pytest.mark.parametrize(
"value_raw, exp",
[
[
"badwolf equals fool",
("badwolf", "equals", "fool"),
],
[
"badwolf equals fool it up",
("badwolf", "equals", "fool it up"),
],
[
"badwolf equals",
("badwolf", "equals", ""),
],
[
"badwolf_moo.foo equals",
("badwolf_moo.foo", "equals", ""),
],
],
)
def test_valid(self, wizard, value_raw, exp):
ret = wizard._split_simple(value_raw=value_raw)
assert ret == exp
@pytest.mark.parametrize(
"value_raw, exc_str",
[
["", "FIELD"],
["!ab@ contains blah", "FIELD"],
["badwolf", "OPERATOR"],
["badwolf 232 blah", "OPERATOR"],
],
)
def test_invalid(self, wizard, value_raw, exc_str):
with pytest.raises(WizardError) as exc:
wizard._split_simple(value_raw=value_raw)
assert exc_str in str(exc.value)
class TestSplitComplex(TestWizard):
@pytest.mark.parametrize(
"value_raw, exp",
[
[
"badwolf // subfield contains blah",
("badwolf", ["subfield contains blah"]),
],
[
"badwolf // subfield contains blah // subfield contains moo",
("badwolf", ["subfield contains blah", "subfield contains moo"]),
],
[
"badwolf_moo.foo // subfield contains blah // subfield contains moo",
(
"badwolf_moo.foo",
["subfield contains blah", "subfield contains moo"],
),
],
],
)
def test_valid(self, wizard, value_raw, exp):
ret = wizard._split_complex(value_raw=value_raw)
assert ret == exp
@pytest.mark.parametrize(
"value_raw, exc_str",
[
["", f"No {Entry.CSPLIT} found in value"],
["badwolf", f"No {Entry.CSPLIT} found in value"],
["!ab@ // subfield contains blah", "FIELD"],
],
)
def test_invalid(self, wizard, value_raw, exc_str):
with pytest.raises(WizardError) as exc:
wizard._split_complex(value_raw=value_raw)
assert exc_str in str(exc.value)
class TestParseFlags(TestWizard):
def test_valid1(self, wizard):
entry1 = {
Entry.TYPE: Types.SIMPLE,
Entry.VALUE: f"{Flags.NOT} @ $ hostname contains blah {Flags.RIGHTB}",
}
entries = [entry1]
is_open = False
tracker = 0
exp1_entry = {
Entry.TYPE: Types.SIMPLE,
Entry.VALUE: "hostname contains blah ",
Entry.FLAGS: [Flags.NOT, "@", "$", Flags.RIGHTB, Flags.LEFTB],
Entry.WEIGHT: -1,
}
exp1_is_open = False
exp1_tracker = 0
ret1_entry, is_open, tracker = wizard._parse_flags(
entry=entry1, idx=0, entries=entries, tracker=tracker, is_open=is_open
)
assert ret1_entry == exp1_entry
assert is_open == exp1_is_open
assert tracker == exp1_tracker
def test_valid2(self, wizard):
entry1 = {
Entry.TYPE: Types.SIMPLE,
Entry.VALUE: f"{Flags.NOT} @ $ hostname contains blah ",
}
entries = [entry1]
is_open = False
tracker = 0
exp_entry = {
Entry.TYPE: Types.SIMPLE,
Entry.VALUE: "hostname contains blah ",
Entry.FLAGS: [Flags.NOT, "@", "$"],
Entry.WEIGHT: 0,
}
exp_is_open = False
exp_tracker = 0
ret_entry, is_open, tracker = wizard._parse_flags(
entry=entry1, idx=0, entries=entries, tracker=tracker, is_open=is_open
)
assert ret_entry == exp_entry
assert is_open == exp_is_open
assert tracker == exp_tracker
def test_valid3(self, wizard):
entry1 = {
Entry.TYPE: Types.SIMPLE,
Entry.VALUE: f"{Flags.NOT}{Flags.LEFTB} hostname contains blah ",
}
entry2 = {
Entry.TYPE: Types.SIMPLE,
Entry.VALUE: f"{Flags.NOT}{Flags.LEFTB}hostname contains blah ",
}
entries = [entry1, entry2]
is_open = False
tracker = 0
exp1_entry = {
Entry.TYPE: Types.SIMPLE,
Entry.VALUE: "hostname contains blah ",
Entry.FLAGS: [Flags.NOT, Flags.LEFTB],
Entry.WEIGHT: -1,
}
exp1_is_open = True
exp1_tracker = 0
ret1_entry, is_open, tracker = wizard._parse_flags(
entry=entry1, idx=0, entries=entries, tracker=tracker, is_open=is_open
)
assert ret1_entry == exp1_entry
assert is_open == exp1_is_open
assert tracker == exp1_tracker
exp2_entry = {
Entry.TYPE: Types.SIMPLE,
Entry.VALUE: "hostname contains blah ",
Entry.FLAGS: [Flags.NOT, Flags.LEFTB, Flags.RIGHTB],
Entry.WEIGHT: -1,
}
exp2_is_open = True
exp2_tracker = 0
ret2_entry, ret2_is_open, ret2_tracker = wizard._parse_flags(
entry=entry2, idx=1, entries=entries, tracker=tracker, is_open=is_open
)
assert ret2_entry == exp2_entry
assert is_open == exp2_is_open
assert tracker == exp2_tracker
assert entry1[Entry.FLAGS] == [Flags.NOT, Flags.LEFTB, Flags.RIGHTB]
def test_valid4(self, wizard):
entry1 = {
Entry.TYPE: Types.SIMPLE,
Entry.VALUE: f"{Flags.LEFTB}hostname contains blah ",
}
entry2 = {
Entry.TYPE: Types.SIMPLE,
Entry.VALUE: "hostname contains blah ",
}
entry3 = {
Entry.TYPE: Types.SIMPLE,
Entry.VALUE: f"hostname contains blah{Flags.RIGHTB}",
}
entry4 = {
Entry.TYPE: Types.SIMPLE,
Entry.VALUE: "hostname contains blah",
}
entries = [entry1, entry2, entry3, entry4]
is_open = False
tracker = 0
exp1_entry = {
Entry.TYPE: Types.SIMPLE,
Entry.VALUE: "hostname contains blah ",
Entry.FLAGS: [Flags.LEFTB],
Entry.WEIGHT: -1,
}
exp1_is_open = True
exp1_tracker = 0
ret1_entry, is_open, tracker = wizard._parse_flags(
entry=entry1, idx=0, entries=entries, tracker=tracker, is_open=is_open
)
assert ret1_entry == exp1_entry
assert is_open == exp1_is_open
assert tracker == exp1_tracker
exp2_entry = {
Entry.TYPE: Types.SIMPLE,
Entry.VALUE: "hostname contains blah ",
Entry.FLAGS: [],
Entry.WEIGHT: 1,
}
exp2_is_open = True
exp2_tracker = 1
ret2_entry, is_open, tracker = wizard._parse_flags(
entry=entry2, idx=1, entries=entries, tracker=tracker, is_open=is_open
)
assert ret2_entry == exp2_entry
assert is_open == exp2_is_open
assert tracker == exp2_tracker
exp3_entry = {
Entry.TYPE: Types.SIMPLE,
Entry.VALUE: "hostname contains blah",
Entry.FLAGS: [Flags.RIGHTB],
Entry.WEIGHT: 2,
}
exp3_is_open = False
exp3_tracker = 0
ret3_entry, is_open, tracker = wizard._parse_flags(
entry=entry3, idx=2, entries=entries, tracker=tracker, is_open=is_open
)
assert ret3_entry == exp3_entry
assert is_open == exp3_is_open
assert tracker == exp3_tracker
exp4_entry = {
Entry.TYPE: Types.SIMPLE,
Entry.VALUE: "hostname contains blah",
Entry.FLAGS: [],
Entry.WEIGHT: 0,
}
exp4_is_open = False
exp4_tracker = 0
ret4_entry, is_open, tracker = wizard._parse_flags(
entry=entry4, idx=3, entries=entries, tracker=tracker, is_open=is_open
)
assert ret4_entry == exp4_entry
assert is_open == exp4_is_open
assert tracker == exp4_tracker
class TestParseEntries(TestWizard):
def test_valid(self, wizard):
entry1 = {
Entry.TYPE: Types.SIMPLE,
Entry.VALUE: f"{Flags.LEFTB}hostname contains blah ",
}
entry2 = {
Entry.TYPE: Types.SIMPLE,
Entry.VALUE: "hostname contains blah ",
}
entry3 = {
Entry.TYPE: Types.SIMPLE,
Entry.VALUE: f"hostname contains blah{Flags.RIGHTB}",
}
entry4 = {
Entry.TYPE: Types.SIMPLE,
Entry.VALUE: "hostname contains blah",
}
entries = [entry1, entry2, entry3, entry4]
source = "a test..."
exp = [
{
Entry.TYPE: "simple",
Entry.VALUE: "hostname contains blah ",
Entry.SRC: f"{source} entry #1/4",
Entry.FLAGS: [Flags.LEFTB],
Entry.WEIGHT: -1,
},
{
Entry.TYPE: "simple",
Entry.VALUE: "hostname contains blah ",
Entry.SRC: f"{source} entry #2/4",
Entry.FLAGS: [],
Entry.WEIGHT: 1,
},
{
Entry.TYPE: "simple",
Entry.VALUE: "hostname contains blah",
Entry.SRC: f"{source} entry #3/4",
Entry.FLAGS: [Flags.RIGHTB],
Entry.WEIGHT: 2,
},
{
Entry.TYPE: "simple",
Entry.VALUE: "hostname contains blah",
Entry.SRC: f"{source} entry #4/4",
Entry.FLAGS: [],
Entry.WEIGHT: 0,
},
]
ret = wizard._parse_entries(entries=entries, source=source)
assert ret == exp
def test_invalid(self, wizard):
entry1 = {
Entry.TYPE: Types.SIMPLE,
Entry.VALUE: f"{Flags.LEFTB}hostname contains blah ",
}
entry2 = {
Entry.TYPE: "merp",
Entry.VALUE: "hostname contains blah ",
}
entries = [entry1, entry2]
source = "a test..."
with pytest.raises(WizardError) as exc:
wizard._parse_entries(entries=entries, source=source)
assert f"Error parsing entry from {source}" in str(exc.value)
assert "entry #2/2" in str(exc.value)
class TestParseSimple(TestWizard):
def test_valid(self, wizard):
field = wizard.APIOBJ.FIELD_SIMPLE
entry = {
Entry.TYPE: "simple",
Entry.VALUE: f"{field} contains blah",
}
exp = {
"bracketWeight": 0,
"children": [
{
"condition": "",
"expression": {
"compOp": "",
"field": "",
"filteredAdapters": None,
"value": None,
},
"i": 0,
}
],
"compOp": "contains",
"field": field,
"fieldType": "axonius",
"filter": f'({field} == regex("blah", "i"))',
"filteredAdapters": None,
"leftBracket": False,
"logicOp": "",
"not": False,
"rightBracket": False,
"value": "blah",
}
ret = wizard._parse_simple(entry=entry, idx=0)
assert ret == exp
class TestParseComplex(TestWizard):
def test_valid(self, wizard):
field = wizard.APIOBJ.FIELD_COMPLEX
get_schema(apiobj=wizard.APIOBJ, field=field)
sub = wizard.APIOBJ.FIELD_COMPLEX_SUB
entry = {
Entry.TYPE: "complex",
Entry.VALUE: f"{field} // {sub} contains boom // {sub} exists",
}
exp = {
"bracketWeight": 0,
"children": [
{
"condition": f'({sub} == regex("boom", "i"))',
"expression": {
"compOp": "contains",
"field": sub,
"filteredAdapters": None,
"value": "boom",
},
"i": 0,
},
{
"condition": f'(({sub} == ({{"$exists":true,"$ne":""}})))',
"expression": {
"compOp": "exists",
"field": sub,
"filteredAdapters": None,
"value": None,
},
"i": 1,
},
],
"compOp": "",
"field": field,
"fieldType": "axonius",
"filter": (
f'({field} == match([({sub} == regex("boom", "i")) and (({sub} == '
'({"$exists":true,"$ne":""})))]))'
),
"filteredAdapters": None,
"leftBracket": False,
"logicOp": "",
"not": False,
"rightBracket": False,
"value": None,
"context": "OBJ",
}
ret = wizard._parse_complex(entry=entry, idx=0)
assert ret == exp
def test_invalid(self, wizard):
field = wizard.APIOBJ.FIELD_COMPLEX
get_schema(apiobj=wizard.APIOBJ, field=field)
sub = wizard.APIOBJ.FIELD_COMPLEX_SUB
entry = {
Entry.TYPE: "complex",
Entry.VALUE: f"{field} // {sub} contains boom // badwolf exists",
}
with pytest.raises(WizardError) as exc:
wizard._parse_complex(entry=entry, idx=0)
assert "Unable to find SUB-FIELD" in str(exc.value)
assert "Error parsing sub field" in str(exc.value)
class TestParseExprs(TestWizard):
def test_valid1(self, wizard):
field = wizard.APIOBJ.FIELD_SIMPLE
entries = [
{
Entry.TYPE: "simple",
Entry.VALUE: f"{field} contains blah",
}
]
exp = [
{
"bracketWeight": 0,
"children": [
{
"condition": "",
"expression": {
"compOp": "",
"field": "",
"filteredAdapters": None,
"value": None,
},
"i": 0,
}
],
"compOp": "contains",
"field": field,
"fieldType": "axonius",
"filter": f'({field} == regex("blah", "i"))',
"filteredAdapters": None,
"leftBracket": False,
"logicOp": "",
"not": False,
"rightBracket": False,
"value": "blah",
}
]
ret = wizard._parse_exprs(entries=entries)
assert ret == exp
def test_invalid(self, wizard):
entry = {Entry.TYPE: "badwolf"}
with pytest.raises(WizardError) as exc:
wizard._parse_exprs(entries=[entry])
assert "Error parsing expression from" in str(exc.value)
class TestParse(TestWizard, TestData):
def test_valid(self, wizard, test_data1):
entries, exp_exprs, exp_query = test_data1
ret = wizard.parse(entries=entries)
ret_query = ret[Results.QUERY]
ret_exprs = ret[Results.EXPRS]
assert ret_query == exp_query
assert ret_exprs == exp_exprs
# just make sure the REST API can parse the query
wizard.APIOBJ.get(query=ret[Results.QUERY], max_rows=1)
# make sure the REST API can create a saved query
name = "api wizard test"
try:
wizard.APIOBJ.saved_query.delete_by_name(value=name)
except Exception:
pass
sq = wizard.APIOBJ.saved_query.add(name=name, query=ret_query, expressions=ret_exprs)
assert sq["name"] == name
assert sq["view"]["query"]["filter"] == exp_query
assert sq["view"]["query"]["expressions"] == exp_exprs
wizard.APIOBJ.saved_query.delete_by_name(value=name)
|
py | 1a3d941a3e884ebba6ca929b178ee92e17f4000e | import argparse
import threading
import time
# import uuid
from http.server import BaseHTTPRequestHandler, HTTPServer
from typing import Tuple, Union
import consul
import yaml
from consul.base import Check
from logger import create_info_logger
from utils.network import find_open_port, get_ip_address
logger = create_info_logger("registry", "registry.log")
config = None
current_service_id = None
class EndpointConfig:
Port: int
Host: str
Scheme: str
def __str__(self):
return self.Scheme + "://" + self.Host + ":" + str(self.Port)
class RepeatedTimer(object):
def __init__(self, interval, function, *args, **kwargs):
self._timer = None
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.is_running = False
self.next_call = time.time()
self.start()
def _run(self):
self.is_running = False
self.start()
self.function(*self.args, **self.kwargs)
def start(self):
if not self.is_running:
self.next_call += self.interval
self._timer = threading.Timer(self.next_call - time.time(), self._run)
self._timer.start()
self.is_running = True
def stop(self):
self._timer.cancel()
self.is_running = False
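# Illustrative usage sketch (not part of the original module): RepeatedTimer starts itself on
# construction and re-arms after every tick, so the typical lifecycle is construct -> wait -> stop().
# The interval and message below are made-up example values.
def _example_repeated_timer_usage(interval_seconds: int = 5):
    timer = RepeatedTimer(interval_seconds, logger.info, "tick from RepeatedTimer example")
    time.sleep(interval_seconds * 2.5)  # let the timer fire a couple of times
    timer.stop()  # cancel the pending timer so the process can exit cleanly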
class DebugWebServer(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(bytes("<html><head><title>Registry info</title></head>", "utf-8"))
self.wfile.write(bytes("<p>Request: %s</p>" % self.path, "utf-8"))
self.wfile.write(bytes("<body>", "utf-8"))
self.wfile.write(bytes("<p>Service status.</p>", "utf-8"))
self.wfile.write(bytes("</body></html>", "utf-8"))
def start_webserver(hostName, serverPort):
webServer = HTTPServer((hostName, serverPort), DebugWebServer)
logger.info("Server started http://%s:%s" % (hostName, serverPort))
try:
webServer.serve_forever()
except KeyboardInterrupt:
pass
webServer.server_close()
logger.info("Server stopped.")
def verify_connection(cfg: EndpointConfig) -> bool:
"""
Verify consul connection
Exceptions throw such as ConnectionError will be captured
"""
if cfg is None:
raise Exception("Configuration is required")
port = cfg.Port
host = cfg.Host
logger.debug('Verifying Consul connection to %s:%s', host, port)
try:
client = consul.Consul(host=host, port=port)
client.agent.self()
return True
except Exception:
pass
return False
def createClient(cfg: EndpointConfig, verify: bool = True) -> Tuple[consul.Consul, bool]:
"""
Create new consul client
"""
if cfg is None:
raise Exception("Configuration is required but got None")
try:
port = cfg.Port
host = cfg.Host
logger.info('Consul Host: %s Port: %s ', host, port)
client = consul.Consul(host=host, port=port)
online = False
if verify:
online = verify_connection(cfg)
logger.debug('Consul online : %s', online)
return client, online
except Exception:
pass
return None, False
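# Minimal usage sketch (assumed values): build an EndpointConfig by hand and obtain a client,
# degrading gracefully when the Consul agent is unreachable. Host/port below are placeholders for a
# local agent; the real values come from the YAML config loaded in __main__ further down.
def _example_create_client():
    cfg = EndpointConfig()
    cfg.Host, cfg.Port, cfg.Scheme = "127.0.0.1", 8500, "http"
    client, online = createClient(cfg, verify=True)
    if not online:
        logger.warning("Consul agent not reachable at %s", cfg)
    return client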
def driver_version():
return consul.__version__
def getServiceByNameAndId(service_name, service_id):
c, online = createClient(config, True)
if not online:
return None
index, nodes = c.health.service(service_name)
for node in nodes:
if node['Service']['ID'] == service_id:
return node
return None
def register(service_host, service_port, service_id=None) -> Union[None, str]:
"""
Register new service in consul
"""
logger.info('Registering ServiceHost: %s Port: %s ',
service_host, service_port)
c, online = createClient(config, True)
if not online:
logger.debug('Consul service is offline')
return None
service_name = 'traefik-system-ingress'
service_url = f'http://{service_host}:{service_port}/api'
# TODO : Service ID generation needs to be configurable
# Create new service id, otherwise we will re-register same id
if service_id is None:
# service_id = f'{service_name}@{service_port}#{uuid.uuid4()}'
host = get_ip_address()
service_id = f'{service_name}@{host}:{service_port}'
# service_id = f'{service_name}@{service_port}'
logger.info('Service url: %s', service_url)
logger.info('Service id: %s', service_id)
# TODO: De-registration needs to be configurable
c.agent.service.register(
name=service_name,
service_id=service_id,
port=service_port,
address=service_host,
# check=Check.http(service_url, '10s', deregister='10m'),
check=Check.http(service_url, '10s'),
tags=[
"traefik.enable=true",
"traefik.consulcatalog.connect=false",
"traefik.http.routers.traefik-system-ingress.entrypoints=marie",
"traefik.http.routers.traefik-system-ingress.service=traefik-system-ingress",
"traefik.http.routers.traefik-system-ingress.rule=HostRegexp(`{host:.+}`)",
"traefik.http.services.traefik-system-ingress.loadbalancer.server.scheme=http",
])
return service_id
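# Usage sketch (hypothetical values): register this process once and keep the returned service id
# around so the watchdog below can re-register it if it drops out of the Consul catalog. Note that
# register() relies on the module-level `config` being initialised first (done in __main__ from the
# YAML file), so this helper is illustrative only and is never called.
def _example_register():
    sid = register(service_host=get_ip_address(), service_port=find_open_port(), service_id=None)
    logger.info("example registration produced service id: %s", sid)
    return sid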
def start_watchdog(interval, service_host, service_port):
sid = current_service_id
def _register(_service_host, _service_port):
nonlocal sid
logger.info("watchdog:Host, Port, ServiceId : %s, %s, %s", _service_host, _service_port, sid)
online = verify_connection(config)
logger.info('watchdog:consul online : %s', online)
service_name = 'traefik-system-ingress'
if online:
node = getServiceByNameAndId(service_name, sid)
if node is None:
sid = register(service_host=_service_host, service_port=_service_port, service_id=sid)
logger.info('watchdog:Re-registered service: %s', sid)
logger.info("watchdog:starting with interval : %s", interval)
rt = RepeatedTimer(interval, _register, service_host, service_port)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# parser.add_argument('--debug-server', type=bool, default=False, required=False, help='Should we start debug webserver')
# parser.add_argument('--port', type=int, default=-1, help='Port number to export (-1 dynamic)')
# parser.add_argument('--ip', type=str, default='127.0.0.1', help='Service IP to expose, blank for dynamic')
# parser.add_argument('--watchdog-interval', type=int, default=60, help='watchdog interval checkin seconds')
parser.add_argument('--config', type=str, default='./config/marie-debug.yml', help='Configuration file')
opt = parser.parse_args()
# Load config
with open(opt.config, "r") as yamlfile:
data = yaml.load(yamlfile, Loader=yaml.FullLoader)
logger.info(f"Config read successfully : {opt.config}")
print(data)
enabled = bool(data['RegistryEnabled'])
if not enabled:
logger.info("registry not enabled, exiting...")
exit()
config = EndpointConfig()
config.Host = data['ConsulEndpoint']['Host']
config.Port = int(data['ConsulEndpoint']['Port'])
config.Scheme = data['ConsulEndpoint']['Scheme']
hostName = data['ServiceEndpoint']['Host']
serverPort = int(data['ServiceEndpoint']['Port'])
watchdog_interval = int(data['WatchdogInterval'])
debug_server = bool(data['DebugWebserver'])
if hostName is None or hostName == '':
hostName = get_ip_address()
if serverPort == -1:
serverPort = find_open_port()
current_service_id = register(
service_host=hostName, service_port=serverPort, service_id=None)
logger.info('Registered service: %s', current_service_id)
def _target():
return start_watchdog(watchdog_interval,
service_host=hostName, service_port=serverPort)
    watchdog_task = threading.Thread(target=_target, daemon=debug_server)
    watchdog_task.start()
if debug_server:
start_webserver(hostName, serverPort)
|
py | 1a3d946eed168377da357f54d70d763d9e2f8ff3 | """Tests for checker module
"""
# from mathgrid import solver
from mathgrid.solver.basic_operations import Operator
from mathgrid import solver
def test_is_operator_01():
for item in '+-*/^':
assert solver.is_operator(item) is True
def test_is_operator_02():
assert solver.is_operator('h') is False
assert solver.is_operator('·') is False
assert solver.is_operator('0') is False
def test_is_expression_01():
assert solver.is_expression(['=', '(', '1', '+', '2', ')']) is True
def test_is_expression_02():
assert solver.is_expression(['(', '1', '+', '2', ')']) is False
def test_is_number_01():
for item in ['1', '-8', '2.2', '.6']:
assert solver.is_number(item) is True
def test_is_number_02():
assert solver.is_number('h') is False
assert solver.is_number('+') is False
assert solver.is_number(Operator.ADDITION) is False
def test_valid_num_brackets_01():
test_example = ['=', '(', '(', '8', '+', '3', ')',
'*', '2', ')', '/', '(', '5', '-', '1', ')']
assert solver.valid_num_brackets(test_example) is True
def test_valid_num_brackets_02():
test_example = ['=', ')', '(', '8', '+', '3',
')', '*', '2', '(', '/', '(', '5', '-', '1', ')']
assert solver.valid_num_brackets(test_example) is False
def test_check_expression_01():
test_example = ['=', '(', '(', '8', '+', '3', ')',
'*', '2', ')', '/', '(', '5', '-', '1', ')']
assert solver.check_expression(test_example) is True
test_example = ['=', '(', '(', 'h', '+', '3', ')',
'*', '2', ')', '/', '(', '5', '-', '1', ')']
assert solver.check_expression(test_example) is False
test_example = ['=', '(', '(', '8', '9', '+', '3', ')',
'*', '2', ')', '/', '(', '5', '-', '1', ')']
assert solver.check_expression(test_example) is False
test_example = ['=', '(', '(', '8', '+', '+', '3', ')',
'*', '2', ')', '/', '(', '5', '-', '1', ')']
assert solver.check_expression(test_example) is False
|
py | 1a3d95a8c7a04d791829958cb76320ab923507ca | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetWebApplicationFirewallPolicyResult',
'AwaitableGetWebApplicationFirewallPolicyResult',
'get_web_application_firewall_policy',
]
@pulumi.output_type
class GetWebApplicationFirewallPolicyResult:
"""
Defines web application firewall policy.
"""
def __init__(__self__, application_gateways=None, custom_rules=None, etag=None, http_listeners=None, id=None, location=None, managed_rules=None, name=None, path_based_rules=None, policy_settings=None, provisioning_state=None, resource_state=None, tags=None, type=None):
if application_gateways and not isinstance(application_gateways, list):
raise TypeError("Expected argument 'application_gateways' to be a list")
pulumi.set(__self__, "application_gateways", application_gateways)
if custom_rules and not isinstance(custom_rules, list):
raise TypeError("Expected argument 'custom_rules' to be a list")
pulumi.set(__self__, "custom_rules", custom_rules)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if http_listeners and not isinstance(http_listeners, list):
raise TypeError("Expected argument 'http_listeners' to be a list")
pulumi.set(__self__, "http_listeners", http_listeners)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if managed_rules and not isinstance(managed_rules, dict):
raise TypeError("Expected argument 'managed_rules' to be a dict")
pulumi.set(__self__, "managed_rules", managed_rules)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if path_based_rules and not isinstance(path_based_rules, list):
raise TypeError("Expected argument 'path_based_rules' to be a list")
pulumi.set(__self__, "path_based_rules", path_based_rules)
if policy_settings and not isinstance(policy_settings, dict):
raise TypeError("Expected argument 'policy_settings' to be a dict")
pulumi.set(__self__, "policy_settings", policy_settings)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if resource_state and not isinstance(resource_state, str):
raise TypeError("Expected argument 'resource_state' to be a str")
pulumi.set(__self__, "resource_state", resource_state)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="applicationGateways")
def application_gateways(self) -> Sequence['outputs.ApplicationGatewayResponse']:
"""
A collection of references to application gateways.
"""
return pulumi.get(self, "application_gateways")
@property
@pulumi.getter(name="customRules")
def custom_rules(self) -> Optional[Sequence['outputs.WebApplicationFirewallCustomRuleResponse']]:
"""
The custom rules inside the policy.
"""
return pulumi.get(self, "custom_rules")
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="httpListeners")
def http_listeners(self) -> Sequence['outputs.SubResourceResponse']:
"""
A collection of references to application gateway http listeners.
"""
return pulumi.get(self, "http_listeners")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="managedRules")
def managed_rules(self) -> 'outputs.ManagedRulesDefinitionResponse':
"""
Describes the managedRules structure.
"""
return pulumi.get(self, "managed_rules")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="pathBasedRules")
def path_based_rules(self) -> Sequence['outputs.SubResourceResponse']:
"""
A collection of references to application gateway path rules.
"""
return pulumi.get(self, "path_based_rules")
@property
@pulumi.getter(name="policySettings")
def policy_settings(self) -> Optional['outputs.PolicySettingsResponse']:
"""
The PolicySettings for policy.
"""
return pulumi.get(self, "policy_settings")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the web application firewall policy resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceState")
def resource_state(self) -> str:
"""
Resource status of the policy.
"""
return pulumi.get(self, "resource_state")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetWebApplicationFirewallPolicyResult(GetWebApplicationFirewallPolicyResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetWebApplicationFirewallPolicyResult(
application_gateways=self.application_gateways,
custom_rules=self.custom_rules,
etag=self.etag,
http_listeners=self.http_listeners,
id=self.id,
location=self.location,
managed_rules=self.managed_rules,
name=self.name,
path_based_rules=self.path_based_rules,
policy_settings=self.policy_settings,
provisioning_state=self.provisioning_state,
resource_state=self.resource_state,
tags=self.tags,
type=self.type)
def get_web_application_firewall_policy(policy_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWebApplicationFirewallPolicyResult:
"""
Defines web application firewall policy.
:param str policy_name: The name of the policy.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['policyName'] = policy_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20191101:getWebApplicationFirewallPolicy', __args__, opts=opts, typ=GetWebApplicationFirewallPolicyResult).value
return AwaitableGetWebApplicationFirewallPolicyResult(
application_gateways=__ret__.application_gateways,
custom_rules=__ret__.custom_rules,
etag=__ret__.etag,
http_listeners=__ret__.http_listeners,
id=__ret__.id,
location=__ret__.location,
managed_rules=__ret__.managed_rules,
name=__ret__.name,
path_based_rules=__ret__.path_based_rules,
policy_settings=__ret__.policy_settings,
provisioning_state=__ret__.provisioning_state,
resource_state=__ret__.resource_state,
tags=__ret__.tags,
type=__ret__.type)
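# Example usage sketch (resource names below are placeholders, not real resources): look up an
# existing WAF policy and read a couple of resolved properties. Nothing here runs at import time.
def _example_get_web_application_firewall_policy():
    result = get_web_application_firewall_policy(
        policy_name="example-waf-policy",
        resource_group_name="example-resource-group")
    return result.provisioning_state, result.location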
|
py | 1a3d96a1b64aac9a0176262327efef6249b8e826 |
import os, sys, time, random, argparse, math
import numpy as np
from copy import deepcopy
from collections import defaultdict
import torch
import torch.nn as nn
import wandb
from tqdm import tqdm
from pathlib import Path
from hessian_eigenthings import compute_hessian_eigenthings
lib_dir = (Path(__file__).parent / '..' / '..' / 'lib').resolve()
if str(lib_dir) not in sys.path: sys.path.insert(0, str(lib_dir))
from config_utils import load_config, dict2config, configure2str
from datasets import get_datasets, get_nas_search_loaders
from procedures import prepare_seed, prepare_logger, save_checkpoint, copy_checkpoint, get_optim_scheduler
from log_utils import AverageMeter, time_string, convert_secs2time
from utils import count_parameters_in_MB, obtain_accuracy
from utils.sotl_utils import _hessian, analyze_grads, eval_archs_on_batch, summarize_results_by_dataset, avg_nested_dict
from typing import *
from models.cell_searchs.generic_model import ArchSampler
import collections
def sample_arch_and_set_mode_search(args, outer_iter, sampled_archs, api, network, algo, arch_sampler,
step, logger, epoch, supernets_decomposition, all_archs, arch_groups_brackets, placement=None):
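  # Sample a single architecture according to `algo` (gdas / setn / darts / random variants / enas),
  # put the supernet into the matching calculation mode, and return the sampled genotype
  # (None for the pure uniform-random-sampling branches, where no explicit genotype is drawn).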
parsed_algo = algo.split("_")
sampling_done, lowest_loss_arch, lowest_loss = False, None, 10000 # Used for GreedyNAS online search space pruning - might have to resample many times until we find an architecture below the required threshold
sampled_arch = None
branch = None
if algo.startswith('setn'):
branch = "setn"
sampled_arch = network.dync_genotype(True)
network.set_cal_mode('dynamic', sampled_arch)
elif algo.startswith('gdas'):
branch = "gdas"
network.set_cal_mode('gdas', None)
if sampled_archs is not None and (not args.refresh_arch_oneshot or (args.refresh_arch_oneshot == "train_real" and placement in ["inner_sandwich", "outer"])):
assert placement in ["inner_sandwich", "outer", None]
network.last_gumbels = sampled_archs[outer_iter]
network.refresh_arch_oneshot = False
if epoch < 2 and step < 3:
logger.log(f"Set Gumbels at epoch={epoch}, outer_iter={outer_iter} = {network.last_gumbels}")
sampled_arch = network.genotype
elif algo.startswith('darts'):
branch = "darts"
network.set_cal_mode('joint', None)
sampled_arch = network.genotype
elif "random_" in algo and len(parsed_algo) > 1 and ("perf" in algo or "size" in algo):
branch = "random1"
if args.search_space_paper == "nats-bench":
sampled_arch = arch_sampler.sample()[0]
network.set_cal_mode('dynamic', sampled_arch)
else:
network.set_cal_mode('urs')
elif "random" in algo and args.evenly_split is not None: # TODO should just sample outside of the function and pass it in as all_archs?
branch = "random2"
sampled_arch = arch_sampler.sample(mode="evenly_split", candidate_num = args.eval_candidate_num)[0]
network.set_cal_mode('dynamic', sampled_arch)
elif "random" in algo and args.sandwich is not None and args.sandwich > 1:
branch = "random_quartiles"
if args.search_space_paper == "nats-bench":
assert args.sandwich == 4 or args.sandwich_mode != "quartiles" # 4 corresponds to using quartiles
if step < 2 and epoch is not None and epoch < 2:
logger.log(f"Sampling from the Sandwich branch with sandwich={args.sandwich} and sandwich_mode={args.sandwich_mode}")
logger.log(f"Sampled archs = {[api.archstr2index[x.tostr()] for x in sampled_archs]}, cur arch = {sampled_archs[outer_iter]}")
sampled_arch = sampled_archs[outer_iter] # Pick the corresponding quartile architecture for this iteration
network.set_cal_mode('dynamic', sampled_arch)
else:
sampled_arch = sampled_archs[outer_iter]
network.set_cal_mode('dynamic', sampled_arch)
elif "random" in algo and args.sandwich is not None and args.sandwich > 1 and args.sandwich_mode == "fairnas":
branch = "random_fairnas"
assert args.sandwich == len(network._op_names)
sampled_arch = sampled_archs[outer_iter] # Pick the corresponding quartile architecture for this iteration
if step < 2 and epoch is not None and epoch < 2:
logger.log(f"Sampling from the FairNAS branch with sandwich={args.sandwich} and sandwich_mode={args.sandwich_mode}, arch={sampled_arch}")
logger.log(f"Sampled archs = {[api.archstr2index[x.tostr()] for x in sampled_archs]}, cur arch = {sampled_archs[outer_iter]}")
network.set_cal_mode('dynamic', sampled_arch)
elif "random_" in algo and "grad" in algo:
network.set_cal_mode('urs')
elif algo == 'random': # NOTE the original branch needs to be last so that it is fall-through for all the special 'random' branches
branch = "random"
if supernets_decomposition or all_archs is not None or arch_groups_brackets is not None:
branch = "random_weird"
if all_archs is not None:
sampled_arch = random.sample(all_archs, 1)[0]
network.set_cal_mode('dynamic', sampled_arch)
else:
if args.search_space_paper == "nats-bench":
sampled_arch = arch_sampler.sample(mode="random")[0]
network.set_cal_mode('dynamic', sampled_arch)
else:
sampled_arch = network.sample_arch()
network.set_cal_mode('dynamic', sampled_arch)
else:
if args.search_space_paper == "nats-bench":
branch="random_basic"
network.set_cal_mode('urs', None)
else:
sampled_arch = network.sample_arch()
network.set_cal_mode('dynamic', sampled_arch)
elif algo == 'enas':
with torch.no_grad():
network.controller.eval()
_, _, sampled_arch = network.controller()
network.set_cal_mode('dynamic', sampled_arch)
else:
raise ValueError('Invalid algo name : {:}'.format(algo))
# if step < 2:
# print(f"Sample_arch through branch={branch}")
return sampled_arch
def sample_new_arch(network, algo, arch_sampler, sandwich_archs, all_archs, base_inputs, base_targets, arch_overview, loss_threshold, outer_iter, step, logger, supernets_decomposition, arch_groups_brackets, args):
# Need to sample a new architecture (considering it as a meta-batch dimension)
parsed_algo = algo.split("_")
sampling_done = False # Used for GreedyNAS online search space pruning - might have to resample many times until we find an architecture below the required threshold
lowest_loss_arch = None
lowest_loss = 10000
while not sampling_done: # TODO the sampling_done should be useful for like online sampling with rejections maybe
if algo == 'setn':
sampled_arch = network.dync_genotype(True)
network.set_cal_mode('dynamic', sampled_arch)
elif algo == 'gdas':
network.set_cal_mode('gdas', None)
sampled_arch = network.genotype
elif algo.startswith('darts'):
network.set_cal_mode('joint', None)
sampled_arch = network.genotype
elif "random_" in algo and len(parsed_algo) > 1 and ("perf" in algo or "size" in algo):
if args.search_space_paper == "nats-bench":
sampled_arch = arch_sampler.sample()[0]
network.set_cal_mode('dynamic', sampled_arch)
else:
network.set_cal_mode('urs')
# elif "random" in algo and args.evenly_split is not None: # TODO should just sample outside of the function and pass it in as all_archs?
# sampled_arch = arch_sampler.sample(mode="evenly_split", candidate_num = args.eval_candidate_num)[0]
# network.set_cal_mode('dynamic', sampled_arch)
elif "random" in algo and args.sandwich is not None and args.sandwich > 1 and args.sandwich_computation == "parallel":
assert args.sandwich_mode != "quartiles", "Not implemented yet"
sampled_arch = sandwich_archs[outer_iter]
network.set_cal_mode('dynamic', sampled_arch)
elif "random" in algo and args.sandwich is not None and args.sandwich > 1 and args.sandwich_mode == "quartiles":
if args.search_space_paper == "nats-bench":
assert args.sandwich == 4 # 4 corresponds to using quartiles
if step == 0:
logger.log(f"Sampling from the Sandwich branch with sandwich={args.sandwich} and sandwich_mode={args.sandwich_mode}")
sampled_archs = arch_sampler.sample(mode = "quartiles", subset = all_archs, candidate_num=args.sandwich) # Always samples 4 new archs but then we pick the one from the right quartile
sampled_arch = sampled_archs[outer_iter] # Pick the corresponding quartile architecture for this iteration
network.set_cal_mode('dynamic', sampled_arch)
else:
network.set_cal_mode('urs')
elif "random_" in algo and "grad" in algo:
network.set_cal_mode('urs')
elif algo == 'random': # NOTE the original branch needs to be last so that it is fall-through for all the special 'random' branches
if supernets_decomposition or all_archs is not None or arch_groups_brackets is not None:
if all_archs is not None:
sampled_arch = random.sample(all_archs, 1)[0]
network.set_cal_mode('dynamic', sampled_arch)
else:
if args.search_space_paper == "nats-bench":
sampled_arch = arch_sampler.sample(mode="random")[0]
network.set_cal_mode('dynamic', sampled_arch)
else:
network.set_cal_mode('urs', None)
else:
network.set_cal_mode('urs', None)
elif algo == 'enas':
with torch.no_grad():
network.controller.eval()
_, _, sampled_arch = network.controller()
network.set_cal_mode('dynamic', sampled_arch)
else:
raise ValueError('Invalid algo name : {:}'.format(algo))
if loss_threshold is not None:
with torch.no_grad():
_, logits = network(base_inputs)
base_loss = criterion(logits, base_targets) * (1 if args.sandwich is None else 1/args.sandwich)
if base_loss.item() < lowest_loss:
lowest_loss = base_loss.item()
lowest_loss_arch = sampled_arch
if base_loss.item() < loss_threshold:
sampling_done = True
else:
sampling_done = True
if sampling_done:
arch_overview["cur_arch"] = sampled_arch
arch_overview["all_archs"].append(sampled_arch)
arch_overview["all_cur_archs"].append(sampled_arch)
return sampled_arch
def format_input_data(base_inputs, base_targets, arch_inputs, arch_targets, search_loader_iter, inner_steps, args, loader_type="train-val"):
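  # Move the current (base, arch) batch to the GPU and collect `inner_steps` batches in total,
  # reusing the same batch when args.inner_steps_same_batch is set; with higher_method == "sotl"
  # the arch batch is dropped (set to None) since only training losses are needed.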
# base_inputs, arch_inputs = base_inputs.cuda(non_blocking=True), arch_inputs.cuda(non_blocking=True)
# base_targets, arch_targets = base_targets.cuda(non_blocking=True), arch_targets.cuda(non_blocking=True)
base_inputs, base_targets = base_inputs.cuda(non_blocking=True), base_targets.cuda(non_blocking=True)
arch_inputs, arch_targets = arch_inputs.cuda(non_blocking=True), arch_targets.cuda(non_blocking=True)
if args.higher_method == "sotl":
arch_inputs, arch_targets = None, None
all_base_inputs, all_base_targets, all_arch_inputs, all_arch_targets = [base_inputs], [base_targets], [arch_inputs], [arch_targets]
for extra_step in range(inner_steps-1):
if args.inner_steps_same_batch:
all_base_inputs.append(base_inputs)
all_base_targets.append(base_targets)
all_arch_inputs.append(arch_inputs)
all_arch_targets.append(arch_targets)
continue # If using the same batch, we should not try to query the search_loader_iter for more samples
try:
if loader_type == "train-val" or loader_type == "train-train":
extra_base_inputs, extra_base_targets, extra_arch_inputs, extra_arch_targets = next(search_loader_iter)
else:
extra_base_inputs, extra_base_targets = next(search_loader_iter)
extra_arch_inputs, extra_arch_targets = None, None
except:
continue
# extra_base_inputs, extra_arch_inputs = extra_base_inputs.cuda(non_blocking=True), extra_arch_inputs.cuda(non_blocking=True)
# extra_base_targets, extra_arch_targets = extra_base_targets.cuda(non_blocking=True), extra_arch_targets.cuda(non_blocking=True)
extra_base_inputs, extra_base_targets = extra_base_inputs.cuda(non_blocking=True), extra_base_targets.cuda(non_blocking=True)
if extra_arch_inputs is not None and extra_arch_targets is not None:
extra_arch_inputs, extra_arch_targets = extra_arch_inputs.cuda(non_blocking=True), extra_arch_targets.cuda(non_blocking=True)
all_base_inputs.append(extra_base_inputs)
all_base_targets.append(extra_base_targets)
all_arch_inputs.append(extra_arch_inputs)
all_arch_targets.append(extra_arch_targets)
return all_base_inputs, all_base_targets, all_arch_inputs, all_arch_targets
def update_brackets(supernet_train_stats_by_arch, supernet_train_stats, supernet_train_stats_avgmeters, arch_groups_brackets, arch_overview, items, all_brackets, sampled_arch, args):
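  # Append the latest metric values to the per-architecture history and to the per-bracket running
  # statistics; brackets other than the sampled architecture's own bracket are padded with their
  # previous value (or a placeholder) so that all series stay the same length.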
if arch_overview["cur_arch"] is not None:
if type(arch_groups_brackets) is dict:
cur_bracket = arch_groups_brackets[arch_overview["cur_arch"].tostr()]
for key, val in items:
supernet_train_stats_by_arch[sampled_arch.tostr()][key].append(val)
for bracket in all_brackets:
if bracket == cur_bracket:
supernet_train_stats[key]["sup"+str(cur_bracket)].append(val)
supernet_train_stats_avgmeters[key+"AVG"]["sup"+str(cur_bracket)].update(val)
supernet_train_stats[key+"AVG"]["sup"+str(cur_bracket)].append(supernet_train_stats_avgmeters[key+"AVG"]["sup"+str(cur_bracket)].avg)
else:
item_to_add = supernet_train_stats[key]["sup"+str(bracket)][-1] if len(supernet_train_stats[key]["sup"+str(bracket)]) > 0 else 3.14159
supernet_train_stats[key]["sup"+str(bracket)].append(item_to_add)
avg_to_add = supernet_train_stats_avgmeters[key+"AVG"]["sup"+str(bracket)].avg if supernet_train_stats_avgmeters[key+"AVG"]["sup"+str(bracket)].avg > 0 else 3.14159
supernet_train_stats[key+"AVG"]["sup"+str(bracket)].append(avg_to_add)
def get_finetune_scheduler(scheduler_type, config, xargs, network2, epochs=None, logger=None, best_lr=None):
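  # Build a fresh (optimizer, scheduler, criterion) triple for short post-search finetuning;
  # `scheduler_type` selects between constant, cosine variants (reinit/restarts/fast/warmup),
  # linear and from-scratch (200E/12E/1E) configurations.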
if scheduler_type in ['linear_warmup', 'linear']:
config = config._replace(scheduler=scheduler_type, warmup=1, eta_min=0, decay = 0.0005 if xargs.postnet_decay is None else xargs.postnet_decay)
w_optimizer2, w_scheduler2, criterion = get_optim_scheduler(network2.weights, config)
elif scheduler_type == "cos_reinit":
# In practice, this leads to constant LR = 0.025 since the original Cosine LR is annealed over 100 epochs and our training schedule is very short
w_optimizer2, w_scheduler2, criterion = get_optim_scheduler(network2.weights, config)
elif scheduler_type == "cos_restarts":
config = config._replace(scheduler='cos_restarts', warmup=0, epochs=epochs, decay = 0.0005 if xargs.postnet_decay is None else xargs.postnet_decay)
w_optimizer2, w_scheduler2, criterion = get_optim_scheduler(network2.weights, config, xargs)
elif scheduler_type in ['cos_adjusted']:
config = config._replace(scheduler='cos', warmup=0, epochs=epochs, decay = 0.0005 if xargs.postnet_decay is None else xargs.postnet_decay)
w_optimizer2, w_scheduler2, criterion = get_optim_scheduler(network2.weights, config)
elif scheduler_type in ['cos_fast']:
config = config._replace(scheduler='cos', warmup=0, LR=0.001 if xargs.lr is None else xargs.lr, epochs=epochs, eta_min=0, decay = 0.0005 if xargs.postnet_decay is None else xargs.postnet_decay)
w_optimizer2, w_scheduler2, criterion = get_optim_scheduler(network2.weights, config)
elif scheduler_type in ['cos_warmup']:
config = config._replace(scheduler='cos', warmup=1, LR=0.001 if xargs.lr is None else xargs.lr, epochs=epochs, eta_min=0, decay = 0.0005 if xargs.postnet_decay is None else xargs.postnet_decay)
w_optimizer2, w_scheduler2, criterion = get_optim_scheduler(network2.weights, config)
elif scheduler_type in ["scratch"]:
config_opt = load_config('./configs/nas-benchmark/hyper-opts/200E.config', None, logger)
config_opt = config_opt._replace(LR=0.1 if xargs.lr is None else xargs.lr, decay = 0.0005 if xargs.postnet_decay is None else xargs.postnet_decay)
w_optimizer2, w_scheduler2, criterion = get_optim_scheduler(network2.weights, config_opt)
elif scheduler_type in ["scratch12E"]:
config_opt = load_config('./configs/nas-benchmark/hyper-opts/12E.config', None, logger)
config_opt = config_opt._replace(LR=0.1 if xargs.lr is None else xargs.lr, decay = 0.0005 if xargs.postnet_decay is None else xargs.postnet_decay)
w_optimizer2, w_scheduler2, criterion = get_optim_scheduler(network2.weights, config_opt)
elif scheduler_type in ["scratch1E"]:
config_opt = load_config('./configs/nas-benchmark/hyper-opts/01E.config', None, logger)
config_opt = config_opt._replace(LR=0.1 if xargs.lr is None else xargs.lr, decay = 0.0005 if xargs.postnet_decay is None else xargs.postnet_decay)
w_optimizer2, w_scheduler2, criterion = get_optim_scheduler(network2.weights, config_opt)
elif (xargs.lr is not None or (xargs.lr is None and bool(xargs.adaptive_lr) == True)) and scheduler_type == 'constant':
config = config._replace(scheduler='constant', constant_lr=xargs.lr if not xargs.adaptive_lr else best_lr,
decay = 0.0005 if xargs.postnet_decay is None else xargs.postnet_decay)
w_optimizer2, w_scheduler2, criterion = get_optim_scheduler(network2.weights, config)
elif scheduler_type == "constant":
config = config._replace(scheduler='constant', constant_lr=xargs.lr if not xargs.adaptive_lr else best_lr, decay = 0.0005 if xargs.postnet_decay is None else xargs.postnet_decay)
w_optimizer2, w_scheduler2, criterion = get_optim_scheduler(network2.weights, config)
else:
print(f"Unrecognized scheduler at {scheduler_type}")
raise NotImplementedError
return w_optimizer2, w_scheduler2, criterion
def find_best_lr(xargs, network2, train_loader, config, arch_idx):
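  # Optionally pick a finetuning learning rate: "1cycle" runs torch_lr_finder's range test, while
  # "custom" tries a small geometric grid of constant LRs on a few minibatches; returns None when
  # adaptive LR selection is disabled.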
  lr_counts = defaultdict(int)
if xargs.adaptive_lr == "1cycle":
from torch_lr_finder import LRFinder
network3 = deepcopy(network2)
network3.logits_only = True
w_optimizer3, _, criterion = get_optim_scheduler(network3.weights, config, attach_scheduler=False)
lr_finder = LRFinder(network3, w_optimizer3, criterion, device="cuda")
lr_finder.range_test(train_loader, start_lr=0.0001, end_lr=1, num_iter=100)
best_lr = lr_finder.history["lr"][(np.gradient(np.array(lr_finder.history["loss"]))).argmin()]
try:
lr_plot_ax, weird_lr = lr_finder.plot(suggest_lr=True) # to inspect the loss-learning rate graph
except:
lr_plot_ax = lr_finder.plot(suggest_lr=False)
lr_finder.reset() # to reset the model and optimizer to their initial state
wandb.log({"lr_plot": lr_plot_ax}, commit=False)
elif xargs.adaptive_lr == "custom":
lrs = np.geomspace(1, 0.001, 10)
lr_results = {}
avg_of_avg_loss = AverageMeter()
for lr in tqdm(lrs, desc="Searching LRs"):
network3 = deepcopy(network2)
print(str(list(network3.parameters()))[0:100])
config = config._replace(scheduler='constant', constant_lr=lr)
w_optimizer3, _, criterion = get_optim_scheduler(network3.weights, config)
avg_loss = AverageMeter()
for batch_idx, data in tqdm(enumerate(train_loader), desc = f"Training in order to find the best LR for arch_idx={arch_idx}", disable=True):
if batch_idx > 20:
break
network3.zero_grad()
inputs, targets = data
inputs = inputs.cuda(non_blocking=True)
targets = targets.cuda(non_blocking=True)
_, logits = network3(inputs)
train_acc_top1, train_acc_top5 = obtain_accuracy(logits.data, targets.data, topk=(1, 5))
loss = criterion(logits, targets)
avg_loss.update(loss.item())
loss.backward()
w_optimizer3.step()
lr_results[lr] = avg_loss.avg
avg_of_avg_loss.update(avg_loss.avg)
best_lr = min(lr_results, key = lambda k: lr_results[k])
print(lr_results)
lr_counts[best_lr] += 1
else:
best_lr = None
return best_lr
def sample_arch_and_set_mode(network, algo, arch_sampler, all_archs, parsed_algo, args, step, logger, sampled_archs, outer_iter):
sampled_arch = None
if algo.startswith('setn'):
sampled_arch = network.dync_genotype(True)
network.set_cal_mode('dynamic', sampled_arch)
elif algo.startswith('gdas'):
network.set_cal_mode('gdas', None)
sampled_arch = network.genotype
elif algo.startswith('darts'):
network.set_cal_mode('joint', None)
sampled_arch = network.genotype
elif "random_" in algo and len(parsed_algo) > 1 and ("perf" in algo or "size" in algo):
if args.search_space_paper == "nats-bench":
sampled_arch = arch_sampler.sample()[0]
network.set_cal_mode('dynamic', sampled_arch)
else:
network.set_cal_mode('urs')
elif "random" in algo and args.sandwich is not None and args.sandwich > 1 and args.sandwich_mode == "quartiles":
if args.search_space_paper == "nats-bench":
assert args.sandwich == 4 # 4 corresponds to using quartiles
if step == 0:
logger.log(f"Sampling from the Sandwich branch with sandwich={args.sandwich} and sandwich_mode={args.sandwich_mode}")
sampled_arch = sampled_archs[outer_iter] # Pick the corresponding quartile architecture for this iteration
network.set_cal_mode('dynamic', sampled_arch)
else:
network.set_cal_mode('urs')
elif "random" in algo and args.sandwich is not None and args.sandwich > 1 and args.sandwich_mode == "fairnas":
assert args.sandwich == len(network._op_names)
sampled_arch = sampled_archs[outer_iter] # Pick the corresponding quartile architecture for this iteration
if step == 0:
logger.log(f"Sampling from the FairNAS branch with sandwich={args.sandwich} and sandwich_mode={args.sandwich_mode}, arch={sampled_arch}")
network.set_cal_mode('dynamic', sampled_arch)
elif "random_" in algo and "grad" in algo:
network.set_cal_mode('urs')
elif algo == 'random': # NOTE the original branch needs to be last so that it is fall-through for all the special 'random' branches
if all_archs is not None:
sampled_arch = random.sample(all_archs, 1)[0]
network.set_cal_mode('dynamic', sampled_arch)
else:
if args.search_space_paper == "nats-bench":
sampled_arch = arch_sampler.sample(mode="random")[0]
network.set_cal_mode('dynamic', sampled_arch)
else:
sampled_arch = network.sample_arch()
network.set_cal_mode('dynamic', sampled_arch)
elif algo == 'enas':
with torch.no_grad():
network.controller.eval()
_, _, sampled_arch = network.controller()
network.set_cal_mode('dynamic', sampled_arch)
else:
raise ValueError('Invalid algo name : {:}'.format(algo))
return sampled_arch
def valid_func(xloader, network, criterion, algo, logger, steps=None, grads=False):
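  # Evaluate the supernet in its current calculation mode on `xloader` (optionally only the first
  # `steps` batches); gradients are accumulated only when grads=True and no optimizer step is taken.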
data_time, batch_time = AverageMeter(), AverageMeter()
loss, top1, top5 = AverageMeter(), AverageMeter(), AverageMeter()
end = time.time()
with torch.set_grad_enabled(grads):
network.eval()
for step, (arch_inputs, arch_targets) in enumerate(xloader):
if steps is not None and step >= steps:
break
arch_targets = arch_targets.cuda(non_blocking=True)
# prediction
_, logits = network(arch_inputs.cuda(non_blocking=True))
arch_loss = criterion(logits, arch_targets)
if grads:
arch_loss.backward()
# record
arch_prec1, arch_prec5 = obtain_accuracy(logits.data, arch_targets.data, topk=(1, 5))
loss.update(arch_loss.item(), arch_inputs.size(0))
top1.update (arch_prec1.item(), arch_inputs.size(0))
top5.update (arch_prec5.item(), arch_inputs.size(0))
network.train()
return loss.avg, top1.avg, top5.avg
def train_controller(xloader, network, criterion, optimizer, prev_baseline, epoch_str, print_freq, logger, xargs, w_optimizer=None, train_loader=None):
# config. (containing some necessary arg)
# baseline: The baseline score (i.e. average val_acc) from the previous epoch
# NOTE the xloader is typically val loader
data_time, batch_time = AverageMeter(), AverageMeter()
GradnormMeter, LossMeter, ValAccMeter, EntropyMeter, BaselineMeter, RewardMeter, xend = AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter(), time.time()
controller_num_aggregate = 20
controller_train_steps = 50
controller_bl_dec = 0.99
controller_entropy_weight = 0.0001
network.eval()
network.controller.train()
network.controller.zero_grad()
loader_iter = iter(xloader)
for step in tqdm(range(controller_train_steps * controller_num_aggregate), desc = "Training controller", total=controller_train_steps*controller_num_aggregate):
try:
inputs, targets = next(loader_iter)
except:
loader_iter = iter(xloader)
inputs, targets = next(loader_iter)
inputs = inputs.cuda(non_blocking=True)
targets = targets.cuda(non_blocking=True)
# measure data loading time
data_time.update(time.time() - xend)
log_prob, entropy, sampled_arch = network.controller()
if xargs.discrete_diffnas_method in [None, "val"]:
with torch.no_grad():
network.set_cal_mode('dynamic', sampled_arch)
_, logits = network(inputs)
loss = criterion(logits, targets)
reward_metric, val_top5 = obtain_accuracy(logits.data, targets.data, topk=(1, 5))
reward_metric = reward_metric.view(-1) / 100
elif xargs.discrete_diffnas_method in ["sotl"]:
if step == 0: print(f"ENAS train controller - supernet weight sample before finetune: {str(list(network.parameters())[1])[0:80]}")
eval_metrics, finetune_metrics = eval_archs_on_batch(xloader=xloader, archs=[sampled_arch], network=network, criterion=criterion, metric="loss",
train_steps=xargs.discrete_diffnas_steps, w_optimizer=w_optimizer, train_loader=train_loader,
progress_bar=False)
if step == 0: print(f"ENAS train controller - supernet weight sample after finetune (should be the same to make sure we do not change the original network): {str(list(network.parameters())[1])[0:80]}")
reward_metric = torch.tensor(finetune_metrics[sampled_arch]["sotl"][-1]) # Take the SOTL over all training steps as the reward
else:
raise NotImplementedError
reward = reward_metric + controller_entropy_weight * entropy
if prev_baseline is None:
baseline = reward_metric
else:
baseline = prev_baseline - (1 - controller_bl_dec) * (prev_baseline - reward)
loss = -1 * log_prob * (reward - baseline)
# account
RewardMeter.update(reward.item())
BaselineMeter.update(baseline.item())
ValAccMeter.update(reward_metric.item()*100)
LossMeter.update(loss.item())
EntropyMeter.update(entropy.item())
# Average gradient over controller_num_aggregate samples
loss = loss / controller_num_aggregate
loss.backward(retain_graph=True)
# measure elapsed time
batch_time.update(time.time() - xend)
xend = time.time()
if (step+1) % controller_num_aggregate == 0:
grad_norm = torch.nn.utils.clip_grad_norm_(network.controller.parameters(), 5.0)
GradnormMeter.update(grad_norm)
optimizer.step()
network.controller.zero_grad()
if step % print_freq == 0:
Sstr = '*Train-Controller* ' + time_string() + ' [{:}][{:03d}/{:03d}]'.format(epoch_str, step, controller_train_steps * controller_num_aggregate)
Tstr = 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})'.format(batch_time=batch_time, data_time=data_time)
Wstr = '[Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Reward {reward.val:.2f} ({reward.avg:.2f})] Baseline {basel.val:.2f} ({basel.avg:.2f})'.format(loss=LossMeter, top1=ValAccMeter, reward=RewardMeter, basel=BaselineMeter)
Estr = 'Entropy={:.4f} ({:.4f})'.format(EntropyMeter.val, EntropyMeter.avg)
logger.log(Sstr + ' ' + Tstr + ' ' + Wstr + ' ' + Estr)
return LossMeter.avg, ValAccMeter.avg, BaselineMeter.avg, RewardMeter.avg
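# Minimal standalone sketch (not called anywhere) of the moving-average baseline used above:
# with decay d, baseline <- baseline - (1 - d) * (baseline - reward), i.e. an exponential moving
# average of past rewards that reduces the variance of the REINFORCE update -1 * log_prob * (reward - baseline).
def _ema_baseline_demo(rewards, decay=0.99):
    baseline = None
    for reward in rewards:
        baseline = reward if baseline is None else baseline - (1 - decay) * (baseline - reward)
    return baseline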
def regularized_evolution_ws(network, train_loader, population_size, sample_size, mutate_arch, cycles, arch_sampler, api, config, xargs, train_steps=15, train_epochs=1, metric="loss"):
"""Algorithm for regularized evolution (i.e. aging evolution).
Follows "Algorithm 1" in Real et al. "Regularized Evolution for Image
Classifier Architecture Search".
  Args:
    cycles: the number of cycles the algorithm should run for; if None, it is derived from xargs.rea_epochs.
    population_size: the number of individuals to keep in the population.
    sample_size: the number of individuals that should participate in each tournament.
  Returns:
    A tuple (history, cur_best_arch, total_time): the per-cycle records of all models computed during
    the evolution experiment, the trace of the best architecture found so far, and the total simulated
    epoch budget spent.
  """
# init_model = deepcopy(network.state_dict())
# init_optim = deepcopy(w_optimizer.state_dict())
population = collections.deque()
api.reset_time()
history = [] # Not used by the algorithm, only used to report results.
cur_best_arch = []
stats = {"pop":{"mean":[], "std":[]}}
top_ns = [1, 5, 10]
total_time = 0
model_init = deepcopy(network)
cycle_len = train_epochs if train_steps is None else train_steps/len(train_loader)*train_epochs
if cycles is None:
assert xargs.rea_epochs is not None
cycles = xargs.rea_epochs / cycle_len # Just super large number because we are using epoch budget
print(f"Converted cycles=None to cycles={cycles} since rea_epochs={xargs.rea_epochs} and each cycle has cycle_len={cycle_len}")
# Initialize the population with random models.
while len(population) < population_size:
model = deepcopy(network)
w_optimizer, w_scheduler, criterion = get_finetune_scheduler(xargs.scheduler, config, xargs, model, None)
cur_arch = arch_sampler.random_topology_func()
model.set_cal_mode("dynamic", cur_arch)
metrics, sum_metrics = eval_archs_on_batch(xloader=train_loader, archs=[cur_arch], network = model, criterion=criterion,
train_steps=train_steps, epochs=train_epochs, same_batch=True, metric=metric, train_loader=train_loader, w_optimizer=w_optimizer, progress_bar=False)
if xargs.rea_metric in ['loss', 'acc']:
decision_metric, decision_lambda = metrics[0], lambda x: x[metric][0]
elif xargs.rea_metric in ['sotl']:
decision_metric, decision_lambda = sum_metrics["loss"], lambda x: x["sum"]["loss"][-1]
elif xargs.rea_metric in ['soacc']:
decision_metric, decision_lambda = sum_metrics["acc"], lambda x: x["sum"]["acc"]
model.metric = decision_metric
model.arch = cur_arch
ground_truth = summarize_results_by_dataset(cur_arch, api=api, iepoch=199, hp='200')
history_stats = {"model":model, metric: metrics[0], "sum": sum_metrics, "arch": cur_arch, "ground_truth": ground_truth}
# Append the info
population.append(history_stats)
history.append(history_stats)
total_time += cycle_len
print(history)
top_n_perfs = sorted(history, key = decision_lambda, reverse=True) # Should start with the best and end with the worst
# Reformatting history into top-N logging
top_perfs = {}
for top in top_ns:
top_perf = {nth_top: top_n_perfs[min(nth_top, len(top_n_perfs)-1)]["ground_truth"]
for nth_top in range(top)}
top_perf = avg_nested_dict(top_perf)
top_perfs["top"+str(top)] = top_perf
cur_best_arch.append(top_n_perfs[0]["arch"].tostr())
wandb.log({"ground_truth":top_perfs, "total_time": total_time})
# Carry out evolution in cycles. Each cycle produces a model and removes another.
for i in tqdm(range(round(cycles)), desc = "Cycling in REA"):
# Sample randomly chosen models from the current population.
if total_time >= xargs.rea_epochs:
logger.log("Breaking REA early because the total budget was reached")
break
start_time, sample = time.time(), []
while len(sample) < sample_size:
# Inefficient, but written this way for clarity. In the case of neural
# nets, the efficiency of this line is irrelevant because training neural
# nets is the rate-determining step.
candidate = random.choice(list(population))
sample.append(candidate)
# The parent is the best model in the sample.
parent = max(sample, key=lambda i: i["model"].metric)
# Create the child model and store it.
child = deepcopy(network)
w_optimizer, w_scheduler, criterion = get_finetune_scheduler(xargs.scheduler, config, xargs, child, None)
cur_arch = mutate_arch(parent["model"].arch)
child.arch = cur_arch
child.set_cal_mode("dynamic", cur_arch)
metrics, sum_metrics = eval_archs_on_batch(xloader=train_loader, archs=[cur_arch], network = child, criterion=criterion, train_steps=train_steps, epochs=train_epochs, same_batch=True, metric=metric, train_loader=train_loader, w_optimizer=w_optimizer)
if xargs.rea_metric in ['loss', 'acc']:
decision_metric, decision_lambda = metrics[0], lambda x: x[metric][0]
elif xargs.rea_metric in ['sotl']:
decision_metric, decision_lambda = sum_metrics["loss"], lambda x: x["sum"]["loss"]
elif xargs.rea_metric in ['soacc']:
decision_metric, decision_lambda = sum_metrics["acc"], lambda x: x["sum"]["acc"]
child.metric = decision_metric
child.arch = cur_arch
ground_truth = summarize_results_by_dataset(cur_arch, api=api, iepoch=199, hp='200')
history_stats = {"model":child, metric: metrics[0], "sum": sum_metrics, "arch": cur_arch, "ground_truth": ground_truth}
# Append the info
population.append(history_stats)
history.append(history_stats)
total_time += cycle_len
    top_n_perfs = sorted(history, key = decision_lambda, reverse=True) # Should start with the best and end with the worst
# Reformatting history into top-N logging
top_perfs = {}
for top in top_ns:
top_perf = {nth_top: top_n_perfs[nth_top]["ground_truth"]
for nth_top in range(top)}
top_perf = avg_nested_dict(top_perf)
top_perfs["top"+str(top)] = top_perf
cur_best_arch.append(top_n_perfs[0]["arch"].tostr())
if i % 50 == 0:
print(f"REA best perf at iter={i} is {top_n_perfs[0]['ground_truth']}")
wandb.log({"ground_truth":top_perfs, "total_time": total_time})
# Remove the oldest model.
population.popleft()
return history, cur_best_arch, total_time
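# Self-contained sketch (placeholder callables, not tied to NATS-Bench) of the aging-evolution loop
# implemented above: keep a FIFO population, take the best member of a random tournament as the
# parent, mutate it into a child, append the child and discard the oldest member.
def _aging_evolution_sketch(eval_fn, random_arch, mutate, cycles=100, population_size=10, sample_size=3):
    population, history = collections.deque(), []
    while len(population) < population_size:
        arch = random_arch()
        population.append((arch, eval_fn(arch)))
        history.append(population[-1])
    for _ in range(cycles):
        tournament = random.sample(list(population), sample_size)
        parent = max(tournament, key=lambda pair: pair[1])
        child = mutate(parent[0])
        population.append((child, eval_fn(child)))
        history.append(population[-1])
        population.popleft()
    return max(history, key=lambda pair: pair[1])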
def search_func_bare(xloader, network, criterion, scheduler, w_optimizer, a_optimizer, epoch_str, print_freq, algo, logger, args=None, epoch=None, smoke_test=False,
meta_learning=False, api=None, supernets_decomposition=None, arch_groups_quartiles=None, arch_groups_brackets: Dict=None,
all_archs=None, grad_metrics_percentiles=None, metrics_percs=None, percentiles=None, loss_threshold=None, replay_buffer = None, checkpoint_freq=3, val_loader=None, train_loader=None, meta_optimizer=None):
data_time, batch_time = AverageMeter(), AverageMeter()
base_losses, base_top1, base_top5 = AverageMeter(track_std=True), AverageMeter(track_std=True), AverageMeter()
arch_losses, arch_top1, arch_top5 = AverageMeter(track_std=True), AverageMeter(track_std=True), AverageMeter()
end = time.time()
network.train()
parsed_algo = algo.split("_")
if args.search_space_paper == "nats-bench":
if (len(parsed_algo) == 3 and ("perf" in algo or "size" in algo)): # Can be used with algo=random_size_highest etc. so that it gets parsed correctly
arch_sampler = ArchSampler(api=api, model=network, mode=parsed_algo[1], prefer=parsed_algo[2], op_names=network._op_names, max_nodes = args.max_nodes, search_space = args.search_space_paper)
else:
arch_sampler = ArchSampler(api=api, model=network, mode="random", prefer="random", op_names=network._op_names, max_nodes = args.max_nodes, search_space = args.search_space_paper) # TODO mode=perf is a placeholder so that it loads the perf_all_dict, but then we do sample(mode=random) so it does not actually exploit the perf information
else:
arch_sampler = None
arch_overview = {"cur_arch": None, "all_cur_archs": [], "all_archs": [], "top_archs_last_epoch": [], "train_loss": [], "train_acc": [], "val_acc": [], "val_loss": []}
search_loader_iter = iter(xloader)
if args.inner_steps is not None:
inner_steps = args.inner_steps
else:
inner_steps = 1 # SPOS equivalent
logger.log(f"Starting search with batch_size={len(next(iter(xloader)))}, len={len(xloader)}")
for step, (base_inputs, base_targets, arch_inputs, arch_targets) in tqdm(enumerate(search_loader_iter), desc = "Iterating over SearchDataset", total = round(len(xloader)/(inner_steps if not args.inner_steps_same_batch else 1))): # Accumulate gradients over backward for sandwich rule
all_base_inputs, all_base_targets, all_arch_inputs, all_arch_targets = format_input_data(base_inputs, base_targets, arch_inputs, arch_targets, search_loader_iter, inner_steps, args)
network.zero_grad()
if smoke_test and step >= 3:
break
if step == 0:
logger.log(f"New epoch (len={len(search_loader_iter)}) of arch; for debugging, those are the indexes of the first minibatch in epoch: {base_targets[0:10]}")
scheduler.update(None, 1.0 * step / len(xloader))
# measure data loading time
data_time.update(time.time() - end)
if (args.sandwich is None or args.sandwich == 1):
outer_iters = 1
else:
outer_iters = args.sandwich
if args.sandwich_mode in ["quartiles", "fairnas"]:
sampled_archs = arch_sampler.sample(mode = args.sandwich_mode, subset = all_archs, candidate_num=args.sandwich) # Always samples 4 new archs but then we pick the one from the right quartile
for outer_iter in range(outer_iters):
# Update the weights
# sampled_arch = sample_arch_and_set_mode(network, algo, arch_sampler)
sampled_arch = None
network.set_cal_mode("urs", None)
if sampled_arch is not None:
arch_overview["cur_arch"] = sampled_arch
arch_overview["all_archs"].append(sampled_arch)
arch_overview["all_cur_archs"].append(sampled_arch)
fnetwork = network
fnetwork.zero_grad()
diffopt = w_optimizer
for inner_step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(zip(all_base_inputs, all_base_targets, all_arch_inputs, all_arch_targets)):
if step in [0, 1] and inner_step < 3 and epoch % 5 == 0:
logger.log(f"Base targets in the inner loop at inner_step={inner_step}, step={step}: {base_targets[0:10]}")
# if algo.startswith("gdas"): # NOTE seems the forward pass doesnt explicitly change the genotype? The gumbels are always resampled in forward_gdas but it does not show up here
# logger.log(f"GDAS genotype at step={step}, inner_step={inner_step}, epoch={epoch}: {sampled_arch}")
_, logits = fnetwork(base_inputs)
base_loss = criterion(logits, base_targets) * (1 if args.sandwich is None else 1/args.sandwich)
base_loss.backward()
w_optimizer.step()
network.zero_grad()
base_prec1, base_prec5 = obtain_accuracy(logits.data, base_targets.data, topk=(1, 5))
base_losses.update(base_loss.item() / (1 if args.sandwich is None else 1/args.sandwich), base_inputs.size(0))
base_top1.update (base_prec1.item(), base_inputs.size(0))
base_top5.update (base_prec5.item(), base_inputs.size(0))
arch_loss = torch.tensor(10) # Placeholder in case it never gets updated here. It is not very useful in any case
# Preprocess the hypergradients into desired form
if algo == 'setn':
network.set_cal_mode('joint')
elif algo.startswith('gdas'):
network.set_cal_mode('gdas', None)
elif algo.startswith('darts'):
network.set_cal_mode('joint', None)
elif 'random' in algo:
network.set_cal_mode('urs', None)
elif algo != 'enas':
raise ValueError('Invalid algo name : {:}'.format(algo))
network.zero_grad()
if algo == 'darts-v2' and not args.meta_algo:
arch_loss, logits = backward_step_unrolled(network, criterion, base_inputs, base_targets, w_optimizer, arch_inputs, arch_targets, meta_learning=meta_learning)
a_optimizer.step()
elif (algo == 'random' or algo == 'enas' or 'random' in algo ) and not args.meta_algo:
if algo == "random" and args.merge_train_val_supernet:
arch_loss = torch.tensor(10) # Makes it slower and does not return anything useful anyways
else:
arch_loss = torch.tensor(10)
# with torch.no_grad():
# _, logits = network(arch_inputs)
# arch_loss = criterion(logits, arch_targets)
else:
# The Darts-V1/FOMAML/GDAS/who knows what else branch
network.zero_grad()
_, logits = network(arch_inputs)
arch_loss = criterion(logits, arch_targets)
arch_loss.backward()
a_optimizer.step()
arch_prec1, arch_prec5 = obtain_accuracy(logits.data, arch_targets.data, topk=(1, 5))
arch_losses.update(arch_loss.item(), arch_inputs.size(0))
arch_top1.update (arch_prec1.item(), arch_inputs.size(0))
arch_top5.update (arch_prec5.item(), arch_inputs.size(0))
arch_overview["val_acc"].append(arch_prec1)
arch_overview["val_loss"].append(arch_loss.item())
arch_overview["all_cur_archs"] = [] #Cleanup
network.zero_grad()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if step % print_freq == 0 or step + 1 == len(xloader):
Sstr = '*SEARCH* ' + time_string() + ' [{:}][{:03d}/{:03d}]'.format(epoch_str, step, len(xloader))
Tstr = 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})'.format(batch_time=batch_time, data_time=data_time)
Wstr = 'Base [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]'.format(loss=base_losses, top1=base_top1, top5=base_top5)
Astr = 'Arch [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]'.format(loss=arch_losses, top1=arch_top1, top5=arch_top5)
logger.log(Sstr + ' ' + Tstr + ' ' + Wstr + ' ' + Astr)
if step == print_freq:
logger.log(network.show_alphas())
eigenvalues=None
search_metric_stds,supernet_train_stats, supernet_train_stats_by_arch = {}, {}, {}
search_metric_stds = {"train_loss.std": base_losses.std, "train_loss_arch.std": base_losses.std, "train_acc.std": base_top1.std, "train_acc_arch.std": arch_top1.std}
return base_losses.avg, base_top1.avg, base_top5.avg, arch_losses.avg, arch_top1.avg, arch_top5.avg, supernet_train_stats, supernet_train_stats_by_arch, arch_overview, search_metric_stds, eigenvalues
def train_epoch(train_loader, network, w_optimizer, criterion, algo, logger):
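    # Weight-only training for one epoch: fixes the sampling mode implied by `algo` (dynamic
    # genotype for SETN, gdas/joint modes for GDAS/DARTS, uniform random sampling when "random" is
    # in the algo name) and runs plain SGD steps on the train loader without touching the
    # architecture parameters.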
data_time, batch_time = AverageMeter(), AverageMeter()
loss, top1, top5 = AverageMeter(), AverageMeter(), AverageMeter()
network.train()
if algo.startswith('setn'):
sampled_arch = network.dync_genotype(True)
network.set_cal_mode('dynamic', sampled_arch)
elif algo.startswith('gdas'):
network.set_cal_mode('gdas', None)
sampled_arch = network.genotype
elif algo.startswith('darts'):
network.set_cal_mode('joint', None)
sampled_arch = network.genotype
elif "random" in algo: # TODO REMOVE SOON
network.set_cal_mode('urs')
start = time.time()
for step, (inputs, targets) in tqdm(enumerate(train_loader), desc = "Iterating over batches while training weights only", total = len(train_loader)):
targets = targets.cuda(non_blocking=True)
_, logits = network(inputs.cuda(non_blocking=True))
train_loss = criterion(logits, targets)
train_loss.backward()
w_optimizer.step()
network.zero_grad()
prec1, prec5 = obtain_accuracy(logits.data, targets.data, topk=(1, 5))
loss.update(train_loss.item(), inputs.size(0))
top1.update (prec1.item(), inputs.size(0))
top5.update (prec5.item(), inputs.size(0))
end = time.time()
logger.log(f"Trained epoch in {end-start} time, avg loss = {loss.avg}, avg acc = {top1.avg}")
return loss.avg, top1.avg, top5.avg
def evenify_training(network2, train_loader, criterion, w_optimizer2, logger, arch_idx, epoch_eqs, sampled_arch):
# Train each architecture until they all reach the same amount of training as a preprocessing step before recording the training statistics for correlations
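    # Training stops once the running-average train loss drops below the target loss recorded for
    # this architecture (checked only after at least 15 batches, so the noisy early average is ignored).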
cur_epoch, target_loss = epoch_eqs[sampled_arch.tostr()]["epoch"], epoch_eqs[sampled_arch.tostr()]["val"]
max_epoch_attained = max([x["val"] for x in epoch_eqs.values()])
done = False
iter_count=0
while not done:
avg_loss = AverageMeter()
for batch_idx, data in tqdm(enumerate(train_loader), desc = f"Evenifying training for arch_idx={arch_idx}"):
if avg_loss.avg < target_loss and batch_idx >= 15 and avg_loss.avg != 0:
done = True
break
network2.zero_grad()
inputs, targets = data
inputs = inputs.cuda(non_blocking=True)
targets = targets.cuda(non_blocking=True)
_, logits = network2(inputs)
train_acc_top1, train_acc_top5 = obtain_accuracy(logits.data, targets.data, topk=(1, 5))
loss = criterion(logits, targets)
avg_loss.update(loss.item())
loss.backward()
w_optimizer2.step()
iter_count += 1
logger.log(f"Trained arch_idx for {iter_count} iterations to make it match up to {max_epoch_attained}")
def exact_hessian(network, val_loader, criterion, xloader, epoch, logger, args):
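    # Exact Hessian of the loss with respect to the architecture parameters: builds labels from op
    # name + node index, computes the full Hessian on one validation batch (and one training batch
    # unless train/val are merged), and returns the dominant eigenvalue plus the labelled spectrum.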
labels = []
for i in range(network._max_nodes):
for n in network._op_names:
labels.append(n + "_" + str(i))
network.logits_only=True
val_x, val_y = next(iter(val_loader))
val_loss = criterion(network(val_x.to('cuda')), val_y.to('cuda'))
try:
train_x, train_y, _, _ = next(iter(xloader))
except:
train_x, train_y = next(iter(xloader))
train_loss = criterion(network(train_x.to('cuda')), train_y.to('cuda'))
val_hessian_mat = _hessian(val_loss, network.arch_params())
if epoch == 0:
print(f"Example architecture Hessian: {val_hessian_mat}")
val_eigenvals, val_eigenvecs = torch.eig(val_hessian_mat)
try:
if not args.merge_train_val_supernet:
train_hessian_mat = _hessian(train_loss, network.arch_params())
train_eigenvals, train_eigenvecs = torch.eig(train_hessian_mat)
else:
train_eigenvals = val_eigenvals
except:
train_eigenvals = val_eigenvals
val_eigenvals = val_eigenvals[:, 0] # Drop the imaginary components
if epoch == 0:
print(f"Example architecture eigenvals: {val_eigenvals}")
train_eigenvals = train_eigenvals[:, 0]
val_dom_eigenvalue = torch.max(val_eigenvals)
train_dom_eigenvalue = torch.max(train_eigenvals)
eigenvalues = {"max":{}, "spectrum": {}}
eigenvalues["max"]["train"] = train_dom_eigenvalue
eigenvalues["max"]["val"] = val_dom_eigenvalue
eigenvalues["spectrum"]["train"] = {k:v for k,v in zip(labels, train_eigenvals)}
eigenvalues["spectrum"]["val"] = {k:v for k,v in zip(labels, val_eigenvals)}
network.logits_only=False
return eigenvalues
def approx_hessian(network, val_loader, criterion, xloader, args):
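    # Approximate dominant Hessian eigenvalue over the architecture parameters via power iteration
    # (compute_hessian_eigenthings), capped at 64 samples. Note that the "train" branch below also
    # evaluates on val_loader, so xloader is currently unused in this function.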
network.logits_only=True
val_eigenvals, val_eigenvecs = compute_hessian_eigenthings(network, val_loader, criterion, 1, mode="power_iter",
power_iter_steps=50, arch_only=True, max_samples=64, full_dataset=True)
val_dom_eigenvalue = val_eigenvals[0]
try:
if hasattr(args, "merge_train_val_supernet") and not args.merge_train_val_supernet:
train_eigenvals, train_eigenvecs = compute_hessian_eigenthings(network, val_loader, criterion, 1, mode="power_iter",
power_iter_steps=50, arch_only=True, max_samples=64, full_dataset=True)
train_dom_eigenvalue = train_eigenvals[0]
else:
train_eigenvals, train_eigenvecs = None, None
train_dom_eigenvalue = None
except:
train_eigenvals, train_eigenvecs, train_dom_eigenvalue = None, None, None
eigenvalues = {"max":{}, "spectrum": {}}
eigenvalues["max"]["val"] = val_dom_eigenvalue
eigenvalues["max"]["train"] = train_dom_eigenvalue
network.logits_only=False
network.zero_grad()
return eigenvalues
# The following three functions are used for DARTS-V2
def _concat(xs):
return torch.cat([x.view(-1) for x in xs])
def _hessian_vector_product(vector, network, criterion, base_inputs, base_targets, r=1e-2):
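    # Finite-difference Hessian-vector product used by the DARTS second-order update: perturb the
    # weights by +/- R*v, take architecture gradients at both points, and return (grad_p - grad_n) / (2R).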
R = r / _concat(vector).norm()
for p, v in zip(network.weights, vector):
p.data.add_(R, v)
_, logits = network(base_inputs)
loss = criterion(logits, base_targets)
grads_p = torch.autograd.grad(loss, network.alphas)
for p, v in zip(network.weights, vector):
p.data.sub_(2*R, v)
_, logits = network(base_inputs)
loss = criterion(logits, base_targets)
grads_n = torch.autograd.grad(loss, network.alphas)
for p, v in zip(network.weights, vector):
p.data.add_(R, v)
return [(x-y).div_(2*R) for x, y in zip(grads_p, grads_n)]
def backward_step_unrolled_darts(network, criterion, base_inputs, base_targets, w_optimizer, arch_inputs, arch_targets):
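    # DARTS-V2 style unrolled step for the list-of-alphas parameterization: builds a one-step
    # unrolled copy of the network, backpropagates the validation loss through it, and corrects the
    # architecture gradients with the Hessian-vector product above. Note that the state_dict
    # update/load for the unrolled copy is commented out below, so the recomputed weights are
    # written into model_dict but never loaded into unrolled_model.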
# _compute_unrolled_model
_, logits = network(base_inputs)
loss = criterion(logits, base_targets)
start=time.time()
LR, WD, momentum = w_optimizer.param_groups[0]['lr'], w_optimizer.param_groups[0]['weight_decay'], w_optimizer.param_groups[0]['momentum']
with torch.no_grad():
theta = _concat(network.weights)
try:
moment = _concat(w_optimizer.state[v]['momentum_buffer'] for v in network.weights)
moment = moment.mul_(momentum)
except:
moment = torch.zeros_like(theta)
dtheta = _concat(torch.autograd.grad(loss, network.weights)) + WD*theta
params = theta.sub(LR, moment+dtheta)
# print(f"Time of momentum whatever: {time.time()-start}")
start=time.time()
unrolled_model = deepcopy(network)
model_dict = unrolled_model.state_dict()
new_params, offset = {}, 0
start2=time.time()
for k, v in network.named_parameters():
if 'arch' in k or 'alpha' in k: continue
v_length = np.prod(v.size())
model_dict[k] = params[offset: offset+v_length].view(v.size())
offset += v_length
# print(f"Loading shit subroutine : {time.time()-start2}")
# model_dict.update(new_params)
# unrolled_model.load_state_dict(model_dict)
# print(f"Loading shit {time.time()-start}")
start=time.time()
unrolled_model.zero_grad()
_, unrolled_logits = unrolled_model(arch_inputs)
unrolled_loss = criterion(unrolled_logits, arch_targets)
unrolled_loss.backward()
# print(f"Model forward: {time.time()-start}")
dalpha = [p.grad for p in unrolled_model.arch_parameters]
vector = [v.grad.data for v in unrolled_model.weights]
start=time.time()
implicit_grads = _hessian_vector_product(vector, network, criterion, base_inputs, base_targets)
# print(f"time of hvp: {time.time()-start}")
for g, ig in zip(dalpha, implicit_grads):
# dalpha.data.sub_(LR, implicit_grads.data)
g.data.sub_(LR, ig.data)
for p, da in zip(network.arch_parameters, dalpha):
if p.grad is None:
p.grad = deepcopy( da )
else:
p.data.copy_( da.data )
return unrolled_loss.detach(), unrolled_logits.detach()
def backward_step_unrolled(network, criterion, base_inputs, base_targets, w_optimizer, arch_inputs, arch_targets, meta_learning=False):
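    # Unrolled (second-order) architecture update used for algo == 'darts-v2': same idea as
    # backward_step_unrolled_darts, but treats arch_parameters as a single tensor and actually loads
    # the unrolled weights into the copied model before the validation backward pass.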
# _compute_unrolled_model
if meta_learning in ['all', 'arch_only']:
base_inputs = arch_inputs
base_targets = arch_targets
_, logits = network(base_inputs)
loss = criterion(logits, base_targets)
LR, WD, momentum = w_optimizer.param_groups[0]['lr'], w_optimizer.param_groups[0]['weight_decay'], w_optimizer.param_groups[0]['momentum']
with torch.no_grad():
theta = _concat(network.weights)
try:
moment = _concat(w_optimizer.state[v]['momentum_buffer'] for v in network.weights)
moment = moment.mul_(momentum)
except:
moment = torch.zeros_like(theta)
dtheta = _concat(torch.autograd.grad(loss, network.weights)) + WD*theta
params = theta.sub(LR, moment+dtheta)
unrolled_model = deepcopy(network)
model_dict = unrolled_model.state_dict()
new_params, offset = {}, 0
for k, v in network.named_parameters():
if 'arch_parameters' in k: continue
v_length = np.prod(v.size())
new_params[k] = params[offset: offset+v_length].view(v.size())
offset += v_length
model_dict.update(new_params)
unrolled_model.load_state_dict(model_dict)
unrolled_model.zero_grad()
_, unrolled_logits = unrolled_model(arch_inputs)
unrolled_loss = criterion(unrolled_logits, arch_targets)
unrolled_loss.backward()
dalpha = unrolled_model.arch_parameters.grad
vector = [v.grad.data for v in unrolled_model.weights]
[implicit_grads] = _hessian_vector_product(vector, network, criterion, base_inputs, base_targets)
dalpha.data.sub_(LR, implicit_grads.data)
if network.arch_parameters.grad is None:
network.arch_parameters.grad = deepcopy( dalpha )
else:
network.arch_parameters.grad.data.copy_( dalpha.data )
return unrolled_loss.detach(), unrolled_logits.detach()
def update_supernets_decomposition(supernets_decomposition, arch_groups_quartiles, losses_percs, grad_metrics_percentiles, base_loss, data_step, epoch, xloader, sampled_arch,
fnetwork):
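    # Copies the current architecture's weight gradients into the percentile-bucket supernet it
    # belongs to, updates that bucket's gradient statistics via analyze_grads, and adds the current
    # training loss to the bucket's running loss meter.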
# TODO need to fix the logging here I think. The normal logging is much better now
cur_quartile = arch_groups_quartiles[sampled_arch.tostr()]
with torch.no_grad():
dw = [p.grad.detach().to('cpu') if p.grad is not None else torch.zeros_like(p).to('cpu') for p in
fnetwork.parameters()]
cur_supernet = supernets_decomposition[cur_quartile]
for decomp_w, g in zip(cur_supernet.parameters(), dw):
if decomp_w.grad is not None:
decomp_w.grad.copy_(g)
else:
decomp_w.grad = g
analyze_grads(cur_supernet, grad_metrics_percentiles["perc" + str(cur_quartile)]["supernet"],
true_step=data_step + epoch * len(xloader), total_steps=data_step + epoch * len(xloader))
if type(arch_groups_quartiles) is dict:
for quartile in arch_groups_quartiles.keys():
if quartile == cur_quartile:
losses_percs["perc" + str(quartile)].update(base_loss.item()) # TODO this doesnt make any sense
def bracket_tracking_setup(arch_groups_brackets, brackets_cond, arch_sampler):
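    # Builds the nested dicts and AverageMeters used to track supernet train/val loss and accuracy
    # per performance bracket ("sup<percentile>") and per individual architecture.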
all_brackets = set(arch_groups_brackets.values()) if brackets_cond else set()
supernet_train_stats = {"train_loss":{"sup"+str(percentile): [] for percentile in all_brackets},
"val_loss": {"sup"+str(percentile): [] for percentile in all_brackets},
"val_acc": {"sup"+str(percentile): [] for percentile in all_brackets},
"train_acc": {"sup"+str(percentile): [] for percentile in all_brackets}}
supernet_train_stats_by_arch = {arch: {"train_loss": [], "val_loss": [], "train_acc": [], "val_acc": []} for arch in (arch_sampler.archs if arch_sampler is not None else {})}
supernet_train_stats_avgmeters = {}
for k in list(supernet_train_stats.keys()):
supernet_train_stats[k+str("AVG")] = {"sup"+str(percentile): [] for percentile in all_brackets}
supernet_train_stats_avgmeters[k+str("AVG")] = {"sup"+str(percentile): AverageMeter() for percentile in all_brackets}
return all_brackets, supernet_train_stats, supernet_train_stats_by_arch, supernet_train_stats_avgmeters
def update_running(running, valid_loss=None, valid_acc = None, valid_acc_top5=None, loss=None, train_acc_top1=None,
train_acc_top5=None, sogn=None, sogn_norm=None, total_train_loss_for_sotl_aug=None):
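    # Accumulates the running sum-over-epochs statistics (SoTL, SoVL, summed accuracies, gradient
    # norms, ...). Losses are subtracted rather than added so that "larger is better" holds for
    # every key, consistent with the accuracy-based metrics.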
if valid_loss is not None:
running["sovl"] -= valid_loss
if valid_acc is not None:
running["sovalacc"] += valid_acc
# if valid_acc_top5 is not None:
# running["sovalacc_top5"] += valid_acc_top5
# if train_acc_top5 is not None:
# running["sotrainacc_top5"] += train_acc_top5
if loss is not None:
running["sotl"] -= loss # Need to have negative loss so that the ordering is consistent with val acc
if train_acc_top1 is not None:
running["sotrainacc"] += train_acc_top1
if sogn is not None:
# running["sogn"] += grad_metrics["train"]["sogn"]
running["sogn"] += sogn
if sogn_norm is not None:
# running["sogn_norm"] += grad_metrics["train"]["grad_normalized"]
running["sogn_norm"] += sogn_norm
if total_train_loss_for_sotl_aug is not None:
# running["sotl_aug"] = running["sotl"] + total_metrics_dict["total_train_loss"]
running["sotl_aug"] = running["sotl"] + total_train_loss_for_sotl_aug
if valid_loss is not None and loss is not None:
running["sobothl"] -= (valid_loss + loss)
return running
def update_base_metrics(metrics, running, metrics_keys=None, grad_metrics=None, drop_fancy=False, grads_analysis=None,
valid_acc=None, train_acc=None, loss=None, valid_loss=None, arch_str=None, epoch_idx = None):
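    # Appends the current running sums and per-step values (negated losses, accuracies, and
    # optionally the gradient statistics in grad_metrics) to the per-architecture, per-epoch metrics dict.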
if metrics_keys is None:
metrics_keys = metrics.keys()
for k in running.keys():
metrics[k][arch_str][epoch_idx].append(running[k])
metrics["val_acc"][arch_str][epoch_idx].append(valid_acc)
metrics["train_acc"][arch_str][epoch_idx].append(train_acc)
metrics["train_loss"][arch_str][epoch_idx].append(-loss)
metrics["val_loss"][arch_str][epoch_idx].append(-valid_loss)
metrics["gap_loss"][arch_str][epoch_idx].append(-valid_loss + (loss - valid_loss))
# if arch_str is not None and epoch_idx is not None:
# if len(metrics["train_loss"][arch_str][epoch_idx]) >= 3:
# loss_normalizer = sum(metrics["train_loss"][arch_str][epoch_idx][-3:])/3
# elif epoch_idx >= 1:
# loss_normalizer = sum(metrics["train_loss"][arch_str][epoch_idx-1][-3:])/3
# else:
# loss_normalizer = 1
# metrics["train_loss_pct"][arch_str][epoch_idx].append(1-loss/loss_normalizer)
data_types = ["train"] if not grads_analysis else ["train", "val", "total_train", "total_val"]
grad_log_keys = ["gn", "gnL1", "sogn", "sognL1", "grad_normalized", "grad_accum", "grad_accum_singleE", "grad_accum_decay", "grad_mean_accum", "grad_mean_sign", "grad_var_accum", "grad_var_decay_accum"]
if not drop_fancy and grad_metrics is not None:
for data_type in data_types:
for log_key in grad_log_keys:
                val = grad_metrics[data_type][log_key]
                metrics[data_type + "_" + log_key][arch_str][epoch_idx].append(val)
return metrics
def load_my_state_dict(model, state_dict):
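    # Partial state_dict loader: copies only parameters that already exist in the target model and
    # skips anything containing 'classifier', so checkpoints with a mismatched head can still
    # initialize the rest of the network.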
own_state = model.state_dict()
for name, param in state_dict.items():
if name not in own_state or 'classifier' in name:
continue
if isinstance(param, torch.nn.Parameter):
# backwards compatibility for serialized parameters
param = param.data
own_state[name].copy_(param)
def resolve_higher_conds(xargs):
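    # Derives the boolean switches that control how higher-order (bilevel/meta-learning) gradients
    # are computed, based on the meta_algo, higher_method and higher_order settings; the flags are
    # only printed and returned here, not applied.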
use_higher_cond = xargs.meta_algo and xargs.meta_algo not in ['reptile', 'metaprox']
if xargs.meta_algo is not None and 'darts' in xargs.meta_algo and xargs.higher_method == "joint" and (xargs.sandwich is None or xargs.sandwich == 1): # Special case for single-level DARTS training
print("Set use_higher_cond to False because using single-level DARTS most likely")
use_higher_cond = False
diffopt_higher_grads_cond = True if (xargs.meta_algo not in ['reptile', 'metaprox', 'reptile_higher'] and xargs.higher_order != "first") else False
monkeypatch_higher_grads_cond = True if (xargs.meta_algo not in ['reptile', 'metaprox', 'reptile_higher'] and (xargs.higher_order != "first" or xargs.higher_method == "val")) else False
first_order_grad_for_free_cond = xargs.higher_order == "first" and xargs.higher_method in ["sotl", "sotl_v2"]
first_order_grad_concurrently_cond = xargs.higher_order == "first" and xargs.higher_method.startswith("val")
second_order_grad_optimization_cond = xargs.higher_order == "second" and xargs.higher_method in ["sotl", "sotl_v2"]
print(f"Resolved higher conds as use_higher_cond={use_higher_cond}, diffopt_higher_grads_cond={diffopt_higher_grads_cond}, monkeypatch_higher_grads_cond={monkeypatch_higher_grads_cond}, first_order_grad_for_free_cond={first_order_grad_for_free_cond}, first_order_grad_concurrently_cond={first_order_grad_concurrently_cond}, second_order_grad_optimization_cond={second_order_grad_optimization_cond}")
return use_higher_cond, diffopt_higher_grads_cond, monkeypatch_higher_grads_cond, first_order_grad_for_free_cond, first_order_grad_concurrently_cond, second_order_grad_optimization_cond
def init_search_from_checkpoint(search_model, logger, xargs):
# The supernet init path can have form like '1,2,3' or 'darts_1,darts_2,darts_3' or 'cifar10_random_1, cifar10_random_2, cifar10_random_3'
split_path = xargs.supernet_init_path.split(",")
whole_path = split_path[xargs.rand_seed % len(split_path)]
logger.log(f"Picked {xargs.rand_seed % len(split_path)}-th seed from {xargs.supernet_init_path}")
if os.path.exists(xargs.supernet_init_path):
pass
else:
try:
dataset, algo = "cifar10", "random" # Defaults
parsed_init_path = whole_path.split("_") # Should be algo followed by seed number, eg. darts_1 or random_30 or cifar100_random_50
logger.log(f"Parsed init path into {parsed_init_path}")
if len(parsed_init_path) == 2:
seed_num = int(parsed_init_path[1])
seed_algo = parsed_init_path[0]
if len(parsed_init_path) == 3:
seed_num = int(parsed_init_path[2])
seed_algo = parsed_init_path[1]
dataset = parsed_init_path[0]
whole_path = f'./output/search-tss/{dataset}/{seed_algo}-affine0_BN0-None/checkpoint/seed-{seed_num}-basic.pth'
except Exception as e:
logger.log(f"Supernet init path does not seem to be formatted as seed number - it is {xargs.supernet_init_path}, error was {e}")
logger.log(f'Was given supernet checkpoint to use as initialization at {xargs.supernet_init_path}, decoded into {whole_path} and loaded its weights into search model')
checkpoint = torch.load(whole_path)
    # The remaining things that are usually contained in a checkpoint are reset to empty a bit further down
search_model.load_state_dict(checkpoint['search_model'], strict=False)
# load_my_state_dict(model, checkpoint["search_model"])
def init_supernets_decomposition(xargs, logger, checkpoint, network):
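    # Creates one zeroed copy of the supernet per percentile bucket plus the gradient/metric
    # containers for the decomposition analysis. Note: last_info_orig and total_epoch are assumed to
    # be defined in the enclosing module scope, since they are used below but not passed in.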
percentiles = [0, 25, 50, 75, 100]
    empty_network = deepcopy(network).to('cpu') # TODO we don't actually need to use these networks in the end? Could just use grad_metrics instead
with torch.no_grad():
for p in empty_network.parameters():
p.multiply_(0.)
    supernets_decomposition = {percentiles[i+1]: deepcopy(empty_network) for i in range(len(percentiles)-1)} # deepcopy per bucket so the percentile supernets do not all alias the same network object
supernets_decomposition["init"] = deepcopy(network)
logger.log(f'Initialized {len(percentiles)} supernets because supernet_decomposition={xargs.supernets_decomposition}')
arch_groups_quartiles = arch_percentiles(percentiles=percentiles, mode=xargs.supernets_decomposition_mode)
if (last_info_orig.exists() and "grad_metrics_percs" not in checkpoint.keys()) or not last_info_orig.exists():
# TODO what is the point of this archs_subset?
archs_subset = network.return_topK(-1 if xargs.supernets_decomposition_topk is None else xargs.supernets_decomposition_topk, use_random=False) # Should return all archs for negative K
grad_metrics_percs = {"perc"+str(percentiles[i+1]):init_grad_metrics(keys=["supernet"]) for i in range(len(percentiles)-1)}
else:
grad_metrics_percs = checkpoint["grad_metrics_percs"]
archs_subset = checkpoint["archs_subset"]
metrics_factory = {"perc"+str(percentile):[[] for _ in range(total_epoch)] for percentile in percentiles}
metrics_percs = DefaultDict_custom()
metrics_percs.set_default_item(metrics_factory)
return percentiles, supernets_decomposition, arch_groups_quartiles, archs_subset, grad_metrics_percs, metrics_factory, metrics_percs
def scheduler_step(w_scheduler2, epoch_idx, batch_idx, train_loader, steps_per_epoch, scheduler_type):
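    # Thin dispatcher that advances the weight LR scheduler using the convention each scheduler_type
    # expects (fractional progress through the epoch, or a reset to 0.0 for cos_reinit).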
if scheduler_type in ["linear", "linear_warmup"]:
w_scheduler2.update(epoch_idx, 1.0 * batch_idx / min(len(train_loader), steps_per_epoch))
elif scheduler_type == "cos_adjusted":
w_scheduler2.update(epoch_idx , batch_idx/min(len(train_loader), steps_per_epoch))
elif scheduler_type == "cos_reinit":
w_scheduler2.update(epoch_idx, 0.0)
elif scheduler_type in ['cos_fast', 'cos_warmup']:
w_scheduler2.update(epoch_idx , batch_idx/min(len(train_loader), steps_per_epoch))
else:
w_scheduler2.update(epoch_idx, 1.0 * batch_idx / len(train_loader))
def count_ops(arch):
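    # Counts how many times each primitive of the NATS-Bench/NAS-Bench-201 topology search space
    # appears in the architecture's string representation.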
ops = ['none', 'skip_connect', 'nor_conv_1x1', 'nor_conv_3x3', 'avg_pool_3x3']
arch_str = str(arch)
counts = {op: arch_str.count(op) for op in ops}
return counts
def grad_drop(params, p=0.0, arch_param_count=None, p_method=None):
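    # Gradient dropout: randomly zeroes a fraction p of each parameter's gradient entries in place
    # (with the usual 1/(1-p) rescaling of the survivors). The "adaptive" p_method is only stubbed
    # out below and currently performs no dropout.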
if p == 0:
pass
else:
# NB201 param avg: 0.3985MB
for param in params:
if param.requires_grad and param.grad is not None:
if p_method is None:
torch.nn.functional.dropout(param.grad, p, training = True, inplace = True)
elif p_method == "adaptive":
p = None
else:
raise NotImplementedError
def search_func_old(xloader, network, criterion, scheduler, w_optimizer, a_optimizer, epoch_str, print_freq, algo, logger):
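    # Original single-batch search step, presumably kept from the base repo: one weight update on
    # the base split followed by one architecture update on the arch split per iteration, with no
    # inner-step unrolling or sandwich-rule accumulation.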
data_time, batch_time = AverageMeter(), AverageMeter()
base_losses, base_top1, base_top5 = AverageMeter(), AverageMeter(), AverageMeter()
arch_losses, arch_top1, arch_top5 = AverageMeter(), AverageMeter(), AverageMeter()
end = time.time()
network.train()
for step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(xloader):
scheduler.update(None, 1.0 * step / len(xloader))
base_inputs = base_inputs.cuda(non_blocking=True)
arch_inputs = arch_inputs.cuda(non_blocking=True)
base_targets = base_targets.cuda(non_blocking=True)
arch_targets = arch_targets.cuda(non_blocking=True)
# measure data loading time
data_time.update(time.time() - end)
# Update the weights
if algo == 'setn':
sampled_arch = network.dync_genotype(True)
network.set_cal_mode('dynamic', sampled_arch)
elif algo == 'gdas':
network.set_cal_mode('gdas', None)
elif algo.startswith('darts'):
network.set_cal_mode('joint', None)
elif algo == 'random':
network.set_cal_mode('urs', None)
elif algo == 'enas':
with torch.no_grad():
network.controller.eval()
_, _, sampled_arch = network.controller()
network.set_cal_mode('dynamic', sampled_arch)
else:
raise ValueError('Invalid algo name : {:}'.format(algo))
network.zero_grad()
_, logits = network(base_inputs)
base_loss = criterion(logits, base_targets)
base_loss.backward()
w_optimizer.step()
# record
base_prec1, base_prec5 = obtain_accuracy(logits.data, base_targets.data, topk=(1, 5))
base_losses.update(base_loss.item(), base_inputs.size(0))
base_top1.update (base_prec1.item(), base_inputs.size(0))
base_top5.update (base_prec5.item(), base_inputs.size(0))
# update the architecture-weight
if algo == 'setn':
network.set_cal_mode('joint')
elif algo == 'gdas':
network.set_cal_mode('gdas', None)
elif algo.startswith('darts'):
network.set_cal_mode('joint', None)
elif algo == 'random':
network.set_cal_mode('urs', None)
elif algo != 'enas':
raise ValueError('Invalid algo name : {:}'.format(algo))
network.zero_grad()
if algo == 'darts-v2':
arch_loss, logits = backward_step_unrolled(network, criterion, base_inputs, base_targets, w_optimizer, arch_inputs, arch_targets)
a_optimizer.step()
elif algo == 'random' or algo == 'enas':
with torch.no_grad():
_, logits = network(arch_inputs)
arch_loss = criterion(logits, arch_targets)
else:
_, logits = network(arch_inputs)
arch_loss = criterion(logits, arch_targets)
arch_loss.backward()
a_optimizer.step()
# record
arch_prec1, arch_prec5 = obtain_accuracy(logits.data, arch_targets.data, topk=(1, 5))
arch_losses.update(arch_loss.item(), arch_inputs.size(0))
arch_top1.update (arch_prec1.item(), arch_inputs.size(0))
arch_top5.update (arch_prec5.item(), arch_inputs.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if step % print_freq == 0 or step + 1 == len(xloader):
Sstr = '*SEARCH* ' + time_string() + ' [{:}][{:03d}/{:03d}]'.format(epoch_str, step, len(xloader))
Tstr = 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})'.format(batch_time=batch_time, data_time=data_time)
Wstr = 'Base [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]'.format(loss=base_losses, top1=base_top1, top5=base_top5)
Astr = 'Arch [Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})]'.format(loss=arch_losses, top1=arch_top1, top5=arch_top5)
logger.log(Sstr + ' ' + Tstr + ' ' + Wstr + ' ' + Astr)
return base_losses.avg, base_top1.avg, base_top5.avg, arch_losses.avg, arch_top1.avg, arch_top5.avg
def train_real(xargs, use_higher_cond, network, fnetwork, criterion, before_rollout_state, logger, all_base_inputs, all_base_targets, all_arch_inputs, all_arch_targets, w_optimizer, epoch, data_step, outer_iter, outer_iters):
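    # After a higher-order rollout, applies the "real" weight update to the meta-weights: either a
    # few plain SGD steps on the base batches (bilevel, or joint with a step limit) or a copy of the
    # rolled-out weights back into the meta-network (joint without a step limit), depending on the
    # higher_loop configuration.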
if use_higher_cond and xargs.higher_loop == "bilevel" and xargs.higher_params == "arch" and xargs.sandwich_computation == "serial" and xargs.meta_algo not in ["reptile", "metaprox"]:
if xargs.refresh_arch_oneshot in ["always", "train_real"]: network.refresh_arch_oneshot = True
for inner_step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(zip(all_base_inputs, all_base_targets, all_arch_inputs, all_arch_targets)):
if inner_step == 1 and xargs.inner_steps_same_batch: # TODO Dont need more than one step of finetuning when using a single batch for the bilevel rollout I think?
break
if xargs.bilevel_train_steps is not None and inner_step >= xargs.bilevel_train_steps:
break
if data_step in [0, 1] and inner_step < 3 and epoch < 5:
logger.log(f"Doing weight training for real in higher_loop={xargs.higher_loop} at inner_step={inner_step}, step={data_step}: target={base_targets[0:10]}")
logger.log(f"Weight-training-for-real check: Original net: {str(list(before_rollout_state['model_init'].parameters())[1])[0:80]}, after-rollout net: {str(list(network.parameters())[1])[0:80]}")
logger.log(f"Arch check: Original net: {str(list(before_rollout_state['model_init'].alphas))[0:80]}, after-rollout net: {str(list(network.alphas))[0:80]}")
_, logits = network(base_inputs)
base_loss = criterion(logits, base_targets) * (1 if xargs.sandwich is None else 1/xargs.sandwich)
network.zero_grad()
base_loss.backward()
w_optimizer.step()
if xargs.refresh_arch_oneshot in ["train_real"]: network.refresh_arch_oneshot = True
elif use_higher_cond and xargs.higher_loop == "joint" and xargs.higher_loop_joint_steps is None and xargs.higher_params == "arch" and xargs.sandwich_computation == "serial" and outer_iter == outer_iters - 1 and xargs.meta_algo not in ["reptile", "metaprox"]:
if epoch == 0 and data_step < 3:
logger.log(f"Updating meta-weights by copying from the rollout model")
with torch.no_grad():
for (n1, p1), p2 in zip(network.named_parameters(), fnetwork.parameters()):
if ('arch' not in n1 and 'alpha' not in n1): # Want to copy weights only - the architecture update was done on the original network
p1.data = p2.data
elif use_higher_cond and xargs.higher_loop == "joint" and xargs.higher_loop_joint_steps is not None and xargs.higher_params == "arch" and xargs.sandwich_computation == "serial" and outer_iter == outer_iters - 1 and xargs.meta_algo not in ["reptile", "metaprox"]:
# This branch can be used for GDAS with unrolled SOTL
for inner_step, (base_inputs, base_targets, arch_inputs, arch_targets) in enumerate(zip(all_base_inputs, all_base_targets, all_arch_inputs, all_arch_targets)):
if inner_step >= xargs.higher_loop_joint_steps:
break
if data_step < 2 and inner_step < 3 and epoch < 5:
logger.log(f"Doing weight training for real in higher_loop={xargs.higher_loop} with higher_loop_joint_steps={xargs.higher_loop_joint_steps} at inner_step={inner_step}, step={data_step}: {base_targets[0:10]}")
logger.log(f"Arch check: Original net: {str(list(before_rollout_state['model_init'].alphas))[0:80]}, after-rollout net: {str(list(network.alphas))[0:80]}")
_, logits = network(base_inputs)
base_loss = criterion(logits, base_targets) * (1 if xargs.sandwich is None else 1/xargs.sandwich)
network.zero_grad()
base_loss.backward()
w_optimizer.step()
def get_best_arch_old(train_loader, valid_loader, network, n_samples, algo, logger,
additional_training=True, api=None, style:str='sotl', w_optimizer=None, w_scheduler=None,
config: Dict=None, epochs:int=1, steps_per_epoch:int=100,
val_loss_freq:int=1, overwrite_additional_training:bool=False,
scheduler_type:str=None, xargs=None):
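    # Architecture selection / correlation study: samples n_samples candidate architectures from the
    # supernet, optionally fine-tunes a copy of the supernet for each one over a short rollout while
    # recording SoTL/SoVL-style running metrics, then computes Kendall/Spearman/Pearson rank
    # correlations of those metrics against the benchmark's true accuracies and logs them to wandb.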
with torch.no_grad():
network.eval()
if algo == 'random':
archs, decision_metrics = network.return_topK(n_samples, True), []
elif algo == 'setn':
archs, decision_metrics = network.return_topK(n_samples, False), []
elif algo.startswith('darts') or algo == 'gdas':
arch = network.genotype
archs, decision_metrics = [arch], []
elif algo == 'enas':
archs, decision_metrics = [], []
for _ in range(n_samples):
_, _, sampled_arch = network.controller()
archs.append(sampled_arch)
else:
raise ValueError('Invalid algorithm name : {:}'.format(algo))
# The true rankings are used to calculate correlations later
true_rankings, final_accs = get_true_rankings(archs, api)
corr_funs = {"kendall": lambda x,y: scipy.stats.kendalltau(x,y).correlation,
"spearman":lambda x,y: scipy.stats.spearmanr(x,y).correlation,
"pearson":lambda x, y: scipy.stats.pearsonr(x,y)[0]}
if steps_per_epoch is not None and steps_per_epoch != "None":
steps_per_epoch = int(steps_per_epoch)
elif steps_per_epoch in [None, "None"]:
steps_per_epoch = len(train_loader)
else:
raise NotImplementedError
if style == 'val_acc':
decision_metrics = calculate_valid_accs(xloader=valid_loader, archs=archs, network=network)
corr_per_dataset = calc_corrs_val(archs=archs, valid_accs=decision_metrics, final_accs=final_accs, true_rankings=true_rankings, corr_funs=corr_funs)
wandb.log(corr_per_dataset)
if style == 'sotl' or style == "sovl":
# Simulate short training rollout to compute SoTL for candidate architectures
cond = logger.path('corr_metrics').exists() and not overwrite_additional_training
metrics_keys = ["sotl", "val", "sovl", "sovalacc", "sotrainacc", "sovalacc_top5", "sotrainacc_top5", "train_losses", "val_losses", "total_val"]
must_restart = False
start_arch_idx = 0
if cond:
logger.log("=> loading checkpoint of the last-checkpoint '{:}' start".format(logger.path('corr_metrics')))
checkpoint = torch.load(logger.path('corr_metrics'))
checkpoint_config = checkpoint["config"] if "config" in checkpoint.keys() else {}
try:
if type(list(checkpoint["metrics"]["sotl"].keys())[0]) is not str:
must_restart = True # will need to restart metrics because using the old checkpoint format
metrics = {k:checkpoint["metrics"][k] if k in checkpoint["metrics"] else {} for k in metrics_keys}
prototype = metrics[metrics_keys[0]]
first_arch = next(iter(metrics[metrics_keys[0]].keys()))
for metric_key in metrics_keys:
if not (len(metrics[metric_key]) == len(prototype) and len(metrics[metric_key][first_arch]) == len(prototype[first_arch])):
must_restart = True
except:
must_restart = True
decision_metrics = checkpoint["decision_metrics"] if "decision_metrics" in checkpoint.keys() else []
start_arch_idx = checkpoint["start_arch_idx"]
cond1={k:v for k,v in checkpoint_config.items() if ('path' not in k and 'dir' not in k and k not in ["dry_run"])}
cond2={k:v for k,v in vars(xargs).items() if ('path' not in k and 'dir' not in k and k not in ["dry_run"])}
logger.log(f"Checkpoint config: {cond1}")
logger.log(f"Newly input config: {cond2}")
if (cond1 == cond2):
logger.log("Both configs are equal.")
else:
logger.log("Checkpoint and current config are not the same! need to restart")
different_items = {k: cond1[k] for k in cond1 if k in cond2 and cond1[k] != cond2[k]}
logger.log(f"Different items are : {different_items}")
if (not cond) or must_restart or (xargs is None) or (cond1 != cond2) or any([len(x) == 0 for x in metrics.values()]): #config should be an ArgParse Namespace
if not cond:
logger.log(f"Did not find a checkpoint for supernet post-training at {logger.path('corr_metrics')}")
else:
logger.log(f"Starting postnet training with fresh metrics")
metrics = {k:{arch.tostr():[[] for _ in range(epochs)] for arch in archs} for k in metrics_keys}
decision_metrics = []
start_arch_idx = 0
train_start_time = time.time()
train_stats = [[] for _ in range(epochs*steps_per_epoch+1)]
for arch_idx, sampled_arch in tqdm(enumerate(archs[start_arch_idx:], start_arch_idx), desc="Iterating over sampled architectures", total = n_samples-start_arch_idx):
network2 = deepcopy(network)
network2.set_cal_mode('dynamic', sampled_arch)
if xargs.lr is not None and scheduler_type is None:
scheduler_type = "constant"
if scheduler_type in ['linear_warmup', 'linear']:
config = config._replace(scheduler=scheduler_type, warmup=1, eta_min=0)
w_optimizer2, w_scheduler2, criterion = get_optim_scheduler(network2.weights, config)
elif scheduler_type == "cos_reinit":
# In practice, this leads to constant LR = 0.025 since the original Cosine LR is annealed over 100 epochs and our training schedule is very short
w_optimizer2, w_scheduler2, criterion = get_optim_scheduler(network2.weights, config)
elif scheduler_type in ['cos_adjusted']:
config = config._replace(scheduler='cos', warmup=0, epochs=epochs)
w_optimizer2, w_scheduler2, criterion = get_optim_scheduler(network2.weights, config)
elif scheduler_type in ['cos_fast']:
config = config._replace(scheduler='cos', warmup=0, LR=0.001 if xargs.lr is None else xargs.lr, epochs=epochs, eta_min=0)
w_optimizer2, w_scheduler2, criterion = get_optim_scheduler(network2.weights, config)
elif scheduler_type in ['cos_warmup']:
config = config._replace(scheduler='cos', warmup=1, LR=0.001 if xargs.lr is None else xargs.lr, epochs=epochs, eta_min=0)
w_optimizer2, w_scheduler2, criterion = get_optim_scheduler(network2.weights, config)
elif xargs.lr is not None and scheduler_type == 'constant':
config = config._replace(scheduler='constant', constant_lr=xargs.lr)
w_optimizer2, w_scheduler2, criterion = get_optim_scheduler(network2.weights, config)
else:
# NOTE in practice, since the Search function uses Cosine LR with T_max that finishes at end of search_func training, this switches to a constant 1e-3 LR.
w_optimizer2, w_scheduler2, criterion = get_optim_scheduler(network2.weights, config)
w_optimizer2.load_state_dict(w_optimizer.state_dict())
w_scheduler2.load_state_dict(w_scheduler.state_dict())
if arch_idx == start_arch_idx: #Should only print it once at the start of training
logger.log(f"Optimizers for the supernet post-training: {w_optimizer2}, {w_scheduler2}")
running_sotl = 0 # TODO implement better SOTL class to make it more adjustible and get rid of this repeated garbage everywhere
running_sovl = 0
running_sovalacc = 0
running_sotrainacc = 0
running_sovalacc_top5 = 0
running_sotrainacc_top5 = 0
_, val_acc_total, _ = valid_func(xloader=valid_loader, network=network2, criterion=criterion, algo=algo, logger=logger)
true_step = 0
arch_str = sampled_arch.tostr()
if steps_per_epoch is None or steps_per_epoch=="None":
steps_per_epoch = len(train_loader)
# q = mp.Queue()
# # This reporting process is necessary due to WANDB technical difficulties. It is used to continuously report train stats from a separate process
        # # Otherwise, when a Run is initiated from a Sweep, it is not necessary to log the results to separate training runs. But that is what we want for the individual arch stats
# p=mp.Process(target=train_stats_reporter, kwargs=dict(queue=q, config=vars(xargs),
# sweep_group=f"Search_Cell_{algo}_arch", sweep_run_name=wandb.run.name or wandb.run.id or "unknown", arch=sampled_arch.tostr()))
# p.start()
for epoch_idx in range(epochs):
if epoch_idx == 0:
metrics["total_val"][arch_str][epoch_idx] = [val_acc_total]*(len(train_loader)-1)
else:
metrics["total_val"][arch_str][epoch_idx] = [metrics["total_val"][arch_str][epoch_idx-1][-1]]*(len(train_loader)-1)
valid_loader_iter = iter(valid_loader) if not additional_training else None # This causes deterministic behavior for validation data since the iterator gets passed in to each function
for batch_idx, data in enumerate(train_loader):
if (steps_per_epoch is not None and steps_per_epoch != "None") and batch_idx > steps_per_epoch:
break
with torch.set_grad_enabled(mode=additional_training):
if scheduler_type in ["linear", "linear_warmup"]:
w_scheduler2.update(epoch_idx, 1.0 * batch_idx / min(len(train_loader), steps_per_epoch))
elif scheduler_type == "cos_adjusted":
w_scheduler2.update(epoch_idx , batch_idx/min(len(train_loader), steps_per_epoch))
elif scheduler_type == "cos_reinit":
w_scheduler2.update(epoch_idx, 0.0)
elif scheduler_type in ['cos_fast', 'cos_warmup']:
w_scheduler2.update(epoch_idx , batch_idx/min(len(train_loader), steps_per_epoch))
else:
w_scheduler2.update(None, 1.0 * batch_idx / len(train_loader))
network2.zero_grad()
inputs, targets = data
inputs = inputs.cuda(non_blocking=True)
targets = targets.cuda(non_blocking=True)
_, logits = network2(inputs)
train_acc_top1, train_acc_top5 = obtain_accuracy(logits.data, targets.data, topk=(1, 5))
loss = criterion(logits, targets)
if additional_training:
loss.backward()
w_optimizer2.step()
true_step += 1
if batch_idx == 0 or (batch_idx % val_loss_freq == 0):
valid_acc, valid_acc_top5, valid_loss = calculate_valid_acc_single_arch(valid_loader=valid_loader, arch=sampled_arch, network=network2, criterion=criterion, valid_loader_iter=valid_loader_iter)
batch_train_stats = {"lr":w_scheduler2.get_lr()[0], "true_step":true_step, "train_loss":loss.item(), "train_acc_top1":train_acc_top1.item(), "train_acc_top5":train_acc_top5.item(),
"valid_loss":valid_loss, "valid_acc":valid_acc, "valid_acc_top5":valid_acc_top5}
# q.put(batch_train_stats)
train_stats[epoch_idx*steps_per_epoch+batch_idx].append(batch_train_stats)
running_sovl -= valid_loss
running_sovalacc += valid_acc
running_sovalacc_top5 += valid_acc_top5
running_sotl -= loss.item() # Need to have negative loss so that the ordering is consistent with val acc
running_sotrainacc += train_acc_top1.item()
running_sotrainacc_top5 += train_acc_top5.item()
metrics["sotl"][arch_str][epoch_idx].append(running_sotl)
metrics["val"][arch_str][epoch_idx].append(valid_acc)
metrics["sovl"][arch_str][epoch_idx].append(running_sovl)
metrics["sovalacc"][arch_str][epoch_idx].append(running_sovalacc)
metrics["sotrainacc"][arch_str][epoch_idx].append(running_sotrainacc)
metrics["sovalacc_top5"][arch_str][epoch_idx].append(running_sovalacc_top5)
metrics["sotrainacc_top5"][arch_str][epoch_idx].append(running_sotrainacc_top5)
metrics["train_losses"][arch_str][epoch_idx].append(-loss.item())
metrics["val_losses"][arch_str][epoch_idx].append(-valid_loss)
if additional_training:
_, val_acc_total, _ = valid_func(xloader=valid_loader, network=network2, criterion=criterion, algo=algo, logger=logger)
metrics["total_val"][arch_str][epoch_idx].append(val_acc_total)
final_metric = None # Those final/decision metrics are not very useful apart from being a compatibility layer with how get_best_arch worked in the base repo
if style == "sotl":
final_metric = running_sotl
elif style == "sovl":
final_metric = running_sovl
decision_metrics.append(final_metric)
corr_metrics_path = save_checkpoint({"corrs":{}, "metrics":metrics,
"archs":archs, "start_arch_idx": arch_idx+1, "config":vars(xargs), "decision_metrics":decision_metrics},
logger.path('corr_metrics'), logger, quiet=True)
# q.put("SENTINEL") # This lets the Reporter process know it should quit
train_total_time = time.time()-train_start_time
print(f"Train total time: {train_total_time}")
wandb.run.summary["train_total_time"] = train_total_time
original_metrics = deepcopy(metrics)
metrics_FD = {k+"FD": {arch.tostr():SumOfWhatever(measurements=metrics[k][arch.tostr()], e=1).get_time_series(chunked=True, mode="fd") for arch in archs} for k,v in metrics.items() if k in ['val', 'train_losses', 'val_losses']}
metrics.update(metrics_FD)
if epochs > 1:
interim = {} # We need an extra dict to avoid changing the dict's keys during iteration for the R metrics
for key in metrics.keys():
if key in ["train_losses", "train_lossesFD", "val_losses", "val"]:
interim[key+"R"] = {}
for arch in archs:
arr = []
for epoch_idx in range(len(metrics[key][arch.tostr()])):
epoch_arr = []
for batch_metric in metrics[key][arch.tostr()][epoch_idx]:
if key in ["train_losses", "train_lossesFD", "val_losses"]:
sign = -1
else:
sign = 1
epoch_arr.append(sign*batch_metric if epoch_idx == 0 else -1*sign*batch_metric)
arr.append(epoch_arr)
interim[key+"R"][arch.tostr()] = SumOfWhatever(measurements=arr, e=epochs+1, mode='last').get_time_series(chunked=True)
# interim[key+"R"][arch.tostr()] = SumOfWhatever(measurements=[[[batch_metric if epoch_idx == 0 else -batch_metric for batch_metric in batch_metrics] for batch_metrics in metrics[key][arch.tostr()][epoch_idx]]] for epoch_idx in range(len(metrics[key][arch.tostr()])), e=epochs+1).get_time_series(chunked=True)
# print(interim)
# print(metrics["train_lossesFD"])
# print(metrics["train_losses"])
metrics.update(interim)
metrics_E1 = {k+"E1": {arch.tostr():SumOfWhatever(measurements=metrics[k][arch.tostr()], e=1).get_time_series(chunked=True) for arch in archs} for k,v in metrics.items()}
metrics.update(metrics_E1)
else:
# We only calculate Sum-of-FD metrics in this case
metrics_E1 = {k+"E1": {arch.tostr():SumOfWhatever(measurements=metrics[k][arch.tostr()], e=1).get_time_series(chunked=True) for arch in archs} for k,v in metrics.items() if "FD" in k}
metrics.update(metrics_E1)
for key in metrics_FD.keys(): # Remove the pure FD metrics because they are useless anyways
metrics.pop(key, None)
start=time.time()
corrs = {}
to_logs = []
for k,v in tqdm(metrics.items(), desc="Calculating correlations"):
        # We cannot do logging synchronously with training because we need to know the results of all archs for the i-th epoch before we can log correlations for that epoch
corr, to_log = calc_corrs_after_dfs(epochs=epochs, xloader=train_loader, steps_per_epoch=steps_per_epoch, metrics_depth_dim=v,
final_accs = final_accs, archs=archs, true_rankings = true_rankings, corr_funs=corr_funs, prefix=k, api=api, wandb_log=False)
corrs["corrs_"+k] = corr
to_logs.append(to_log)
print(f"Calc corrs time: {time.time()-start}")
if n_samples-start_arch_idx > 0: #If there was training happening - might not be the case if we just loaded checkpoint
# We reshape the stored train statistics so that it is a Seq[Dict[k: summary statistics across all archs for a timestep]] instead of Seq[Seq[Dict[k: train stat for a single arch]]]
processed_train_stats = []
stats_keys = batch_train_stats.keys()
for idx, stats_across_time in tqdm(enumerate(train_stats), desc="Processing train stats"):
agg = {k: np.array([single_train_stats[k] for single_train_stats in stats_across_time]) for k in stats_keys}
agg = {k: {"mean":np.mean(v), "std": np.std(v)} for k,v in agg.items()}
agg["true_step"] = idx
processed_train_stats.append(agg)
for epoch_idx in range(len(to_logs[0])):
relevant_epochs = [to_logs[i][epoch_idx] for i in range(len(to_logs))]
for batch_idx in range(len(relevant_epochs[0])):
relevant_batches = [relevant_epoch[batch_idx] for relevant_epoch in relevant_epochs]
all_batch_data = {}
for batch in relevant_batches:
all_batch_data.update(batch)
# Here we log both the aggregated train statistics and the correlations
if n_samples-start_arch_idx > 0: #If there was training happening - might not be the case if we just loaded checkpoint
all_data_to_log = {**all_batch_data, **processed_train_stats[epoch_idx*steps_per_epoch+batch_idx]}
else:
all_data_to_log = all_batch_data
wandb.log(all_data_to_log)
if style in ["sotl", "sovl"] and n_samples-start_arch_idx > 0: # otherwise, we are just reloading the previous checkpoint so should not save again
corr_metrics_path = save_checkpoint({"metrics":original_metrics, "corrs": corrs,
"archs":archs, "start_arch_idx":arch_idx+1, "config":vars(xargs), "decision_metrics":decision_metrics},
logger.path('corr_metrics'), logger)
try:
wandb.save(str(corr_metrics_path.absolute()))
except:
print("Upload to WANDB failed")
best_idx = np.argmax(decision_metrics)
try:
best_arch, best_valid_acc = archs[best_idx], decision_metrics[best_idx]
except:
logger.log("Failed to get best arch via decision_metrics")
logger.log(f"Decision metrics: {decision_metrics}")
logger.log(f"Best idx: {best_idx}, length of archs: {len(archs)}")
best_arch,best_valid_acc = archs[0], decision_metrics[0]
return best_arch, best_valid_acc
|
py | 1a3d96e0fdef106206fd6bcea7156a6f5de022f9 | """
VHDL Mode for Sublime Text 3
This package attempts to recreate to some level of fidelity the features
in the vhdl-mode in Emacs.
"""
import os
import time
import re
import textwrap
import sublime
import sublime_plugin
#from threading import Thread
from . import vhdl_lang as vhdl
from . import vhdl_util as util
#-------------------------------------------------------------------------------
class vhdlModeVersionCommand(sublime_plugin.TextCommand):
"""
Prints the version to the console.
"""
def run(self, edit):
print("vhdl-mode: VHDL Mode Version 1.8.0")
#-------------------------------------------------------------------------------
class vhdlModeInsertHeaderCommand(sublime_plugin.TextCommand):
"""
This command is used to insert a predefined header into the
current text file.
"""
def run(self, edit):
# Assigning this to a string to keep command shorter later.
template = "Packages/VHDL Mode/Snippets/vhdl-header.sublime-snippet"
# Looking for a name, first the buffer name, then the file name,
# then finally a default value.
buffname = self.view.name()
longname = self.view.file_name()
if buffname:
filename = buffname
elif longname:
# Convert Windows slashes to Unix slashes (if any)
longname = re.sub(r'\\', '/', longname)
namechunks = longname.split('/')
filename = namechunks[len(namechunks)-1]
else:
filename = '<filename>'
# Get the other fields out of settings.
linesize = util.get_vhdl_setting(self, 'vhdl-line-length')
project = util.get_vhdl_setting(self, 'vhdl-project-name')
author = util.get_vhdl_setting(self, 'vhdl-user')
company = util.get_vhdl_setting(self, 'vhdl-company')
platform = util.get_vhdl_setting(self, 'vhdl-platform')
standard = util.get_vhdl_setting(self, 'vhdl-standard')
mtime_prefix = util.get_vhdl_setting(self, 'vhdl-modified-time-string')
use_copyright = util.get_vhdl_setting(self, 'vhdl-use-copyright-block')
use_revision = util.get_vhdl_setting(self, 'vhdl-use-revision-block')
copyright_list = util.get_vhdl_setting(self, 'vhdl-copyright-block')
revision_list = util.get_vhdl_setting(self, 'vhdl-revision-block')
# Set the string to dynamically replace the line field to the chosen
# line length.
linestr = '-'*linesize
# Get the current time and create the modified time string.
date = time.ctime(time.time())
year = time.strftime("%Y",time.localtime())
mod_time = mtime_prefix + date
# Create the copyright block and revision block. Both need
# prefixed newlines because they are optional and the
# snippet field is at the end of the preceding line.
if use_copyright:
copyright = '\n'.join(copyright_list)
copyright = re.sub(r'\${YEAR}', year, copyright)
copyright = re.sub(r'\${COMPANY}', company, copyright)
copyright = re.sub(r'\${LINE}', linestr, copyright)
copyright = '\n' + copyright
else:
copyright = ''
if use_revision:
revision = '\n'.join(revision_list)
revision = re.sub(r'\${LINE}', linestr, revision)
revision = '\n' + revision
else:
revision = ''
# Moving insertion point to the beginning of the file.
bof = self.view.text_point(0,0)
self.view.sel().clear()
self.view.sel().add(sublime.Region(bof))
self.view.show(bof)
# Inserting template/snippet
self.view.run_command("insert_snippet",
{
"name" : template,
"PROJECT" : project,
"FILENAME" : filename,
"AUTHOR" : author,
"COMPANY" : company,
"CDATE" : date,
"MODIFIED_TIME_STRING" : mod_time,
"MDATE" : date,
"YEAR" : year,
"PLATFORM" : platform,
"STANDARD" : standard,
"COPYRIGHT_BLOCK" : copyright,
"REVISION_BLOCK" : revision,
"LINE" : linestr
})
print('vhdl-mode: Inserted header template.')
#-------------------------------------------------------------------------------
class vhdlModeToggleCommentRegionCommand(sublime_plugin.TextCommand):
"""
The command analyzes the block delivered to the command
and attempts to find the leftmost point and uses that for
the location of the commenting characters so that it provides
an even margin and eases removal later.
If the starting line of the region begins with a comment,
the command attempts to remove the comment from that and
each subsequent line.
"""
def run(self, edit):
# This could theoretically run on multiple regions but
# it's not a likely application and I'm just going to
# worry about the first one for now.
region = self.view.sel()[0]
# The line method when applied to a region says it
# returns a new region that is blocked to the
# beginning of the line and the end of the line.
# Exactly what I want, so let's try it.
region = self.view.line(region)
block = self.view.substr(region)
lines = block.split('\n')
# Setting the value to an absurd value for
# comparison. Search for the first non-
# whitespace character to determine the
# left margin.
margin = 1000
for line in lines:
s = re.search(r'\S', line)
if s:
if s.start() < margin:
margin = s.start()
# Check for comment on first line. This
# determines if we're commenting or
# uncommenting.
comment = True
s = re.search(r'^\s*--', lines[0])
if s:
comment = False
# Process lines.
for index, line in enumerate(lines):
if comment:
lines[index] = lines[index][0:margin] + \
'--' + \
lines[index][margin:]
else:
# Assuming this is a commented block, we replace
# only the first comment designator. Weird things
# will happen if there are uncommented lines in the
# block and there's also inline comments.
lines[index] = re.sub('--', '', lines[index], 1)
# Put together into big string
block = '\n'.join(lines)
#print(block)
# Replace the current region with the new region
self.view.replace(edit, region, block)
#-------------------------------------------------------------------------------
class vhdlModeBeautifyBufferCommand(sublime_plugin.TextCommand):
"""
This is a Sublime Text variation of the standalone beautify
code program. Sets the region to the entire buffer, obtains
the lines, then processes them and writes them back.
"""
def run(self, edit):
# Finding the current view and location of the point.
x, y = self.view.viewport_position()
row, col = self.view.rowcol(self.view.sel()[0].begin())
#print('vhdl-mode: x={}, y={}, row={}, col={}'.format(x, y, row, col))
# Create points for a region that define beginning and end.
begin = 0
end = self.view.size()-1
# Slurp up entire buffer and create CodeBlock object
whole_region = sublime.Region(begin, end)
buffer_str = self.view.substr(whole_region)
cb = vhdl.CodeBlock.from_block(buffer_str)
# Get the scope for each line. There's commented out code here for
# which scope to get first column of the line, and first character of
# the line. The first column seems to give the best results, though
# there are anomalies (like a when <choice> => followed by a line that
# uses => as a discrete member group assignment).
point = 0
scope_list = []
while not util.is_end_line(self, point):
#point = util.move_to_1st_char(self, point)
scope_list.append(self.view.scope_name(point))
#point = util.move_to_bol(self, point)
point = util.move_down(self, point)
scope_list.append(self.view.scope_name(point))
# Process the block of code. Prep pads symbols and removes extra
# spaces.
cb.prep()
cb.left_justify()
# Do the initial alignment after justification.
print('vhdl-mode: Pre-indent symbol alignment.')
cb.align_symbol(r':(?!=)', 'pre', scope_list)
cb.align_symbol(r':(?!=)\s?(?:in\b|out\b|inout\b|buffer\b)?\s*', 'post', scope_list)
cb.align_symbol(r'<(?==)|:(?==)', 'pre', scope_list)
cb.align_symbol(r'=>', 'pre', scope_list)
# Indent! Get some settings first.
use_spaces = util.get_vhdl_setting(self, 'translate_tabs_to_spaces')
tab_size = util.get_vhdl_setting(self, 'tab_size')
print('vhdl-mode: Indenting.')
cb.indent_vhdl(0, tab_size, use_spaces)
# Post indent alignment
print('vhdl-mode: Post-indent symbol alignment.')
cb.align_symbol(r'\bwhen\b', 'pre', scope_list)
print('vhdl-mode: Aligning comments.')
cb.align_comments(tab_size, use_spaces)
# Recombine into one big blobbed string.
buffer_str = cb.to_block()
# Annnd if all went well, write it back into the buffer
self.view.replace(edit, whole_region, buffer_str)
# New replacement routine that does not trigger Sublime's
# repainting mechanism that seems to be triggered by using
# self.view.replace()
#self.view.run_command("select_all")
#self.view.run_command("left_delete")
#self.view.run_command("append", {"characters": buffer_str})
# Restore the view.
original_point = self.view.text_point(row, col)
util.set_cursor(self, original_point)
# Trying out another method for handling the viewport. You can have
# a zero value for set_timeout() delay so this executes after the
# command exits.
restore = lambda: self.view.set_viewport_position((x, y), False)
sublime.set_timeout(restore, 0)
#self.view.set_viewport_position((x, y), False)
#-------------------------------------------------------------------------------
class vhdlModeUpdateLastUpdatedCommand(sublime_plugin.TextCommand):
"""
Finds the last updated field in the header and updates the time
in the field.
"""
def run(self, edit):
"""Sublime Text plugin run method."""
# Note, if one changes the header, this might need to change too.
pattern = util.get_vhdl_setting(self, 'vhdl-modified-time-string')
region = self.view.find(pattern, 0)
#print('Region Diagnostics')
#print('------------------')
#print('Begin: {}'.format(region.begin()))
#print('End: {}'.format(region.end()))
#print('Empty? {}'.format(region.empty()))
if not region.empty():
region = self.view.line(region)
date = time.ctime(time.time())
new_mtime = pattern + '{}'.format(date)
self.view.replace(edit, region, new_mtime)
print('vhdl-mode: Updated last modified time.')
else:
print('vhdl-mode: No last modified time field found.')
#-------------------------------------------------------------------------------
class vhdlModeUpdateModifiedTimeOnSave(sublime_plugin.EventListener):
"""
Watches for a save event and updates the Last update
field in the header.
"""
def on_pre_save(self, view):
"""
Gets passed the view that is being saved and scans for the
Last updated field.
"""
# MUST CHECK FOR VHDL FILE TYPE (otherwise it
# starts executing on this very source file which
# is problematic!)
if util.is_vhdl_file(view.scope_name(0)):
view.run_command("vhdl_mode_update_last_updated")
#-------------------------------------------------------------------------------
class vhdlModeScopeSnifferCommand(sublime_plugin.TextCommand):
"""
My own scope sniffing command that prints to
console instead of a popup window.
"""
def run(self, edit):
"""ST3 Run Method"""
region = self.view.sel()[0]
sniff_point = region.begin()
print('vhdl-mode: {}'.format(self.view.scope_name(sniff_point)))
#-------------------------------------------------------------------------------
class vhdlModeInsertCommentLine(sublime_plugin.TextCommand):
"""
This should insert a line out to the margin (80 characters)
starting where the point is. This is intended to run after
the user types '---' (see keybindings)
"""
def run(self, edit):
"""Standard TextCommand Run method"""
# Get the current point.
region = self.view.sel()[0]
original_point = region.begin()
point_r, point_c = self.view.rowcol(original_point)
# Figure out if any tab characters were used.
line = self.view.substr(self.view.line(original_point))
numtabs = line.count('\t')
# Get the current tab size and line length.
tabsize = util.get_vhdl_setting(self, 'tab_size')
linesize = util.get_vhdl_setting(self, 'vhdl-line-length')
        # Create a string with the correct number of dashes. A tab occupies
        # one character in the line but renders as tab_size columns, so each
        # tab contributes tabsize-1 extra columns.
numdash = linesize-point_c-(tabsize-1)*numtabs
if numdash <= 2:
print('vhdl-mode: Warning: Line length setting violation. Setting number of dashes to 2.')
numdash = 2
line = '-'*numdash
num_chars = self.view.insert(edit, original_point, line)
print('vhdl-mode: Inserted comment line.')
#-------------------------------------------------------------------------------
class vhdlModeInsertCommentBox(sublime_plugin.TextCommand):
"""
This should insert a box out to the margin (80 characters)
starting where the point is, and taking into account tabs.
This is intended to run after the user types '----' (see
keybindings)
"""
def run(self, edit):
"""Standard TextCommand Run method"""
# Get the current point.
region = self.view.sel()[0]
original_point = region.begin()
point_r, point_c = self.view.rowcol(original_point)
# Figure out if any tab characters were used.
line = self.view.substr(self.view.line(original_point))
numtabs = line.count('\t')
# Get the current tab size
tabsize = util.get_vhdl_setting(self, 'tab_size')
linesize = util.get_vhdl_setting(self, 'vhdl-line-length')
        # Create a string with the correct number of dashes. A tab occupies
        # one character in the line but renders as tab_size columns, so each
        # tab contributes tabsize-1 extra columns.
numdash = linesize-point_c-(tabsize-1)*numtabs
if numdash <= 2:
print('vhdl-mode: Warning: Line length setting violation. Setting number of dashes to 2.')
numdash = 2
line = '-'*numdash
# Create snippet object.
snippet = line + '\n' + '-- $0' + '\n' + line + '\n'
# Inserting template/snippet
self.view.run_command("insert_snippet",
{
"contents" : snippet
})
print('vhdl-mode: Inserted comment box.')
#-------------------------------------------------------------------------------
class vhdlModeSettingSniffer(sublime_plugin.TextCommand):
'''
Creating a command to check settings in various
contexts
'''
def run(self, edit):
'''
Standard TextCommand Run Method
'''
print('Preference Settings')
print('vhdl-mode: {}: {}'.format('tab_size', util.get_vhdl_setting(self, 'tab_size')))
print('vhdl-mode: {}: {}'.format('translate_tabs_to_spaces', util.get_vhdl_setting(self, 'translate_tabs_to_spaces')))
vhdl_settings = sublime.load_settings('vhdl_mode.sublime-settings')
keys = ['vhdl-line-length',
'vhdl-user',
'vhdl-company',
'vhdl-project-name',
'vhdl-platform',
'vhdl-standard',
'vhdl-modified-time-string',
'vhdl-use-copyright-block',
'vhdl-use-revision-block',
'vhdl-copyright-block',
'vhdl-revision-block']
print('Package Settings')
for key in keys:
print('vhdl-mode: {}: "{}"'.format(key, vhdl_settings.get(key)))
print('View Settings')
for key in keys:
print('vhdl-mode: {}: {}'.format(key, util.get_vhdl_setting(self, key)))
#-------------------------------------------------------------------------------
class vhdlModeViewportSniffer(sublime_plugin.TextCommand):
def run(self, edit):
x, y = self.view.viewport_position()
print('vhdl-mode: Viewport X: {} Y: {}'.format(x,y))
#self.view.set_viewport_position((0, y), False)
|
py | 1a3d9a75d48f2457061a0be93c9a9dffa02ebff9 | """
Data structures required for our testing.
"""
import os
import shutil
from wsgi_intercept import httplib2_intercept
import wsgi_intercept
from tiddlyweb.web.serve import load_app
from tiddlyweb.model.collections import Tiddlers
from tiddlyweb.model.bag import Bag
from tiddlyweb.model.tiddler import Tiddler
from tiddlyweb.model.recipe import Recipe
from tiddlyweb.config import config
from tiddlyweb.store import Store
config['server_host'] = {
'scheme': 'http',
'host': 'our_test_domain',
'port': '8001',
}
def initialize_app():
app = load_app()
def app_fn():
return app
httplib2_intercept.install()
wsgi_intercept.add_wsgi_intercept('our_test_domain', 8001, app_fn)
TiddlerOne = Tiddler('TiddlerOne')
TiddlerOne.modifier = 'AuthorOne'
TiddlerOne.text = u'c tiddler one content'
TiddlerOne.tags = ['tagone', 'tagtwo']
TiddlerTwo = Tiddler('TiddlerTwo')
TiddlerTwo.modifier = u'AuthorTwo'
TiddlerTwo.text = u'b tiddler two content'
TiddlerThree = Tiddler('TiddlerThree')
TiddlerThree.modifier = u'AuthorThree'
TiddlerThree.text = u'a tiddler three content'
TiddlerThree.tags = [u'tagone', u'tagthree']
tiddlers = [TiddlerOne, TiddlerTwo, TiddlerThree]
bagone = Bag(name='bagone')
container = Tiddlers()
container.add(tiddlers[0])
bagone.tiddlers = container
bagtwo = Bag(name='bagtwo')
container = Tiddlers()
container.add(tiddlers[1])
bagtwo.tiddlers = container
bagthree = Bag(name='bagthree')
container = Tiddlers()
container.add(tiddlers[2])
bagthree.tiddlers = container
bagfour = Bag(name='bagfour')
container = Tiddlers()
for tiddler in tiddlers:
container.add(tiddler)
bagfour.tiddlers = container
tiddler_collection = Tiddlers()
for tiddler in tiddlers:
tiddler.bag = u'bagfour'
tiddler_collection.add(tiddler)
recipe_list = [
(bagone, u'select=title:TiddlerOne'),
(bagtwo, u'select=title:TiddlerTwo'),
(bagthree, u'select=tag:tagone;select=tag:tagthree')
]
recipe_list_string = [
[u'bagone', u'select=title:TiddlerOne'],
[u'bagtwo', u'select=title:TiddlerTwo'],
[u'bagthree', u'select=tag:tagone;select=tag:tagthree']
]
def _teststore():
return Store(config['server_store'][0], config['server_store'][1],
environ={'tiddlyweb.config': config})
def reset_textstore():
if os.path.exists('store'):
shutil.rmtree('store')
def muchdata(store):
for bag_numeral in range(30):
bag = create_bag(store, bag_numeral)
for tiddler_numeral in range(10):
tiddler = create_tiddler(store, bag, tiddler_numeral)
recipe = Recipe('long')
recipe_list = [(u'bag1', '')]
for numeral in range(0, 30, 2):
bag_name = u'bag%s' % numeral
filter_string = u'select=title:tiddler%s' % (numeral % 10)
if not (numeral % 10) % 3:
filter_string = filter_string + u';select=tag:tag three'
recipe_list.append([bag_name, filter_string])
recipe.set_recipe(recipe_list)
store.put(recipe)
def create_tiddler(store, bag, numeral):
tiddler = Tiddler('tiddler%s' % numeral)
tiddler.bag = bag.name
tiddler.text = u'i am tiddler %s' % numeral
tags = [u'basic tag']
if not numeral % 2:
tags.append(u'tagtwo')
if not numeral % 3:
tags.append(u'tagthree')
if not numeral % 4:
tags.append(u'tagfour')
tiddler.tags = tags
if tiddler.title == 'tiddler8':
tiddler.modified = '200805230303'
store.put(tiddler)
def create_bag(store, numeral):
bag = Bag('bag%s' % numeral)
store.put(bag)
return bag
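# Illustrative only: a test module would typically wire these fixtures together
# roughly as follows (the import path is an assumption, not part of this file):
#
#   from fixtures import initialize_app, reset_textstore, _teststore, muchdata
#
#   def setup_module(module):
#       initialize_app()
#       reset_textstore()
#       module.store = _teststore()
#       muchdata(module.store)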
|
py | 1a3d9a81544de3b92c3ce99789b09a712b9615b4 | #!/Users/marc/miniconda3/bin/python3
import math
import numpy as np
def sphere_vertices( n ):
phistep = math.pi / n
thetastep = 2*math.pi / n
vertices = []
for i in range( n+1 ):
phi = - math.pi/2 + i * phistep
if i == 0:
tb = 'bottom'
elif i==n:
tb = 'top'
else:
tb = False
for j in range( n ):
theta = j * thetastep
face = sphere_face( phi, theta, phi+phistep, theta+thetastep, tb )
vertices.extend( face )
#vertices = [item for sublist in vertices for item in sublist]
return np.array( vertices, dtype=np.float32)
def sphere_face( phi0, theta0, phi1, theta1, tb ):
x0 = .5*math.cos(theta0) * math.cos(phi0)
x1 = .5*math.cos(theta0) * math.cos(phi1)
x2 = .5*math.cos(theta1) * math.cos(phi1)
x3 = .5*math.cos(theta1) * math.cos(phi0)
y0 = .5*math.sin(theta0) * math.cos(phi0)
y1 = .5*math.sin(theta0) * math.cos(phi1)
y2 = .5*math.sin(theta1) * math.cos(phi1)
y3 = .5*math.sin(theta1) * math.cos(phi0)
z0 = .5*math.sin(phi0)
z1 = .5*math.sin(phi1)
if tb == 'bottom':
return [ x0,y0,z0, theta0/(2*math.pi), (phi0+math.pi/2)/math.pi,
x1,y1,z1, theta0/(2*math.pi), (phi1+math.pi/2)/math.pi,
x2,y2,z1, theta1/(2*math.pi), (phi1+math.pi/2)/math.pi, ]
elif tb == 'top':
return [ x0,y0,z0, theta0/(2*math.pi), (phi0+math.pi/2)/math.pi,
x3,y3,z0, theta1/(2*math.pi), (phi0+math.pi/2)/math.pi,
x2,y2,z1, theta1/(2*math.pi), (phi1+math.pi/2)/math.pi ]
else:
return [x0,y0,z0, theta0/(2*math.pi), (phi0+math.pi/2)/math.pi,
x1,y1,z1, theta0/(2*math.pi), (phi1+math.pi/2)/math.pi,
x2,y2,z1, theta1/(2*math.pi), (phi1+math.pi/2)/math.pi,
x0,y0,z0, theta0/(2*math.pi), (phi0+math.pi/2)/math.pi,
x3,y3,z0, theta1/(2*math.pi), (phi0+math.pi/2)/math.pi,
x2,y2,z1, theta1/(2*math.pi), (phi1+math.pi/2)/math.pi ]
if __name__ == "__main__":
sphere = sphere_vertices( 3 )
np.set_printoptions(precision=3, suppress=True, linewidth=110)
print(sphere)
print("Faces: ", len(sphere)/3)
|
py | 1a3d9af03b4a7e8941c4c7e45813cfc5b8e77499 | #!/usr/bin/env python
import os
import subprocess
import re
import time
import json
from charmhelpers.core import hookenv
from charmhelpers.core.host import get_nic_mtu, service_start, service_running
from charmhelpers.fetch import apt_install
class Lldp():
lldp_out = '/home/ubuntu/lldp_output.json'
enabled = False
parsed_data = None
def install(self):
apt_install("lldpd")
def disable_i40e_lldp_agent(self):
path = '/sys/kernel/debug/i40e'
if os.path.isdir(path):
hookenv.log('Disabling NIC internal LLDP agent','INFO')
for r,dirs,files in os.walk(path):
for d in dirs:
with open("{}/{}/command".format(path,d),"w") as fh:
fh.write('lldp stop')
def enable(self):
if not service_running('lldpd'):
self.disable_i40e_lldp_agent()
service_start('lldpd')
hookenv.log('Waiting to collect LLDP data','INFO')
time.sleep(30)
            self.enabled = True
def collect_data(self):
cmd = "lldpcli show neighbors details -f json | tee " + self.lldp_out
os.system(cmd)
def data(self):
if not self.parsed_data:
with open(self.lldp_out, 'r') as f:
self.parsed_data = json.load(f)
return self.parsed_data
def get_interface(self,iface):
for i in self.data()['lldp']['interface']:
if iface in i:
return i[iface]
return None
def get_interface_vlan(self,iface):
try:
return self.get_interface(iface)['vlan']['vlan-id']
except (KeyError,TypeError):
hookenv.log('No LLDP data for {}'.format(iface),'INFO')
return None
def get_interface_port_descr(self,iface):
try:
return self.get_interface(iface)['port']['descr']
except (KeyError,TypeError):
hookenv.log('No LLDP data for {}'.format(iface),'INFO')
return None
class Iperf():
"""
Install and start a server automatically
"""
iperf_out = '/home/ubuntu/iperf_output.txt'
def install_iperf(self):
apt_install("iperf")
def listen(self):
cmd = "iperf -s -m -fm | tee " + self.iperf_out + " &"
os.system(cmd)
def mtu(self):
with open(self.iperf_out) as f:
for line in f.readlines():
if "MTU" in line:
match = line
try:
return match.split('MTU', 4)[1].split(' ')[1]
except UnboundLocalError:
return "no iperf test results: failed"
def speed(self):
with open(self.iperf_out) as f:
for line in f.readlines():
if "bits" in line:
match = line
try:
return match.rsplit(' ', 2)[1]
except UnboundLocalError:
return "no iperf test results: failed"
def selfcheck(self):
subprocess.check_output(["iperf", "-c", "localhost", "-t", "1"])
def hostcheck(self, nodes):
# Wait for other nodes to start their servers...
for node in nodes:
msg = "checking iperf on {}".format(node[1])
hookenv.log(msg)
cmd = "iperf -t1 -c {}".format(node[1])
os.system(cmd)
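# Rough usage sketch for the Iperf helper above (a hedged illustration; it
# assumes a charm hook context, and mtu()/speed() only report real numbers
# once a remote client has connected to the listening server):
#
#   iperf = Iperf()
#   iperf.install_iperf()
#   iperf.listen()                               # background server, writes iperf_out
#   iperf.selfcheck()                            # loopback smoke test
#   iperf.hostcheck([('magpie/1', '10.0.0.2')])  # drive tests against peer units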
def safe_status(workload, status):
cfg = hookenv.config()
if not cfg.get('supress_status'):
hookenv.status_set(workload, status)
def ping(input, ping_time, ping_tries):
ping_string = "ping -c {} -w {} {} > /dev/null 2>&1"\
.format(ping_tries, ping_time, input)
hookenv.log('Ping command: {}'.format(ping_string), 'DEBUG')
response = os.system(ping_string)
if response == 0:
return 0
else:
return 1
def check_local_hostname():
local_hostname = subprocess.check_output('hostname', shell=True)\
.decode('utf-8').rstrip()
lookup_cmd = "getent hosts {}".format(local_hostname)
hookenv.log('Looking up local hostname: {}'.format(local_hostname))
try:
result = subprocess.check_output(lookup_cmd, shell=True)\
.decode('utf-8').rstrip()
result = ''
stderr = 0
except subprocess.CalledProcessError as exc:
result = local_hostname
stderr = exc.returncode
return result, stderr
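# Return-code convention for the two checks below: 0 means the check is not
# configured (skipped), 100 means it passed, and 200 means it failed;
# check_nodes() maps these onto the "ok"/"failed" fragments of the status line.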
def check_local_mtu(required_mtu, iface_mtu):
if required_mtu == 0:
return 0
elif 0 <= (int(iface_mtu) - int(required_mtu)) <= 12:
return 100
else:
return 200
def check_min_speed(min_speed, iperf_speed):
if min_speed == 0:
return 0
elif min_speed <= iperf_speed:
return 100
elif min_speed > iperf_speed:
return 200
def check_port_description(lldp):
iface_dir = "/sys/class/net"
status=None
local_hostname = subprocess.check_output('hostname', shell=True)\
.decode('utf-8').rstrip()
for r,dirs,files in os.walk(iface_dir):
for d in dirs:
if d == 'lo':
continue
if d.startswith('vnet'):
continue
if d.startswith('veth'):
continue
if check_iface_type(d) == 'eth':
if not check_iface_down(d):
desc = lldp.get_interface_port_descr(d)
hookenv.log("Port {} description {}".format(d,desc),
'INFO')
if desc:
if not re.search(local_hostname,desc):
if status:
status="{} {}:{}"\
.format(status,d,desc)
else:
status="{}:{}".format(d,desc)
if status:
return "ports failed: {}".format(status)
else:
return "ports ok"
def check_iface_type(iface):
iface_dir = "/sys/class/net/{}".format(iface)
with open("{}/uevent".format(iface_dir)) as fos:
content = fos.read()
if re.search('DEVTYPE', content):
return "complex"
return 'eth'
def check_iface_down(iface):
iface_dir = "/sys/class/net/{}".format(iface)
with open("{}/operstate".format(iface_dir)) as fos:
content = fos.read()
if not re.search('up', content):
return "down"
with open("{}/carrier".format(iface_dir)) as fos:
content = fos.read()
if not re.search('1', content):
return "down"
return None
def check_bond(bond,lldp=None):
bond_path = "/sys/class/net/{}".format(bond)
if not os.path.isdir( bond_path ):
return "missing"
if check_iface_down(bond):
return "down"
with open("{}/bonding/slaves".format(bond_path)) as fos:
content = fos.read()
vlan=None
for slave in content.split():
if check_iface_down(slave):
return "{} down".format(slave)
if lldp:
if vlan:
if not vlan == lldp.get_interface_vlan(slave):
return "vlan mismatch"
else:
vlan = lldp.get_interface_vlan(slave)
return None
def check_bonds(bonds,lldp=None):
bonds_status=None
for bond in [b.strip() for b in bonds.split(',')]:
bond_status = check_bond(bond,lldp)
if bond_status:
if bonds_status:
bonds_status="{} {}:{}".format(bonds_status,bond,bond_status)
else:
bonds_status="{}:{}".format(bond,bond_status)
if bonds_status:
return "bonds failed: {}".format(bonds_status)
else:
return "bonds ok"
def check_nodes(nodes, iperf_client=False):
cfg = hookenv.config()
local_ip = hookenv.unit_private_ip()
iface_lines = subprocess.check_output(["ip", "route", "show", "to", "match", local_ip]).decode()
iface_lines = iface_lines.split('\n')
for line in iface_lines:
if re.match('.* via .*', line) is None:
break
primary_iface = str(line).split('dev')[1].split(' ')[1]
iface_mtu = get_nic_mtu(primary_iface)
required_mtu = cfg.get('required_mtu')
min_speed = cfg.get('min_speed')
msg = "MTU for iface: {} is {}".format(primary_iface, iface_mtu)
hookenv.log(msg, 'INFO')
#if required_mtu != 0 and not 0 <= (int(iface_mtu) - int(required_mtu)) <= 12:
# iperf_status = ", local mtu check failed, required_mtu: {}, iface mtu: {}".format(required_mtu, iface_mtu)
#elif required_mtu == 0 or 0 <= (int(iface_mtu) - int(required_mtu)) <= 12:
port_status=""
lldp = None
if cfg.get('use_lldp'):
lldp = Lldp()
lldp.enable()
lldp.collect_data()
if cfg.get('check_port_description'):
port_status = "{}, ".format(check_port_description(lldp))
    cfg_check_bonds = cfg.get('check_bonds')
bond_status=""
if cfg_check_bonds:
bond_status = "{}, ".format(check_bonds(cfg_check_bonds,lldp))
cfg_check_iperf = cfg.get('check_iperf')
if cfg_check_iperf:
hookenv.log("Running iperf test", 'INFO')
if not iperf_client:
iperf = Iperf()
mtu = iperf.mtu()
speed = iperf.speed()
# Make space for 8 or 12 byte variable overhead (TCP options)
if "failed" not in mtu:
if 0 <= (int(iface_mtu) - int(mtu)) <= 12:
iperf_status = ", net mtu ok: {}".format(iface_mtu)
else:
iperf_status = ", net mtu failed, mismatch: {} packet vs {} on iface {}".format(
mtu, iface_mtu, primary_iface)
else:
iperf_status = ", network mtu check failed"
if "failed" not in speed:
if check_min_speed(min_speed, float(speed)) == 0:
iperf_status = iperf_status + ", {} mbit/s".format(speed)
if check_min_speed(min_speed, float(speed)) == 100:
iperf_status = iperf_status + ", speed ok: {} mbit/s".format(speed)
if check_min_speed(min_speed, float(speed)) == 200:
iperf_status = iperf_status + ", speed failed: {} < {} mbit/s".format(speed, str(min_speed))
else:
iperf_status = iperf_status + ", iperf speed check failed"
elif iperf_client:
iperf_status = ", iperf leader, mtu: {}".format(iface_mtu)
iperf = Iperf()
iperf.hostcheck(nodes)
else:
iperf_status = ""
if check_local_mtu(required_mtu, iface_mtu) == 100:
iperf_status = iperf_status + ", local mtu ok, required: {}".format(required_mtu)
elif check_local_mtu(required_mtu, iface_mtu) == 200:
iperf_status = iperf_status + ", local mtu failed, required: {}, iface: {}".format(required_mtu, iface_mtu)
hookenv.log('doing other things after iperf', 'INFO')
cfg_check_local_hostname = cfg.get('check_local_hostname')
if cfg_check_local_hostname:
no_hostname = check_local_hostname()
if no_hostname[0] == '':
no_hostname = ', local hostname ok'
hookenv.log('Local hostname lookup OK: {}'.format(
str(no_hostname)), 'INFO')
else:
no_hostname = ', local hostname failed'
hookenv.log('Local hostname lookup FAILED: {}'.format(
str(no_hostname)), 'ERROR')
no_ping = check_ping(nodes)
cfg_check_dns = cfg.get('check_dns')
if cfg_check_dns:
no_dns = check_dns(nodes)
hookenv.log("Units with DNS problems: " + str(no_dns))
try:
dns_status
except NameError:
dns_status = ''
else:
dns_status = ''
no_dns = ([], [], [])
try:
dns_status
except NameError:
dns_status = ''
if not no_ping:
no_ping = 'icmp ok'
else:
no_ping = 'icmp failed: ' + str(no_ping)
if no_dns == ([], [], []):
dns_status = ', dns ok'
else:
no_rev = no_dns[0]
no_fwd = no_dns[1]
no_match = no_dns[2]
if no_match != []:
dns_status = ', match dns failed: ' + str(no_match)
else:
if no_rev:
no_rev = ', rev dns failed: ' + str(no_rev)
if no_fwd:
no_fwd = ', fwd dns failed: ' + str(no_fwd)
if no_rev == []:
no_rev = ''
if no_fwd == []:
no_fwd = ''
dns_status = '{}{}{}'\
.format(dns_status, str(no_rev), str(no_fwd))
if cfg_check_local_hostname:
check_status = '{}{}{}{}{}{}'.format(
port_status,bond_status,no_ping,
str(no_hostname), str(dns_status), str(iperf_status))
else:
check_status = '{}{}{}{}{}'.format(
port_status,bond_status,no_ping,
str(dns_status), str(iperf_status))
if 'failed' in check_status:
workload = 'blocked'
else:
workload = 'active'
safe_status(workload, check_status)
reactive_state = {'icmp': no_ping, 'dns': dns_status}
return reactive_state
def check_ping(nodes):
cfg = hookenv.config()
ping_time = cfg.get('ping_timeout')
ping_tries = cfg.get('ping_tries')
try:
unreachable
except NameError:
unreachable = []
for node in nodes:
unit_id = node[0].split('/')[1]
hookenv.log('Pinging unit_id: ' + str(unit_id), 'INFO')
if ping(node[1], ping_time, ping_tries) == 1:
hookenv.log('Ping FAILED for unit_id: ' + str(unit_id), 'ERROR')
if unit_id not in unreachable:
unreachable.append(unit_id)
else:
hookenv.log('Ping OK for unit_id: ' + str(unit_id), 'INFO')
if unit_id in unreachable:
unreachable.remove(unit_id)
return unreachable
def check_dns(nodes):
cfg = hookenv.config()
dns_server = cfg.get('dns_server')
dns_tries = cfg.get('dns_tries')
dns_time = cfg.get('dns_time')
try:
norev
except NameError:
norev = []
try:
nofwd
except NameError:
nofwd = []
try:
nomatch
except NameError:
nomatch = []
hookenv.log("DNS (ALL NODES): {}".format(nodes))
for node in nodes:
ip = node[1]
if not re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip):
hookenv.log("private-address appears to be a hostname: {},"
" attempting forward lookup...", 'WARN')
ip = forward_dns(ip, dns_server, dns_tries, dns_time)[0]
else:
hookenv.log('private-address appears to be an IP', 'INFO')
unit_id = node[0].split('/')[1]
hookenv.log("Reverse lookup for ip: {}, node: {},"
" unit_id: {}".format(ip, node[0], unit_id), 'INFO')
reverse, r_stderr = reverse_dns(ip, dns_server, dns_tries, dns_time)
hookenv.log("Reverse result for unit_id: {}, hostname: {},"
" exitcode: {}".format(unit_id, str(reverse),
str(r_stderr)))
if r_stderr:
hookenv.log("Reverse FAILED for"
" unit_id: {}".format(unit_id), 'ERROR')
if unit_id not in norev:
norev.append(unit_id)
continue
else:
hookenv.log("Reverse OK for unit_id: {}".format(unit_id), 'INFO')
if unit_id in norev:
norev.remove(unit_id)
hookenv.log("Forward lookup for hostname: {}, node: {},"
" unit_id: {}".format(str(reverse), node[0], unit_id),
'INFO')
for rev in reverse.split():
forward, f_stderr = forward_dns(rev, dns_server,
dns_tries, dns_time)
hookenv.log("Forward result for unit_id: {}, ip: {},"
" exitcode: {}".format(unit_id, forward,
str(f_stderr)))
if f_stderr:
hookenv.log("Forward FAILED for"
" unit_id: {}".format(unit_id), 'ERROR')
if unit_id not in nofwd:
nofwd.append(unit_id)
else:
hookenv.log("Forward OK for"
" unit_id: {}".format(unit_id), 'INFO')
if unit_id in nofwd:
nofwd.remove(unit_id)
if ip != forward:
if not re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$",
forward):
forward = "Can not resolve hostname to IP {}"\
.format(repr(forward))
hookenv.log("Original IP and Forward MATCH FAILED for"
" unit_id: {}, Original: {}, Forward: {}"
.format(unit_id, ip, forward), 'ERROR')
if unit_id not in nomatch:
nomatch.append(unit_id)
else:
hookenv.log("Original IP and Forward MATCH OK for unit_id:"
" {}, Original: {}, Forward: {}"
.format(unit_id, ip, forward),
'INFO')
if unit_id in nomatch:
nomatch.remove(unit_id)
break
return norev, nofwd, nomatch
def reverse_dns(input, dns_server, tries, timeout):
cmd = '/usr/bin/dig -x ' + input + ' +short +tries={} +time={}'\
.format(tries, timeout)
if dns_server:
cmd = '{} @{}'.format(cmd, dns_server)
hookenv.log('DNS Reverse command: {}'.format(cmd), 'DEBUG')
try:
result = subprocess.check_output(cmd, shell=True)\
.decode('utf-8').rstrip()
stderr = 0
except subprocess.CalledProcessError as exc:
result = "Reverse DNS lookup error: " + str(exc.output)
stderr = exc.returncode
if result == '':
result = 'No reverse response'
stderr = 1
return result, stderr
def forward_dns(input, dns_server, tries, timeout):
cmd = '/usr/bin/dig ' + input + ' +short +tries={} +time={}'\
.format(tries, timeout)
if dns_server:
cmd = '{} @{}'.format(cmd, dns_server)
hookenv.log('DNS Forward command: {}'.format(cmd), 'DEBUG')
try:
result = subprocess.check_output(cmd, shell=True)\
.decode('utf-8').rstrip()
stderr = 0
except subprocess.CalledProcessError as exc:
result = "Forward DNS lookup error: " + str(exc.output)
stderr = exc.returncode
if result == '':
result = 'No forward response'
stderr = 1
return result, stderr
|
py | 1a3d9aff249c4f1062d56327746c99603b34233f | # -*- coding: utf-8 -*-
import pytest
import datetime
from web.processors.event import create_or_update_event
@pytest.mark.django_db
def test_unknown_URL(db, client):
response = client.get('/bar-foo/')
assert response.status_code == 404
@pytest.mark.django_db
def test_country_redirect(db, client):
# Test without a slash in the end
response = client.get('/AB')
assert response.status_code == 301
assert response['Location'][-5:] == '/#!AB'
# and with one
response = client.get('/AB/')
assert response.status_code == 301
assert response['Location'][-5:] == '/#!AB'
@pytest.mark.django_db
def test_view_just_id(admin_user, db, client):
event_data = {
'audience': [3],
'theme': [1, 2],
'contact_person': u'[email protected]',
'country': u'SI',
'description': u'Lorem ipsum dolor sit amet',
'event_url': u'',
'location': u'Ljubljana, Slovenia',
'organizer': u'CodeCatz test',
"creator": admin_user,
'start_date': datetime.datetime.now(),
'end_date': datetime.datetime.now() + datetime.timedelta(days=3, hours=3),
'tags': [u'css', u'html', u'web'],
'title': u'Redirect Test',
}
test_event = create_or_update_event(event_id=None, **event_data)
# Test without a slash in the end
response = client.get('/view/1')
assert response.status_code == 301
# Test with a slash in the end
response = client.get('/view/1/')
assert response.status_code == 302
|
py | 1a3d9b0a0571c7a1ee61b4c7a1c58d627f8e44e2 | import json
import os
from typing import Union
from pathlib import Path
from jsonschema import RefResolver, Draft7Validator
from aqt import mw
from aqt.qt import QWidget, QLabel, Qt
from ...lib.config import serialize_setting, deserialize_setting
# serialize_script / deserialize_script are used below; they are assumed to be
# exported by the same config module as the setting (de)serializers.
from ...lib.config import serialize_script, deserialize_script
from ...lib.config_types import TWConcrScript, TWMetaScript
from ...lib.registrar import get_interface
from ..tw_script_tab_ui import Ui_TWWrapTab
from .tw_setting_add_replace import TWSettingAddReplace
from .tw_script_config import TWScriptConfig
from .util import mapTruthValueToIcon
class TWScriptTab(QWidget):
def __init__(self, main):
super().__init__()
self.ui = Ui_TWWrapTab()
self.ui.setupUi(self)
self.ui.addPushButton.clicked.connect(self.addScript)
self.ui.deletePushButton.clicked.connect(self.deleteScript)
self.ui.downPushButton.clicked.connect(self.moveDown)
self.ui.upPushButton.clicked.connect(self.moveUp)
self.ui.importButton.clicked.connect(self.importDialog)
self.ui.scriptsTable.currentCellChanged.connect(self.updateButtonsForCurrentCell)
self.ui.scriptsTable.cellDoubleClicked.connect(self.editScript)
self.ui.scriptsTable.setColumnWidth(1, 75)
self.ui.scriptsTable.setColumnWidth(2, 55)
self.ui.scriptsTable.setColumnWidth(3, 55)
def setupUi(self, setting):
self.modelName = setting.model_name
self.ui.enableCheckBox.setChecked(setting.enabled),
self.ui.insertStubCheckBox.setChecked(setting.insert_stub),
self.scr = setting.scripts
self.drawScripts()
self.updateButtons(False)
def drawScripts(self):
self.ui.scriptsTable.clearContents()
self.ui.scriptsTable.setRowCount(len(self.scr))
headerLabels = []
for idx, scr in enumerate(self.scr):
headerLabels.append(f'Script {idx}')
if isinstance(scr, TWConcrScript):
self.setRowMod(
idx,
scr.name,
scr.version,
mapTruthValueToIcon(scr.enabled),
mapTruthValueToIcon(False),
json.dumps(scr.conditions),
)
else:
iface = get_interface(scr.tag)
script = iface.getter(scr.id, scr.storage)
self.setRowMod(
idx,
script.name,
script.version,
mapTruthValueToIcon(script.enabled),
mapTruthValueToIcon(True),
json.dumps(script.conditions),
)
self.ui.scriptsTable.setVerticalHeaderLabels(headerLabels)
def setRowMod(self, row, *args):
for i, text in enumerate(args):
label = QLabel()
label.setText(text)
label.setAlignment(Qt.AlignCenter)
self.ui.scriptsTable.setCellWidget(row, i, label)
def editScript(self, row, column):
def saveScript(newScript):
self.scr[row] = newScript
self.drawScripts()
a = TWScriptConfig(mw, self.modelName, saveScript)
a.setupUi(self.scr[row])
a.exec_()
###########
def updateButtonsForCurrentCell(self, currentRow, currentColumn, previousRow, previousColumn):
self.updateButtons(currentRow != -1)
def updateButtons(self, state=True):
self.ui.deletePushButton.setEnabled(state)
self.ui.downPushButton.setEnabled(state)
self.ui.upPushButton.setEnabled(state)
def addScript(self):
newScript = deserialize_script(self.modelName, {
'name': 'New Script',
'description': '',
'enabled': True,
'conditions': [],
'statements': [],
})
self.scr.append(newScript)
self.drawScripts()
def deleteScript(self):
current_scr: Union[TWConcrScript, TWMetaScript] = self.scr[self.ui.scriptsTable.currentRow()]
def show_nondeletable():
from aqt.utils import showInfo # not to be deleted!
showInfo(
'This script does not allow for deletion.\n'
'You might have to uninstall the add-on which inserted this script.'
)
if isinstance(current_scr, TWConcrScript):
del self.scr[self.ui.scriptsTable.currentRow()] # gotta delete within dict
else:
iface = get_interface(current_scr.tag)
if iface.deletable:
is_deletable = iface.deletable(current_scr.id, current_scr.storage)
if is_deletable:
del self.scr[self.ui.scriptsTable.currentRow()] # gotta delete within dict
else:
show_nondeletable()
else:
show_nondeletable()
self.drawScripts()
self.updateButtons(False)
def moveDown(self):
i = self.ui.scriptsTable.currentRow()
if len(self.scr) != 1 and i < len(self.scr) - 1:
self.scr[i], self.scr[i + 1] = self.scr[i + 1], self.scr[i]
self.drawScripts()
self.ui.scriptsTable.setCurrentCell(i + 1, 0)
def moveUp(self):
i = self.ui.scriptsTable.currentRow()
if len(self.scr) != 1 and i > 0:
self.scr[i], self.scr[i - 1] = self.scr[i - 1], self.scr[i]
self.drawScripts()
self.ui.scriptsTable.setCurrentCell(i - 1, 0)
###########
def exportData(self):
result = deserialize_setting(self.modelName, {
"enabled": self.ui.enableCheckBox.isChecked(),
"insertStub": self.ui.insertStubCheckBox.isChecked(),
"scripts": self.scr,
})
return result
def importDialog(self):
def addAfterImport(scripts_new):
self.setupUi(self.scr + [deserialize_script(self.modelName, scr) for scr in scripts_new])
def replaceAfterImport(scripts_new):
self.setupUi([deserialize_script(self.modelName, scr) for scr in scripts_new])
dirpath = Path(f'{os.path.dirname(os.path.realpath(__file__))}', '../../json_schemas/scripts.json')
schema_path = dirpath.absolute().as_uri()
with dirpath.open('r') as jsonfile:
schema = json.load(jsonfile)
resolver = RefResolver(
schema_path,
schema,
)
validator = Draft7Validator(schema, resolver=resolver, format_checker=None)
dial = TWSettingAddReplace(mw)
dial.setupUi(
json.dumps([serialize_script(scr) for scr in self.scr], sort_keys=True, indent=4),
validator,
addAfterImport,
replaceAfterImport,
)
dial.exec_()
|
py | 1a3d9b5b88d652ee88872c6febafd9b241269138 | r"""
Polynomial Regression
=====================
This example shows how to use the :py:class:`pylops.Regression` operator
to perform *Polynomial regression analysis*.
In short, polynomial regression is the problem of finding the best fitting
coefficients for the following equation:
.. math::
y_i = \sum_{n=0}^{order} x_n t_i^n \qquad \forall i=1,2,...,N
As we can express this problem in a matrix form:
.. math::
\mathbf{y}= \mathbf{A} \mathbf{x}
our solution can be obtained by solving the following optimization problem:
.. math::
J= ||\mathbf{y} - \mathbf{A} \mathbf{x}||_2
See documentation of :py:class:`pylops.Regression` for more detailed
definition of the forward problem.
"""
import numpy as np
import matplotlib.pyplot as plt
import pylops
plt.close('all')
np.random.seed(10)
###############################################################################
# Define the input parameters: number of samples along the t-axis (``N``),
# order (``order``), regression coefficients (``x``), and standard deviation
# of noise to be added to data (``sigma``).
N = 30
order = 3
x = np.array([1., .05, 0., -.01])
sigma = 1
###############################################################################
# Let's create the time axis and initialize the
# :py:class:`pylops.Regression` operator
t = np.arange(N, dtype='float64') - N//2
PRop = pylops.Regression(t, order=order, dtype='float64')
###############################################################################
# We can then apply the operator in forward mode to compute our data points
# along the x-axis (``y``). We will also generate some random gaussian noise
# and create a noisy version of the data (``yn``).
y = PRop*x
yn = y + np.random.normal(0, sigma, N)
###############################################################################
# We are now ready to solve our problem. As we are using an operator from the
# :py:class:`pylops.LinearOperator` family, we can simply use ``/``,
# which in this case will solve the system by means of an iterative solver
# (i.e., :py:func:`scipy.sparse.linalg.lsqr`).
xest = PRop / y
xnest = PRop / yn
###############################################################################
# Let's plot the best fitting curve for the case of noise free and noisy data
plt.figure(figsize=(5, 7))
plt.plot(t, PRop*x, 'k', lw=4,
label=r'true: $x_0$ = %.2f, $x_1$ = %.2f, '
r'$x_2$ = %.2f, $x_3$ = %.2f' % (x[0], x[1], x[2], x[3]))
plt.plot(t, PRop*xest, '--r', lw=4,
label='est noise-free: $x_0$ = %.2f, $x_1$ = %.2f, '
r'$x_2$ = %.2f, $x_3$ = %.2f' %
(xest[0], xest[1], xest[2], xest[3]))
plt.plot(t, PRop*xnest, '--g', lw=4,
label='est noisy: $x_0$ = %.2f, $x_1$ = %.2f, '
r'$x_2$ = %.2f, $x_3$ = %.2f' %
(xnest[0], xnest[1], xnest[2], xnest[3]))
plt.scatter(t, y, c='r', s=70)
plt.scatter(t, yn, c='g', s=70)
plt.legend(fontsize='x-small')
###############################################################################
# We consider now the case where some of the observations have large errors.
# Such elements are generally referred to as *outliers* and can affect the
# quality of the least-squares solution if not treated with care. In this
# example we will see how using a L1 solver such as
# :py:func:`pylops.optimization.sparsity.IRLS` can dramatically improve the
# quality of the estimation of intercept and gradient.
# Add outliers
yn[1] += 40
yn[N-2] -= 20
# IRLS
nouter = 20
epsR = 1e-2
epsI = 0
tolIRLS = 1e-2
xnest = PRop / yn
xirls, nouter, xirls_hist, rw_hist = \
pylops.optimization.sparsity.IRLS(PRop, yn, nouter, threshR=False,
epsR=epsR, epsI=epsI,
tolIRLS=tolIRLS, returnhistory=True)
print('IRLS converged at %d iterations...' % nouter)
plt.figure(figsize=(5, 7))
plt.plot(t, PRop*x, 'k', lw=4,
label=r'true: $x_0$ = %.2f, $x_1$ = %.2f, '
r'$x_2$ = %.2f, $x_3$ = %.2f' % (x[0], x[1], x[2], x[3]))
plt.plot(t, PRop*xnest, '--r', lw=4,
label=r'L2: $x_0$ = %.2f, $x_1$ = %.2f, '
r'$x_2$ = %.2f, $x_3$ = %.2f' % (xnest[0], xnest[1], xnest[2], xnest[3]))
plt.plot(t, PRop*xirls, '--g', lw=4,
label=r'IRLS: $x_0$ = %.2f, $x_1$ = %.2f, '
r'$x_2$ = %.2f, $x_3$ = %.2f' % (xirls[0], xirls[1], xirls[2], xirls[3]))
plt.scatter(t, y, c='r', s=70)
plt.scatter(t, yn, c='g', s=70)
plt.legend(fontsize='x-small')
|
py | 1a3d9c45a8c7942de45d44f65c5566e03e6834fa | #!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020-2022 EntySec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from setuptools import setup, find_packages
setup(name='hatloads',
version='1.0.0',
description='HatLoads is a HatSploit collection of generic payloads designed to provide a wide range of attacks without having to spend time writing new ones.',
url='https://github.com/EntySec/HatLoads',
author='EntySec',
author_email='[email protected]',
license='MIT',
python_requires='>=3.7.0',
packages=find_packages(),
include_package_data=True,
install_requires=[
'hatasm @ git+https://github.com/EntySec/HatAsm',
'hatvenom @ git+https://github.com/EntySec/HatVenom'
],
zip_safe=False
)
|
py | 1a3d9e70c45288e06ecb31bce200ded5696180db | # -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import pytest
from bkuser_core.departments.models import Department
from bkuser_core.profiles.models import Profile
from bkuser_core.user_settings.models import Setting
pytestmark = pytest.mark.django_db
class TestCategory:
def test_delete(self, test_ldap_category, test_profile, test_department, test_setting):
test_ldap_category.delete()
assert not test_ldap_category.enabled
assert test_ldap_category.inactive
assert (
Profile.objects.filter(category_id=test_ldap_category.id, enabled=False).count()
== Profile.objects.filter(category_id=test_ldap_category.id).count()
)
assert (
Profile.objects.filter(category_id=test_ldap_category.id, status="DELETED").count()
== Profile.objects.filter(category_id=test_ldap_category.id).count()
)
assert (
Department.objects.filter(category_id=test_ldap_category.id, enabled=False).count()
== Department.objects.filter(category_id=test_ldap_category.id).count()
)
assert (
Setting.objects.filter(category_id=test_ldap_category.id, enabled=False).count()
== Setting.objects.filter(category_id=test_ldap_category.id).count()
)
|
py | 1a3d9f7914d4eb89b2d404d590d29e2192afa9a8 | #-----------------------------------------------------------------------------
# Runtime: 76ms
# Memory Usage:
# Link:
#-----------------------------------------------------------------------------
class Solution:
def searchMatrix(self, matrix: [[int]], target: int) -> bool:
row_length = len(matrix)
if row_length == 0:
return False
i, j = 0, len(matrix[0]) - 1
while i < row_length and j >= 0:
if matrix[i][j] == target:
return True
elif matrix[i][j] < target:
i += 1
else:
j -= 1
return False
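# Quick sanity check (hypothetical input, not part of the original submission).
# Starting from the top-right corner, each comparison discards either a row
# (value too small) or a column (value too large).
if __name__ == '__main__':
    grid = [[1, 4, 7, 11],
            [2, 5, 8, 12],
            [3, 6, 9, 16]]
    assert Solution().searchMatrix(grid, 8) is True
    assert Solution().searchMatrix(grid, 10) is False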
|
py | 1a3d9fd7f68b98ba56f28de82b528fd3bd6f6247 | #!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'getTotalX' function below.
#
# The function is expected to return an INTEGER.
# The function accepts following parameters:
# 1. INTEGER_ARRAY a
# 2. INTEGER_ARRAY b
#
def isCommonFactor(b, num):
for e in b:
if (e % num) != 0:
return False
return True
def isCommonMultiple(a, num):
for e in a:
if (num % e) != 0:
return False
return True
debug = False
def debug_print(*args):
global debug
if debug:
for arg in args:
sys.stdout.write(str(arg))
sys.stdout.write(' ')
sys.stdout.write('\n')
def findMultiplesFactors(a, b):
ismultiple = (b % a) == 0
result = []
if not ismultiple:
return result
result.append(a)
multiple = 1
while a * multiple < b:
multiple += 1
if multiple * a == b:
break
ismultiple = ((b % (a * multiple)) == 0)
if ismultiple:
debug_print("adds", a * multiple)
result.append(a * multiple)
else:
debug_print("skips", a * multiple)
return result + [b]
def findMultiplesFactors2(a, b):
result = []
tmp = b // a
if a * tmp != b:
return []
max_multiple = (b + a) // 2
result.append(a)
multiple = 1
a_mult = a
while a_mult < max_multiple:
multiple += 1
a_mult = a * multiple
tmp = b // a_mult
if a_mult == b or a_mult * tmp != b:
debug_print("skips", a_mult)
continue
debug_print("adds", a_mult)
result.append(a_mult)
result.append(b)
return sorted(result)
if debug:
for a in range(1, 200):
for b in range(a, 200):
ref = findMultiplesFactors(a, b)
cand = findMultiplesFactors2(a, b)
if ref != cand:
print('findMultiplesFactors(%d, %d) returned %s' % (a, b, ref))
print('findMultiplesFactors2(%d, %d) returned %s' % (a, b, cand))
assert(False)
def getTotalX(a, b):
a.sort()
b.sort()
if a[-1] > b[0]:
# No solution here
return 0
elif a[-1] == b[0]:
# only possible solution is b[0]
if isCommonFactor(b, b[0]) and isCommonMultiple(a, b[0]):
return 1
return 0
# Find the possible solutions
solutions = 0
mults = findMultiplesFactors2(a[-1], b[0])
for mult in mults:
if isCommonFactor(b, mult) and isCommonMultiple(a, mult):
solutions += 1
return solutions
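# Small self-check (hypothetical values mirroring the classic sample case):
# for a = [2, 4] and b = [16, 32, 96] the integers 4, 8 and 16 are multiples of
# every element of a and factors of every element of b, so the answer is 3.
if debug:
    assert getTotalX([2, 4], [16, 32, 96]) == 3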
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
first_multiple_input = input().rstrip().split()
n = int(first_multiple_input[0])
m = int(first_multiple_input[1])
arr = list(map(int, input().rstrip().split()))
brr = list(map(int, input().rstrip().split()))
total = getTotalX(arr, brr)
fptr.write(str(total) + '\n')
fptr.close()
|
py | 1a3da01267cd2c037d0f36c488434fa6640585a8 | import numpy as np
n, m = tuple(map(int, input().split()))
a = np.array([input().split() for i in range(0, n)], int)
b = np.array([input().split() for j in range(0, n)], int)
print(a+b, a-b, a*b, a//b, a % b, a**b, sep='\n') |
py | 1a3da034c86e6f88f8898ab6ada5840dd533599b | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constants and types shared by tf.Transform Beam package."""
import collections
import enum
import os
import uuid
import apache_beam as beam
from apache_beam.typehints import Union
from tensorflow_transform import nodes
from tfx_bsl.telemetry import util
# TODO(https://issues.apache.org/jira/browse/SPARK-22674): Switch to
# `collections.namedtuple` or `typing.NamedTuple` once the Spark issue is
# resolved.
from tfx_bsl.types import tfx_namedtuple
NUMERIC_TYPE = Union[float, int]
PRIMITIVE_TYPE = Union[NUMERIC_TYPE, str, bytes]
METRICS_NAMESPACE = util.MakeTfxNamespace(['Transform'])
# Depending on the environment (TF 1.x vs 2.x, for example) we may want to
# register different beam implementations for the TFT beam nodes. These
# tags are used to identify the implementation to use under the current
# environment.
class EnvironmentTags(enum.Enum):
TF_COMPAT_V1 = 'tf_compat_v1'
TF_V2_ONLY = 'tf_v2_only'
_ALLOWED_PTRANSFORM_TAGS = [tag.value for tag in EnvironmentTags]
def get_unique_temp_path(base_temp_dir):
"""Return a path to a unique temp dir from given base temp dir.
Note this doesn't create the path that it returns.
Args:
base_temp_dir: A base directory
Returns:
The path name of a subdirectory of base_temp_dir, where the subdirectory is
unique.
"""
return os.path.join(base_temp_dir, uuid.uuid4().hex)
class _PtransformWrapper:
"""A wrapper around registered implementations of beam nodes."""
_GENERAL_ENVIRONMENT_TAG = object()
def __init__(self):
self._ptransform_by_tag = {}
def add_ptransform(self, ptransform_class, tags):
"""Add `ptransform_class` for all `tags`."""
# Many tags can refer to the same ptransform_class, but each
# ptransform_class should be registered only once.
tags = {self._GENERAL_ENVIRONMENT_TAG} if tags is None else tags
    assert all(tag not in self._ptransform_by_tag for tag in tags)
for tag in tags:
self._ptransform_by_tag[tag] = ptransform_class
def get_ptransform(self, tag):
"""Retrieves ptransform for `tag`.
Args:
tag: A string key (or None) to retrieve corresponding ptransform.
Returns:
A tuple of a registered beam.PTransform implementation and the tag it was
registered with.
Raises:
KeyError: If no registered PTransform implementation could be found.
"""
if tag is None or tag not in self._ptransform_by_tag:
return self._ptransform_by_tag[self._GENERAL_ENVIRONMENT_TAG], None
return self._ptransform_by_tag[tag], tag.value
_PTRANSFORM_BY_OPERATION_DEF_SUBCLASS = (
collections.defaultdict(_PtransformWrapper))
def register_ptransform(operation_def_subclass, tags=None):
"""Decorator to register a PTransform as the implementation for an analyzer.
This function is used to define implementations of the analyzers defined in
tensorflow_transform/analyzer_nodes.py and also the internal operations
defined in tensorflow_transform/beam/beam_nodes.py. The registered PTransform
will be invoked as follows:
outputs = inputs | operation.label >> MyPTransform(operation, extra_args)
  where operation is the instance of the subclass that was registered,
  extra_args are global arguments available to each PTransform (see
  ConstructBeamPipelineVisitor.extra_args) and `inputs` is a tuple of
  PCollections corresponding to the inputs of the OperationNode being
  implemented. The return value `outputs` should be a tuple of PCollections
corresponding to the outputs of the OperationNode. If the OperationNode has
a single output then the return value can also be a PCollection instead of a
tuple.
In some cases the implementation cannot be a PTransform and so instead the
value being registered may also be a function. The registered function will
be invoked as follows:
outputs = my_function(inputs, operation, extra_args)
where inputs, operation, extra_args and outputs are the same as for the
PTransform case.
Args:
operation_def_subclass: The class of attributes that is being registered.
Should be a subclass of `tensorflow_transform.nodes.OperationDef`.
tags: A set of string tags belonging to `EnvironmentTags`. If
provided, the PTransform will be registered against all of them.
Returns:
A class decorator that registers a PTransform or function as an
implementation of the OperationDef subclass.
"""
def register(ptransform_class):
assert isinstance(ptransform_class, type)
assert issubclass(ptransform_class, beam.PTransform)
    assert tags is None or all(tag in _ALLOWED_PTRANSFORM_TAGS for tag in tags)
_PTRANSFORM_BY_OPERATION_DEF_SUBCLASS[
operation_def_subclass].add_ptransform(ptransform_class, tags)
return ptransform_class
return register
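# Illustrative sketch only (real registrations live in the analyzer/beam-node
# implementation modules): registering a hypothetical OperationDef subclass
# `_MyOpDef` for the TF2-only environment would look roughly like
#
#   @register_ptransform(_MyOpDef, tags={EnvironmentTags.TF_V2_ONLY})
#   class _MyOpImpl(beam.PTransform):
#
#     def __init__(self, operation, extra_args):
#       super().__init__()
#       self._operation, self._extra_args = operation, extra_args
#
#     def expand(self, inputs):
#       return inputs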
class ConstructBeamPipelineVisitor(nodes.Visitor):
"""Visitor that constructs the beam pipeline from the node graph."""
ExtraArgs = tfx_namedtuple.namedtuple( # pylint: disable=invalid-name
'ExtraArgs', [
'base_temp_dir',
'pipeline',
'flat_pcollection',
'pcollection_dict',
'tf_config',
'graph',
'input_signature',
'input_specs',
'input_tensor_adapter_config',
'use_tf_compat_v1',
'cache_pcoll_dict',
'preprocessing_fn',
])
def __init__(self, extra_args):
self._extra_args = extra_args
def visit(self, operation, inputs):
try:
ptransform_wrapper = (
_PTRANSFORM_BY_OPERATION_DEF_SUBCLASS[operation.__class__])
environment_tag = (
EnvironmentTags.TF_COMPAT_V1
if self._extra_args.use_tf_compat_v1 else EnvironmentTags.TF_V2_ONLY)
ptransform, tag = ptransform_wrapper.get_ptransform(environment_tag)
except KeyError:
raise ValueError('No implementation for {} was registered'.format(
operation))
# TODO(zoyahav): Consider extracting a single PCollection before passing to
# ptransform if len(inputs) == 1.
if tag is None:
tagged_label = operation.label
else:
tagged_label = '{label}[{tag}]'.format(label=operation.label, tag=tag)
outputs = ((inputs or beam.pvalue.PBegin(self._extra_args.pipeline))
| tagged_label >> ptransform(operation, self._extra_args))
if isinstance(outputs, beam.pvalue.PCollection):
return (outputs,)
else:
return outputs
def validate_value(self, value):
if not isinstance(value, beam.pvalue.PCollection):
raise TypeError('Expected a PCollection, got {} of type {}'.format(
value, type(value)))
class IncrementCounter(beam.PTransform):
"""A PTransform that increments a counter once per PCollection.
The output PCollection is the same as the input PCollection.
"""
def __init__(self, counter_name):
self._counter_name = counter_name
def _make_and_increment_counter(self, unused_element):
del unused_element
beam.metrics.Metrics.counter(METRICS_NAMESPACE, self._counter_name).inc()
return None
def expand(self, pcoll):
_ = (
pcoll.pipeline
| 'CreateSole' >> beam.Create([None])
| 'Count' >> beam.Map(self._make_and_increment_counter))
return pcoll
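# Typical (assumed) usage of IncrementCounter: attach it to an existing
# PCollection, e.g.
#
#   instances | 'CountInstances' >> IncrementCounter('num_instances')
#
# The elements pass through unchanged; only the Beam metric is incremented.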
|
py | 1a3da064419a57c23875b852da1d6525d574a99c | import torch.nn as nn
import torch.optim as optim
import argparse
import os
from utils import process_all_files,load_GloVe,accuracy_cal
from model import GA_Reader
from data_loader import DataLoader,TestLoader
def train(epochs,iterations,loader_train,loader_val,
model,optimizer,loss_function):
for epoch in range(epochs):
for iteration in range(iterations):
model.train()
optimizer.zero_grad()
doc,doc_char,doc_mask,query,query_char,query_mask, \
char_type,char_type_mask,answer,cloze,cand, \
cand_mask,qe_comm=loader_train.__load_next__()
output=model( doc,doc_char,doc_mask,query,query_char,query_mask,
char_type,char_type_mask,answer,cloze,cand,
cand_mask,qe_comm)
loss=loss_function(output,answer)
scalar=loss.item()
loss.backward()
optimizer.step()
valid_loss,valid_acc=validate(loader_val,model,loss_function)
print('epoch=',epoch+1,'iteration=',iteration+1,'training loss=',scalar,
'validation loss=',valid_loss,'validation accuracy=',valid_acc)
if epoch>=2:
optimizer=optim.Adam(model.parameters(),lr=optimizer.param_groups[0]['lr']/2)
def validate(loader_val,model,loss_function):
model.eval()
return_loss=0
accuracy=0
for _ in range(loader_val.examples//loader_val.batch_size):
doc,doc_char,doc_mask,query,query_char,query_mask, \
char_type,char_type_mask,answer,cloze,cand, \
cand_mask,qe_comm=loader_val.__load_next__()
output=model( doc,doc_char,doc_mask,query,query_char,query_mask,
char_type,char_type_mask,answer,cloze,cand,
cand_mask,qe_comm)
accuracy+=accuracy_cal(output,answer)
loss=loss_function(output,answer)
return_loss+=loss.item()
return_loss/=(loader_val.examples//loader_val.batch_size)
accuracy=100*accuracy/loader_val.examples
return return_loss,accuracy
def test(loader_test,model):
model.eval()
accuracy=0
for _ in range(loader_test.examples//loader_test.batch_size):
doc,doc_char,doc_mask,query,query_char,query_mask, \
char_type,char_type_mask,answer,cloze,cand, \
cand_mask,qe_comm=loader_test.__load_next__()
output=model( doc,doc_char,doc_mask,query,query_char,query_mask,
char_type,char_type_mask,answer,cloze,cand,
cand_mask,qe_comm)
accuracy+=accuracy_cal(output,answer)
accuracy=100*accuracy/loader_test.examples
print('test accuracy=',accuracy)
def main(args):
word_to_int,int_to_word,char_to_int,int_to_char, \
training_data=process_all_files(args.train_file)
glove_embeddings=load_GloVe(args.embed_file,word_to_int,args.embed_size)
loss_function=nn.CrossEntropyLoss()
model=GA_Reader(len(char_to_int),args.char_size,args.embed_size,
args.char_hidden_size,args.hidden_size,len(word_to_int),
glove_embeddings,args.gru_layers,args.use_features,args.use_char)
optimizer=optim.Adam(model.parameters(),lr=args.lr)
    data_loader_train=DataLoader(training_data[:args.train_size],args.batch_size)
    data_loader_validate=TestLoader(
        training_data[args.train_size:args.train_size+args.dev_size],args.dev_size)
    data_loader_test=TestLoader(
        training_data[args.train_size+args.dev_size:
                      args.train_size+args.dev_size+args.test_size],
        args.test_size)
train(args.epochs,args.iterations,data_loader_train,
data_loader_validate,model,optimizer,loss_function)
test(data_loader_test,model)
def setup():
parser=argparse.ArgumentParser('argument parser')
parser.add_argument('--lr',type=float,default=0.00005)
parser.add_argument('--epochs',type=int,default=12)
parser.add_argument('--iterations',type=int,default=120)
parser.add_argument('--hidden_size',type=int,default=256)
parser.add_argument('--char_hidden_size',type=int,default=50)
parser.add_argument('--char_size',type=int,default=25)
parser.add_argument('--embed_size',type=int,default=100)
parser.add_argument('--use_features',type=bool,default=True)
parser.add_argument('--use_char',type=bool,default=True)
parser.add_argument('--batch_size',type=int,default=32)
parser.add_argument('--gru_layers',type=int,default=3)
parser.add_argument('--embed_file',type=str,default=os.getcwd()+'/word2vec_glove.text')
parser.add_argument('--train_file',type=str,default=os.getcwd()+'/train/')
parser.add_argument('--train_size',type=int,default=380298)
parser.add_argument('--dev_size',type=int,default=3924)
parser.add_argument('--test_size',type=int,default=3198)
args=parser.parse_args()
return args
if __name__=='__main__':
args=setup()
main(args) |
py | 1a3da0ad20c5299e7dbb694343de72bad3ab5d33 | # -*- coding: utf-8 -*-
__author__ = 'spatel'
|
py | 1a3da0fc086874bf20f1e9d2c03349e0bfd77f2d | '''
Merge Sort
Time Complexity: O(N*log(N))
Space Complexity: N
'''
from algorithms.Algorithm import Algorithm
class MergeSort(Algorithm):
def __init__(self):
super().__init__("Merge Sort")
def algorithm(self, temp_array = [], index = 0):
if temp_array == []:
temp_array = self.array.copy()
if len(temp_array) > 1:
m = len(temp_array)//2
left = temp_array[:m]
right = temp_array[m:]
self.algorithm(left, index)
self.algorithm(right, index+m)
#i - index of left array, j - index of right array, k - index of temp merged array
i = j = k = 0
while i < len(left) and j < len(right):
if left[i] < right[j]:
if self.array[index] != left[i]:
self.array[index], self.array[index-j+m] = left[i], self.array[index]
self.update(index, index-j+m)
else:
self.array[index] = left[i]
self.update(index)
temp_array[k] = left[i]
i += 1
else:
self.array[index], self.array[index-i+m] = right[j], self.array[index]
self.update(index, index-i+m)
temp_array[k] = right[j]
j += 1
                #visualise the sorting
index += 1
k += 1
while i < len(left):
self.array[index] = left[i]
temp_array[k] = left[i]
#visualise the sorting
self.update(index)
index += 1
i += 1
k += 1
while j < len(right):
self.array[index] = right[j]
temp_array[k] = right[j]
#visualise the sorting
self.update(index)
index += 1
j += 1
k += 1
|
py | 1a3da21ec7e8dcb5691692f67a58fce09ea12bfe | import json
import re
from lxml import html
import HTMLInfo
import sys
class JDPrice(object):
def __init__(self, url):
self.url = url
HTMLInfo.REFERER = url
r = HTMLInfo.get_html(url)
self.html = r.text
self.info = self.get_product()
def get_url_page(self):
tree = html.fromstring(self.html)
page = tree.xpath('//div[@id="J_filter"]//div[@id="J_topPage"]//span[@class="fp-text"]/i/text()')
if page:
page = page[0]
else:
print("Error: Cannot get the pages")
sys.exit()
return int(page) if int(page) < 2 else 2
def create_url(self, url_list):
page = self.get_url_page()
for i in range(1, int(page) + 1):
url_list.append(self.url + str(i))
def get_itemlist(self, itemlist):
tree = html.fromstring(self.html)
status = tree.xpath('//div[@id="J_goodsList"]//div[@class="p-img"]//@href')
for item in status:
if re.search('^//item.jd.com', item):
item = re.sub('//', 'https://', item)
if item not in itemlist:
itemlist.append(item)
def get_product(self):
product_pattern = re.compile(r'compatible: true,(.*?)};', re.S)
product_info = re.findall(product_pattern, self.html)
if product_info:
return product_info[0]
return None
def get_product_jpg(self):
jpg_pattern = re.compile(r"src: '(.*?)',")
jpg = "http://img10.360buyimg.com/n1/" + re.findall(jpg_pattern, self.info)[0]
return jpg
def get_product_skuid(self):
sku_id_pattern = re.compile(r'skuid: (.*?),')
sku_id = re.findall(sku_id_pattern, self.info)[0]
return sku_id
def get_product_cate(self):
cat_pattern = re.compile(r"cat: \[(.*?)\],")
cat = re.findall(cat_pattern, self.info)[0]
return cat
def get_vendorId(self):
vid_pattern = re.compile(r'venderId:(.*?),')
vid = re.findall(vid_pattern, self.info)[0]
return vid
def get_shopId(self):
sid_pattern = re.compile(r"shopId:'(.*?)',")
sid = re.findall(sid_pattern, self.info)[0]
return sid
def get_product_promotion(self):
discount = {}
content = ""
vip = ""
sku_id = self.get_product_skuid()
cat = self.get_product_cate()
vender_id = self.get_vendorId()
shop_id = self.get_shopId()
# 2_2813_51976_0 stands for Shanghai; 1_72_2799_0 means Beijing
url = "http://cd.jd.com/promotion/v2?&skuId=" + sku_id + "&area=2_2813_51976_0&shopId=" + shop_id + "&venderId=" + vender_id + "&cat=" + cat
prom = HTMLInfo.get_html(url).content.decode('gbk')
try:
if prom.find('You have triggered an abuse') < 0:
prom = json.loads(prom)
if "skuCoupon" in prom.keys():
if prom["skuCoupon"]:
for i in prom["skuCoupon"]:
discount[i["discount"]] = i["quota"]
if "prom" in prom.keys():
if "tags" in prom["prom"].keys():
if prom["prom"]["tags"]:
if prom["prom"]["tags"][0]["name"] == u'会员特价':
vip = prom["prom"]["tags"][0]["name"]
if "pickOneTag" in prom["prom"].keys():
if prom["prom"]["pickOneTag"]:
content = prom["prom"]["pickOneTag"][0]["content"]
except Exception as ex:
print('get_product_promotion ', ex)
sale = ""
gift = ""
if discount:
for i in discount.keys():
sale += u'满减:满' + str(discount[i]) + u'减' + str(i) + "<br />"
if vip:
vip = str(vip) + "<br />"
if content:
gift = u'满赠:' + str(content) + "<br />"
promotion = vip + sale + gift
return promotion
def get_product_name(self):
name = ""
try:
name_pattern = re.compile(r"name: '(.*?)',")
name = re.findall(name_pattern, self.info)[0]
except Exception as ex:
print(ex)
return bytes(name.encode()).decode('unicode-escape')
def get_product_price(self):
price = ""
plus_price = ""
date = {}
sku_id = self.get_product_skuid()
r = HTMLInfo.get_html("https://d.jd.com/lab/get?callback=lab")
match_pattern = re.compile(r"lab\(\[(.*?)\]\)")
try:
json_data = json.loads(re.findall(match_pattern, r.text)[0])
except Exception as ex:
print('get_product_price Ex:', ex)
if re.match('www.jd.com', json_data['url']):
date = json_data["startOn"]
date = str(date) + "1608370126"
# this url to get the price for JD
url = "http://p.3.cn/prices/mgets?&type=1&pduid=" + date + "&skuIds=J_" + sku_id
# response.json() can return the json-encoded content of a response
status = HTMLInfo.get_html(url).json()[0]
if status:
if 'tpp' in status:
plus_price = u"PLUS价:<br />" + status['tpp']
if 'p' in status:
price = u"京东价:<br />" + status['p']
return price + "<br />" + plus_price
if __name__ == '__main__':
jd = JDPrice("https://item.jd.com/4488334.html")
print(jd.get_product_price())
|
py | 1a3da31d78fa1ace0a25c686c5d93b36f2d14a4f | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from osc_lib import exceptions
from openstackclient.common import quota
from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes
from openstackclient.tests.unit import fakes
from openstackclient.tests.unit.identity.v2_0 import fakes as identity_fakes
from openstackclient.tests.unit.identity.v3 import fakes as identity_fakes_v3
from openstackclient.tests.unit.network.v2 import fakes as network_fakes
from openstackclient.tests.unit.volume.v2 import fakes as volume_fakes
class FakeQuotaResource(fakes.FakeResource):
_keys = {'property': 'value'}
def set_keys(self, args):
self._keys.update(args)
def unset_keys(self, keys):
for key in keys:
self._keys.pop(key, None)
def get_keys(self):
return self._keys
class TestQuota(compute_fakes.TestComputev2):
def setUp(self):
super(TestQuota, self).setUp()
# Set up common projects
self.projects = identity_fakes_v3.FakeProject.create_projects(count=2)
self.projects_mock = self.app.client_manager.identity.projects
self.projects_mock.reset_mock()
self.projects_mock.get.return_value = self.projects[0]
self.compute_quotas_mock = self.app.client_manager.compute.quotas
self.compute_quotas_mock.reset_mock()
self.compute_quotas_class_mock = \
self.app.client_manager.compute.quota_classes
self.compute_quotas_class_mock.reset_mock()
self.volume_quotas_mock = self.app.client_manager.volume.quotas
self.volume_quotas_mock.reset_mock()
self.volume_quotas_class_mock = \
self.app.client_manager.volume.quota_classes
self.volume_quotas_class_mock.reset_mock()
self.app.client_manager.auth_ref = mock.Mock()
self.app.client_manager.auth_ref.service_catalog = mock.Mock()
self.service_catalog_mock = \
self.app.client_manager.auth_ref.service_catalog
self.service_catalog_mock.reset_mock()
self.app.client_manager.auth_ref.project_id = identity_fakes.project_id
class TestQuotaList(TestQuota):
"""Test cases for quota list command"""
compute_column_header = (
'Project ID',
'Cores',
'Fixed IPs',
'Injected Files',
'Injected File Content Bytes',
'Injected File Path Bytes',
'Instances',
'Key Pairs',
'Metadata Items',
'Ram',
'Server Groups',
'Server Group Members',
)
network_column_header = (
'Project ID',
'Floating IPs',
'Networks',
'Ports',
'RBAC Policies',
'Routers',
'Security Groups',
'Security Group Rules',
'Subnets',
'Subnet Pools'
)
volume_column_header = (
'Project ID',
'Backups',
'Backup Gigabytes',
'Gigabytes',
'Per Volume Gigabytes',
'Snapshots',
'Volumes',
)
def setUp(self):
super(TestQuotaList, self).setUp()
# Work with multiple projects in this class
self.projects_mock.get.side_effect = self.projects
self.projects_mock.list.return_value = self.projects
self.compute_quotas = [
compute_fakes.FakeQuota.create_one_comp_quota(),
compute_fakes.FakeQuota.create_one_comp_quota(),
]
self.compute_default_quotas = [
compute_fakes.FakeQuota.create_one_default_comp_quota(),
compute_fakes.FakeQuota.create_one_default_comp_quota(),
]
self.compute = self.app.client_manager.compute
self.compute.quotas.defaults = mock.Mock(
side_effect=self.compute_default_quotas,
)
self.compute_reference_data = (
self.projects[0].id,
self.compute_quotas[0].cores,
self.compute_quotas[0].fixed_ips,
self.compute_quotas[0].injected_files,
self.compute_quotas[0].injected_file_content_bytes,
self.compute_quotas[0].injected_file_path_bytes,
self.compute_quotas[0].instances,
self.compute_quotas[0].key_pairs,
self.compute_quotas[0].metadata_items,
self.compute_quotas[0].ram,
self.compute_quotas[0].server_groups,
self.compute_quotas[0].server_group_members,
)
self.network_quotas = [
network_fakes.FakeQuota.create_one_net_quota(),
network_fakes.FakeQuota.create_one_net_quota(),
]
self.network_default_quotas = [
network_fakes.FakeQuota.create_one_default_net_quota(),
network_fakes.FakeQuota.create_one_default_net_quota(),
]
self.network = self.app.client_manager.network
self.network.get_quota_default = mock.Mock(
side_effect=self.network_default_quotas,
)
self.network_reference_data = (
self.projects[0].id,
self.network_quotas[0].floating_ips,
self.network_quotas[0].networks,
self.network_quotas[0].ports,
self.network_quotas[0].rbac_policies,
self.network_quotas[0].routers,
self.network_quotas[0].security_groups,
self.network_quotas[0].security_group_rules,
self.network_quotas[0].subnets,
self.network_quotas[0].subnet_pools,
)
self.volume_quotas = [
volume_fakes.FakeQuota.create_one_vol_quota(),
volume_fakes.FakeQuota.create_one_vol_quota(),
]
self.volume_default_quotas = [
volume_fakes.FakeQuota.create_one_default_vol_quota(),
volume_fakes.FakeQuota.create_one_default_vol_quota(),
]
self.volume = self.app.client_manager.volume
self.volume.quotas.defaults = mock.Mock(
side_effect=self.volume_default_quotas,
)
self.volume_reference_data = (
self.projects[0].id,
self.volume_quotas[0].backups,
self.volume_quotas[0].backup_gigabytes,
self.volume_quotas[0].gigabytes,
self.volume_quotas[0].per_volume_gigabytes,
self.volume_quotas[0].snapshots,
self.volume_quotas[0].volumes,
)
self.cmd = quota.ListQuota(self.app, None)
def test_quota_list_compute(self):
# Two projects with non-default quotas
self.compute.quotas.get = mock.Mock(
side_effect=self.compute_quotas,
)
arglist = [
'--compute',
]
verifylist = [
('compute', True),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
ret_quotas = list(data)
self.assertEqual(self.compute_column_header, columns)
self.assertEqual(self.compute_reference_data, ret_quotas[0])
self.assertEqual(2, len(ret_quotas))
def test_quota_list_compute_default(self):
# One of the projects is at defaults
self.compute.quotas.get = mock.Mock(
side_effect=[
self.compute_quotas[0],
compute_fakes.FakeQuota.create_one_default_comp_quota(),
],
)
arglist = [
'--compute',
]
verifylist = [
('compute', True),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
ret_quotas = list(data)
self.assertEqual(self.compute_column_header, columns)
self.assertEqual(self.compute_reference_data, ret_quotas[0])
self.assertEqual(1, len(ret_quotas))
def test_quota_list_compute_no_project_not_found(self):
# Make one of the projects disappear
self.compute.quotas.get = mock.Mock(
side_effect=[
self.compute_quotas[0],
exceptions.NotFound("NotFound"),
],
)
arglist = [
'--compute',
]
verifylist = [
('compute', True),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
ret_quotas = list(data)
self.assertEqual(self.compute_column_header, columns)
self.assertEqual(self.compute_reference_data, ret_quotas[0])
self.assertEqual(1, len(ret_quotas))
def test_quota_list_compute_no_project_4xx(self):
# Make one of the projects disappear
self.compute.quotas.get = mock.Mock(
side_effect=[
self.compute_quotas[0],
exceptions.BadRequest("Bad request"),
],
)
arglist = [
'--compute',
]
verifylist = [
('compute', True),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
ret_quotas = list(data)
self.assertEqual(self.compute_column_header, columns)
self.assertEqual(self.compute_reference_data, ret_quotas[0])
self.assertEqual(1, len(ret_quotas))
def test_quota_list_compute_no_project_5xx(self):
# Make one of the projects disappear
self.compute.quotas.get = mock.Mock(
side_effect=[
self.compute_quotas[0],
exceptions.HTTPNotImplemented("Not implemented??"),
],
)
arglist = [
'--compute',
]
verifylist = [
('compute', True),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.assertRaises(
exceptions.HTTPNotImplemented,
self.cmd.take_action,
parsed_args,
)
def test_quota_list_network(self):
# Two projects with non-default quotas
self.network.get_quota = mock.Mock(
side_effect=self.network_quotas,
)
arglist = [
'--network',
]
verifylist = [
('network', True),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
ret_quotas = list(data)
self.assertEqual(self.network_column_header, columns)
self.assertEqual(self.network_reference_data, ret_quotas[0])
self.assertEqual(2, len(ret_quotas))
def test_quota_list_network_default(self):
# Two projects with non-default quotas
self.network.get_quota = mock.Mock(
side_effect=[
self.network_quotas[0],
network_fakes.FakeQuota.create_one_default_net_quota(),
],
)
arglist = [
'--network',
]
verifylist = [
('network', True),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
ret_quotas = list(data)
self.assertEqual(self.network_column_header, columns)
self.assertEqual(self.network_reference_data, ret_quotas[0])
self.assertEqual(1, len(ret_quotas))
def test_quota_list_network_no_project(self):
# Two projects with non-default quotas
self.network.get_quota = mock.Mock(
side_effect=[
self.network_quotas[0],
exceptions.NotFound("NotFound"),
],
)
arglist = [
'--network',
]
verifylist = [
('network', True),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
ret_quotas = list(data)
self.assertEqual(self.network_column_header, columns)
self.assertEqual(self.network_reference_data, ret_quotas[0])
self.assertEqual(1, len(ret_quotas))
def test_quota_list_volume(self):
# Two projects with non-default quotas
self.volume.quotas.get = mock.Mock(
side_effect=self.volume_quotas,
)
arglist = [
'--volume',
]
verifylist = [
('volume', True),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
ret_quotas = list(data)
self.assertEqual(self.volume_column_header, columns)
self.assertEqual(self.volume_reference_data, ret_quotas[0])
self.assertEqual(2, len(ret_quotas))
def test_quota_list_volume_default(self):
# Two projects with non-default quotas
self.volume.quotas.get = mock.Mock(
side_effect=[
self.volume_quotas[0],
volume_fakes.FakeQuota.create_one_default_vol_quota(),
],
)
arglist = [
'--volume',
]
verifylist = [
('volume', True),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
ret_quotas = list(data)
self.assertEqual(self.volume_column_header, columns)
self.assertEqual(self.volume_reference_data, ret_quotas[0])
self.assertEqual(1, len(ret_quotas))
def test_quota_list_volume_no_project(self):
# Two projects with non-default quotas
self.volume.quotas.get = mock.Mock(
side_effect=[
self.volume_quotas[0],
volume_fakes.FakeQuota.create_one_default_vol_quota(),
],
)
arglist = [
'--volume',
]
verifylist = [
('volume', True),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
ret_quotas = list(data)
self.assertEqual(self.volume_column_header, columns)
self.assertEqual(self.volume_reference_data, ret_quotas[0])
self.assertEqual(1, len(ret_quotas))
class TestQuotaSet(TestQuota):
def setUp(self):
super(TestQuotaSet, self).setUp()
self.compute_quotas_mock.update.return_value = FakeQuotaResource(
None,
copy.deepcopy(compute_fakes.QUOTA),
loaded=True,
)
self.compute_quotas_class_mock.update.return_value = FakeQuotaResource(
None,
copy.deepcopy(compute_fakes.QUOTA),
loaded=True,
)
self.volume_quotas_mock.update.return_value = FakeQuotaResource(
None,
copy.deepcopy(compute_fakes.QUOTA),
loaded=True,
)
self.volume_quotas_class_mock.update.return_value = FakeQuotaResource(
None,
copy.deepcopy(compute_fakes.QUOTA),
loaded=True,
)
self.network_mock = self.app.client_manager.network
self.network_mock.update_quota = mock.Mock()
self.cmd = quota.SetQuota(self.app, None)
def test_quota_set(self):
arglist = [
'--floating-ips', str(compute_fakes.floating_ip_num),
'--fixed-ips', str(compute_fakes.fix_ip_num),
'--injected-files', str(compute_fakes.injected_file_num),
'--injected-file-size', str(compute_fakes.injected_file_size_num),
'--injected-path-size', str(compute_fakes.injected_path_size_num),
'--key-pairs', str(compute_fakes.key_pair_num),
'--cores', str(compute_fakes.core_num),
'--ram', str(compute_fakes.ram_num),
'--instances', str(compute_fakes.instance_num),
'--properties', str(compute_fakes.property_num),
'--secgroup-rules', str(compute_fakes.secgroup_rule_num),
'--secgroups', str(compute_fakes.secgroup_num),
'--server-groups', str(compute_fakes.servgroup_num),
'--server-group-members', str(compute_fakes.servgroup_members_num),
self.projects[0].name,
]
verifylist = [
('floating_ips', compute_fakes.floating_ip_num),
('fixed_ips', compute_fakes.fix_ip_num),
('injected_files', compute_fakes.injected_file_num),
('injected_file_content_bytes',
compute_fakes.injected_file_size_num),
('injected_file_path_bytes', compute_fakes.injected_path_size_num),
('key_pairs', compute_fakes.key_pair_num),
('cores', compute_fakes.core_num),
('ram', compute_fakes.ram_num),
('instances', compute_fakes.instance_num),
('metadata_items', compute_fakes.property_num),
('security_group_rules', compute_fakes.secgroup_rule_num),
('security_groups', compute_fakes.secgroup_num),
('server_groups', compute_fakes.servgroup_num),
('server_group_members', compute_fakes.servgroup_members_num),
('project', self.projects[0].name),
]
self.app.client_manager.network_endpoint_enabled = False
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
kwargs = {
'floating_ips': compute_fakes.floating_ip_num,
'fixed_ips': compute_fakes.fix_ip_num,
'injected_files': compute_fakes.injected_file_num,
'injected_file_content_bytes':
compute_fakes.injected_file_size_num,
'injected_file_path_bytes': compute_fakes.injected_path_size_num,
'key_pairs': compute_fakes.key_pair_num,
'cores': compute_fakes.core_num,
'ram': compute_fakes.ram_num,
'instances': compute_fakes.instance_num,
'metadata_items': compute_fakes.property_num,
'security_group_rules': compute_fakes.secgroup_rule_num,
'security_groups': compute_fakes.secgroup_num,
'server_groups': compute_fakes.servgroup_num,
'server_group_members': compute_fakes.servgroup_members_num,
}
self.compute_quotas_mock.update.assert_called_once_with(
self.projects[0].id,
**kwargs
)
self.assertIsNone(result)
def test_quota_set_volume(self):
arglist = [
'--gigabytes', str(volume_fakes.QUOTA['gigabytes']),
'--snapshots', str(volume_fakes.QUOTA['snapshots']),
'--volumes', str(volume_fakes.QUOTA['volumes']),
'--backups', str(volume_fakes.QUOTA['backups']),
'--backup-gigabytes', str(volume_fakes.QUOTA['backup_gigabytes']),
'--per-volume-gigabytes',
str(volume_fakes.QUOTA['per_volume_gigabytes']),
self.projects[0].name,
]
verifylist = [
('gigabytes', volume_fakes.QUOTA['gigabytes']),
('snapshots', volume_fakes.QUOTA['snapshots']),
('volumes', volume_fakes.QUOTA['volumes']),
('backups', volume_fakes.QUOTA['backups']),
('backup_gigabytes', volume_fakes.QUOTA['backup_gigabytes']),
('per_volume_gigabytes',
volume_fakes.QUOTA['per_volume_gigabytes']),
('project', self.projects[0].name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
kwargs = {
'gigabytes': volume_fakes.QUOTA['gigabytes'],
'snapshots': volume_fakes.QUOTA['snapshots'],
'volumes': volume_fakes.QUOTA['volumes'],
'backups': volume_fakes.QUOTA['backups'],
'backup_gigabytes': volume_fakes.QUOTA['backup_gigabytes'],
'per_volume_gigabytes': volume_fakes.QUOTA['per_volume_gigabytes']
}
self.volume_quotas_mock.update.assert_called_once_with(
self.projects[0].id,
**kwargs
)
self.assertIsNone(result)
def test_quota_set_volume_with_volume_type(self):
arglist = [
'--gigabytes', str(volume_fakes.QUOTA['gigabytes']),
'--snapshots', str(volume_fakes.QUOTA['snapshots']),
'--volumes', str(volume_fakes.QUOTA['volumes']),
'--backups', str(volume_fakes.QUOTA['backups']),
'--backup-gigabytes', str(volume_fakes.QUOTA['backup_gigabytes']),
'--per-volume-gigabytes',
str(volume_fakes.QUOTA['per_volume_gigabytes']),
'--volume-type', 'volume_type_backend',
self.projects[0].name,
]
verifylist = [
('gigabytes', volume_fakes.QUOTA['gigabytes']),
('snapshots', volume_fakes.QUOTA['snapshots']),
('volumes', volume_fakes.QUOTA['volumes']),
('backups', volume_fakes.QUOTA['backups']),
('backup_gigabytes', volume_fakes.QUOTA['backup_gigabytes']),
('per_volume_gigabytes',
volume_fakes.QUOTA['per_volume_gigabytes']),
('volume_type', 'volume_type_backend'),
('project', self.projects[0].name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
kwargs = {
'gigabytes_volume_type_backend': volume_fakes.QUOTA['gigabytes'],
'snapshots_volume_type_backend': volume_fakes.QUOTA['snapshots'],
'volumes_volume_type_backend': volume_fakes.QUOTA['volumes'],
'backups': volume_fakes.QUOTA['backups'],
'backup_gigabytes': volume_fakes.QUOTA['backup_gigabytes'],
'per_volume_gigabytes': volume_fakes.QUOTA['per_volume_gigabytes']
}
self.volume_quotas_mock.update.assert_called_once_with(
self.projects[0].id,
**kwargs
)
self.assertIsNone(result)
def test_quota_set_network(self):
arglist = [
'--subnets', str(network_fakes.QUOTA['subnet']),
'--networks', str(network_fakes.QUOTA['network']),
'--floating-ips', str(network_fakes.QUOTA['floatingip']),
'--subnetpools', str(network_fakes.QUOTA['subnetpool']),
'--secgroup-rules',
str(network_fakes.QUOTA['security_group_rule']),
'--secgroups', str(network_fakes.QUOTA['security_group']),
'--routers', str(network_fakes.QUOTA['router']),
'--rbac-policies', str(network_fakes.QUOTA['rbac_policy']),
'--ports', str(network_fakes.QUOTA['port']),
'--vips', str(network_fakes.QUOTA['vip']),
'--health-monitors', str(network_fakes.QUOTA['healthmonitor']),
'--l7policies', str(network_fakes.QUOTA['l7policy']),
self.projects[0].name,
]
verifylist = [
('subnet', network_fakes.QUOTA['subnet']),
('network', network_fakes.QUOTA['network']),
('floatingip', network_fakes.QUOTA['floatingip']),
('subnetpool', network_fakes.QUOTA['subnetpool']),
('security_group_rule',
network_fakes.QUOTA['security_group_rule']),
('security_group', network_fakes.QUOTA['security_group']),
('router', network_fakes.QUOTA['router']),
('rbac_policy', network_fakes.QUOTA['rbac_policy']),
('port', network_fakes.QUOTA['port']),
('vip', network_fakes.QUOTA['vip']),
('healthmonitor', network_fakes.QUOTA['healthmonitor']),
('l7policy', network_fakes.QUOTA['l7policy']),
('project', self.projects[0].name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
kwargs = {
'subnet': network_fakes.QUOTA['subnet'],
'network': network_fakes.QUOTA['network'],
'floatingip': network_fakes.QUOTA['floatingip'],
'subnetpool': network_fakes.QUOTA['subnetpool'],
'security_group_rule':
network_fakes.QUOTA['security_group_rule'],
'security_group': network_fakes.QUOTA['security_group'],
'router': network_fakes.QUOTA['router'],
'rbac_policy': network_fakes.QUOTA['rbac_policy'],
'port': network_fakes.QUOTA['port'],
'vip': network_fakes.QUOTA['vip'],
'healthmonitor': network_fakes.QUOTA['healthmonitor'],
'l7policy': network_fakes.QUOTA['l7policy'],
}
self.network_mock.update_quota.assert_called_once_with(
self.projects[0].id,
**kwargs
)
self.assertIsNone(result)
def test_quota_set_with_class(self):
arglist = [
'--injected-files', str(compute_fakes.injected_file_num),
'--injected-file-size', str(compute_fakes.injected_file_size_num),
'--injected-path-size', str(compute_fakes.injected_path_size_num),
'--key-pairs', str(compute_fakes.key_pair_num),
'--cores', str(compute_fakes.core_num),
'--ram', str(compute_fakes.ram_num),
'--instances', str(compute_fakes.instance_num),
'--properties', str(compute_fakes.property_num),
'--server-groups', str(compute_fakes.servgroup_num),
'--server-group-members', str(compute_fakes.servgroup_members_num),
'--gigabytes', str(compute_fakes.floating_ip_num),
'--snapshots', str(compute_fakes.fix_ip_num),
'--volumes', str(volume_fakes.QUOTA['volumes']),
'--network', str(network_fakes.QUOTA['network']),
'--class',
self.projects[0].name,
]
verifylist = [
('injected_files', compute_fakes.injected_file_num),
('injected_file_content_bytes',
compute_fakes.injected_file_size_num),
('injected_file_path_bytes', compute_fakes.injected_path_size_num),
('key_pairs', compute_fakes.key_pair_num),
('cores', compute_fakes.core_num),
('ram', compute_fakes.ram_num),
('instances', compute_fakes.instance_num),
('metadata_items', compute_fakes.property_num),
('server_groups', compute_fakes.servgroup_num),
('server_group_members', compute_fakes.servgroup_members_num),
('gigabytes', compute_fakes.floating_ip_num),
('snapshots', compute_fakes.fix_ip_num),
('volumes', volume_fakes.QUOTA['volumes']),
('network', network_fakes.QUOTA['network']),
('quota_class', True),
('project', self.projects[0].name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
kwargs_compute = {
'injected_files': compute_fakes.injected_file_num,
'injected_file_content_bytes':
compute_fakes.injected_file_size_num,
'injected_file_path_bytes': compute_fakes.injected_path_size_num,
'key_pairs': compute_fakes.key_pair_num,
'cores': compute_fakes.core_num,
'ram': compute_fakes.ram_num,
'instances': compute_fakes.instance_num,
'metadata_items': compute_fakes.property_num,
'server_groups': compute_fakes.servgroup_num,
'server_group_members': compute_fakes.servgroup_members_num,
}
kwargs_volume = {
'gigabytes': compute_fakes.floating_ip_num,
'snapshots': compute_fakes.fix_ip_num,
'volumes': volume_fakes.QUOTA['volumes'],
}
self.compute_quotas_class_mock.update.assert_called_with(
self.projects[0].name,
**kwargs_compute
)
self.volume_quotas_class_mock.update.assert_called_with(
self.projects[0].name,
**kwargs_volume
)
self.assertNotCalled(self.network_mock.update_quota)
self.assertIsNone(result)
class TestQuotaShow(TestQuota):
def setUp(self):
super(TestQuotaShow, self).setUp()
self.compute_quota = compute_fakes.FakeQuota.create_one_comp_quota()
self.compute_quotas_mock.get.return_value = self.compute_quota
self.compute_default_quota = \
compute_fakes.FakeQuota.create_one_default_comp_quota()
self.compute_quotas_mock.defaults.return_value = \
self.compute_default_quota
self.compute_quotas_class_mock.get.return_value = FakeQuotaResource(
None,
copy.deepcopy(compute_fakes.QUOTA),
loaded=True,
)
self.volume_quota = volume_fakes.FakeQuota.create_one_vol_quota()
self.volume_quotas_mock.get.return_value = self.volume_quota
self.volume_default_quota = \
volume_fakes.FakeQuota.create_one_default_vol_quota()
self.volume_quotas_mock.defaults.return_value = \
self.volume_default_quota
self.volume_quotas_class_mock.get.return_value = FakeQuotaResource(
None,
copy.deepcopy(volume_fakes.QUOTA),
loaded=True,
)
fake_network_endpoint = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.ENDPOINT),
loaded=True,
)
self.service_catalog_mock.get_endpoints.return_value = {
'network': fake_network_endpoint
}
self.app.client_manager.network = network_fakes.FakeNetworkV2Client(
endpoint=fakes.AUTH_URL,
token=fakes.AUTH_TOKEN,
)
self.network = self.app.client_manager.network
self.network.get_quota = mock.Mock(
return_value=network_fakes.QUOTA,
)
self.network.get_quota_default = mock.Mock(
return_value=network_fakes.QUOTA,
)
self.cmd = quota.ShowQuota(self.app, None)
def test_quota_show(self):
arglist = [
self.projects[0].name,
]
verifylist = [
('project', self.projects[0].name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
self.compute_quotas_mock.get.assert_called_once_with(
self.projects[0].id,
)
self.volume_quotas_mock.get.assert_called_once_with(
self.projects[0].id,
)
self.network.get_quota.assert_called_once_with(
self.projects[0].id,
)
self.assertNotCalled(self.network.get_quota_default)
def test_quota_show_with_default(self):
arglist = [
'--default',
self.projects[0].name,
]
verifylist = [
('default', True),
('project', self.projects[0].name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
self.compute_quotas_mock.defaults.assert_called_once_with(
self.projects[0].id,
)
self.volume_quotas_mock.defaults.assert_called_once_with(
self.projects[0].id,
)
self.network.get_quota_default.assert_called_once_with(
self.projects[0].id,
)
self.assertNotCalled(self.network.get_quota)
def test_quota_show_with_class(self):
arglist = [
'--class',
self.projects[0].name,
]
verifylist = [
('quota_class', True),
('project', self.projects[0].name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
self.compute_quotas_class_mock.get.assert_called_once_with(
self.projects[0].name,
)
self.volume_quotas_class_mock.get.assert_called_once_with(
self.projects[0].name,
)
self.assertNotCalled(self.network.get_quota)
self.assertNotCalled(self.network.get_quota_default)
def test_quota_show_no_project(self):
parsed_args = self.check_parser(self.cmd, [], [])
self.cmd.take_action(parsed_args)
self.compute_quotas_mock.get.assert_called_once_with(
identity_fakes.project_id,
)
self.volume_quotas_mock.get.assert_called_once_with(
identity_fakes.project_id,
)
self.network.get_quota.assert_called_once_with(
identity_fakes.project_id,
)
self.assertNotCalled(self.network.get_quota_default)
|
py | 1a3da36631475807721f7a2bc9b77544ae48dce1 | # Model a queue of cars waiting at the TÜV (vehicle inspection)
# Tasks: read in the licence plate of a new customer
# append the new plate to the existing queue
# output the plate of the next car
# then remove that plate from the queue
# exit the program
from module_Queue import Queue
print("""Warten beim TÜV
---------------""")
warteschlange = Queue() # Creates an object "warteschlange" of the class "Queue"
warteschlange.__menuetext = """
(N)euer Kunde
(A)bfertigen des nächsten Kunden
(E)nde
"""
wahl = "x"
while not (wahl in "eE" and warteschlange.empty()):
print(warteschlange.__menuetext)
wahl = input("Auswahl: ")
if wahl in ["n", "N"]:
kennzeichen = input("Kennzeichen: ")
warteschlange.enqueue(kennzeichen)
elif wahl in ["a","A"]:
if not warteschlange.empty():
print("Der Nächste ist: ",
warteschlange.dequeue())
else:
print("Die Warteschlange ist leer")
elif (wahl in "eE") and not warteschlange.empty():
print("Es warten noch Kunden!")
print("Kfz-Kennzeichen: ", warteschlange.front())
print("Ich wünsche einen schönen Feierabend!")
|
py | 1a3da49aa74c0ff714f0dff7b86dfab620e76ed0 | def count_genres(row_df):
count = 0
for i, number in row_df.iteritems():
if number > 0.0:
count += 1
return count / len(row_df)
def variance(row_df):
return 1 - row_df.var()
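# A minimal usage sketch; the DataFrame and genre columns below are invented
# purely to demonstrate the two helpers.
if __name__ == "__main__":
    import pandas as pd

    demo = pd.DataFrame({"action": [1.0, 0.0], "comedy": [2.0, 0.0], "drama": [0.0, 3.0]})
    # Fraction of genre columns with a positive score in each row.
    print(demo.apply(count_genres, axis=1))
    # One minus the variance of each row's scores.
    print(demo.apply(variance, axis=1))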
|
py | 1a3da6261e4bf462f6bba70a42c62d5a21388af3 | # Copyright INRIM (https://www.inrim.eu)
# See LICENSE file for full licensing details.
#
# def test_init(test_app):
# response = test_app.get("/action/list_form", headers={"referer": "localhost"})
# assert response.status_code == 200
# assert response.json() == {'action': 'redirect',
# 'url': 'https://test-auth.docker.ininrim.it/inrim-auth/login/forms?redirect=localhost',
# 'headers': {'referer': 'localhost'}}
# def test_status(test_app):
# response = test_app.get("/status")
# assert response.status_code == 200
# assert response.json() == {"status": "live"}
#
|
py | 1a3da71be6e5c9674e7d6a378bbf1bad1e8b1b06 | import json
import logging
from os import execv, unlink
import subprocess
from threading import Thread
from time import sleep
import netifaces
from fiotest.api import API
from fiotest.spec import Reboot, Sequence, Test, TestSpec
log = logging.getLogger()
class SpecStopped(Exception):
pass
class SpecRunner:
reboot_state = "/var/lib/fiotest/reboot.state"
def __init__(self, spec: TestSpec):
self.spec = spec
self.running = False
self.thread = Thread(target=self.run)
def start(self):
self.running = True
self.thread.start()
def run(self):
completed = 0
try:
with open(self.reboot_state) as f:
data = json.load(f)
completed = data["seq_idx"]
log.warning(
"Detectected rebooted sequence, continuing after sequence %d",
completed,
)
unlink(self.reboot_state)
API("/var/sota", False).complete_test(data["test_id"], {})
except FileNotFoundError:
pass # This is the "normal" case - no reboot has occurred
try:
for i, seq in enumerate(self.spec.sequence):
self._assert_running()
if i < completed:
log.debug("Skipping seq %d", i)
continue
log.info("Executing seq %d", i)
if seq.reboot:
self._reboot(i, seq.reboot)
else:
# run_tests recursively decrements seq.repeat.total
# we need to keep a copy of this value so that testing
# can be repeated
if seq.repeat:
total = seq.repeat.total
self._run_tests(seq)
if seq.repeat:
seq.repeat.total = total
except SpecStopped:
log.warning("Sequence has been stopped before completion")
log.info("Testing complete")
def stop(self):
log.info("Stopping run")
self.running = False
def join(self):
self.thread.join()
def _assert_running(self):
if not self.running:
raise SpecStopped()
def _reboot(self, seq_idx: int, reboot: Reboot):
log.warning("rebooting!!!!")
test_id = API("/var/sota", False).start_test("reboot")
with open(self.reboot_state, "w") as f:
state = {"seq_idx": seq_idx + 1, "test_id": test_id}
json.dump(state, f)
execv(reboot.command[0], reboot.command)
def _run_test(self, test: Test):
host_ip = netifaces.gateways()["default"][netifaces.AF_INET][0]
args = ["/usr/local/bin/fio-test-wrap", test.name]
if test.on_host:
args.extend(
[
"sshpass",
"-pfio",
"ssh",
"-o",
"StrictHostKeyChecking no",
"fio@" + host_ip,
]
)
args.extend(test.command)
with open("/tmp/tmp.log", "wb") as f:
p = subprocess.Popen(args, stderr=f, stdout=f)
while p.poll() is None:
if not self.running:
log.info("Killing test")
p.kill()
return
sleep(1)
rc = p.wait()
if rc != 0:
log.error("Test exited with %d", rc)
def _run_tests(self, seq: Sequence):
if seq.tests:
for test in seq.tests:
self._assert_running()
log.info("Executing test: %s", test.name)
self._run_test(test)
if seq.repeat and seq.repeat.total != 1:
if seq.repeat.total > 0:
seq.repeat.total -= 1
self._assert_running()
log.info("Repeating sequence in %d seconds", seq.repeat.delay_seconds)
sleep(seq.repeat.delay_seconds)
self._run_tests(seq)
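# A minimal driving sketch, assuming a TestSpec instance has already been built
# from a parsed spec file (its construction lives in fiotest.spec and is not
# shown here):
#
#     runner = SpecRunner(spec)   # `spec` is an existing TestSpec
#     runner.start()              # executes the sequences in a background thread
#     ...
#     runner.stop()               # request a stop between tests
#     runner.join()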
|
py | 1a3da79452fd4ee7d747431a38749212eaa7470d | from django.shortcuts import render, get_object_or_404, redirect
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.core.paginator import Paginator
from django.db.models import Count
from django.contrib import messages
from .models import Poll, Choice, Vote
from .forms import PollAddForm, EditPollForm, ChoiceAddForm
@login_required()
def polls_list(request):
all_polls = Poll.objects.all()
search_term = ''
if 'name' in request.GET:
all_polls = all_polls.order_by('text')
if 'date' in request.GET:
all_polls = all_polls.order_by('pub_date')
if 'vote' in request.GET:
all_polls = all_polls.annotate(Count('vote')).order_by('vote__count')
if 'search' in request.GET:
search_term = request.GET['search']
all_polls = all_polls.filter(text__icontains=search_term)
    paginator = Paginator(all_polls, 6) # Show 6 polls per page
page = request.GET.get('page')
polls = paginator.get_page(page)
get_dict_copy = request.GET.copy()
params = get_dict_copy.pop('page', True) and get_dict_copy.urlencode()
print(params)
context = {
'polls': polls,
'params': params,
'search_term': search_term,
}
return render(request, 'polls/polls_list.html', context)
@login_required()
def list_by_user(request):
all_polls = Poll.objects.filter(owner=request.user)
    paginator = Paginator(all_polls, 7) # Show 7 polls per page
page = request.GET.get('page')
polls = paginator.get_page(page)
context = {
'polls': polls,
}
return render(request, 'polls/polls_list.html', context)
@login_required()
def polls_add(request):
if request.user.has_perm('polls.add_poll'):
if request.method == 'POST':
form = PollAddForm(request.POST)
            if form.is_valid():
poll = form.save(commit=False)
poll.owner = request.user
poll.save()
new_choice1 = Choice(
poll=poll, choice_text=form.cleaned_data['choice1']).save()
new_choice2 = Choice(
poll=poll, choice_text=form.cleaned_data['choice2']).save()
messages.success(
request, "Poll & Choices added successfully", extra_tags='alert alert-success alert-dismissible fade show')
return redirect('polls:list')
else:
form = PollAddForm()
context = {
'form': form,
}
return render(request, 'polls/add_poll.html', context)
else:
return HttpResponse("Sorry but you don't have permission to do that!")
@login_required
def polls_edit(request, poll_id):
poll = get_object_or_404(Poll, pk=poll_id)
if request.user != poll.owner:
return redirect('home')
if request.method == 'POST':
form = EditPollForm(request.POST, instance=poll)
        if form.is_valid():
form.save()
messages.success(request, "Poll Updated successfully",
extra_tags='alert alert-success alert-dismissible fade show')
return redirect("polls:list")
else:
form = EditPollForm(instance=poll)
return render(request, "polls/poll_edit.html", {'form': form, 'poll': poll})
@login_required
def polls_delete(request, poll_id):
poll = get_object_or_404(Poll, pk=poll_id)
if request.user != poll.owner:
return redirect('home')
poll.delete()
messages.success(request, "Poll Deleted successfully",
extra_tags='alert alert-success alert-dismissible fade show')
return redirect("polls:list")
@login_required
def add_choice(request, poll_id):
poll = get_object_or_404(Poll, pk=poll_id)
if request.user != poll.owner:
return redirect('home')
if request.method == 'POST':
form = ChoiceAddForm(request.POST)
        if form.is_valid():
new_choice = form.save(commit=False)
new_choice.poll = poll
new_choice.save()
messages.success(
request, "Choice added successfully", extra_tags='alert alert-success alert-dismissible fade show')
return redirect('polls:edit', poll.id)
else:
form = ChoiceAddForm()
context = {
'form': form,
}
return render(request, 'polls/add_choice.html', context)
@login_required
def choice_edit(request, choice_id):
choice = get_object_or_404(Choice, pk=choice_id)
poll = get_object_or_404(Poll, pk=choice.poll.id)
if request.user != poll.owner:
return redirect('home')
if request.method == 'POST':
form = ChoiceAddForm(request.POST, instance=choice)
        if form.is_valid():
new_choice = form.save(commit=False)
new_choice.poll = poll
new_choice.save()
messages.success(
request, "Choice Updated successfully", extra_tags='alert alert-success alert-dismissible fade show')
return redirect('polls:edit', poll.id)
else:
form = ChoiceAddForm(instance=choice)
context = {
'form': form,
'edit_choice': True,
'choice': choice,
}
return render(request, 'polls/add_choice.html', context)
@login_required
def choice_delete(request, choice_id):
choice = get_object_or_404(Choice, pk=choice_id)
poll = get_object_or_404(Poll, pk=choice.poll.id)
if request.user != poll.owner:
return redirect('home')
choice.delete()
messages.success(
request, "Choice Deleted successfully", extra_tags='alert alert-success alert-dismissible fade show')
return redirect('polls:edit', poll.id)
def poll_detail(request, poll_id):
poll = get_object_or_404(Poll, id=poll_id)
if not poll.active:
return render(request, 'polls/poll_result.html', {'poll': poll})
loop_count = poll.choice_set.count()
context = {
'poll': poll,
'loop_time': range(0, loop_count),
}
return render(request, 'polls/poll_detail.html', context)
@login_required
def poll_vote(request, poll_id):
poll = get_object_or_404(Poll, pk=poll_id)
# choice_id = request.POST.get('choice')
choice_ids = request.POST.getlist('choice')
    print(f'choice_ids is {choice_ids}')
if not poll.user_can_vote(request.user):
messages.error(
request, "You already voted this poll", extra_tags='alert alert-warning alert-dismissible fade show')
return redirect("polls:list")
if len(choice_ids) > 0:
        # when checkboxes are used, several choice_ids have to be passed
# -----------
for choice_item in choice_ids:
            print(f'choice_item is {choice_item}, type {type(choice_item)}')
choice = Choice.objects.get(id=choice_item)
vote = Vote(user=request.user, poll=poll, choice=choice)
vote.save()
            print(f'Voted: {vote}')
return render(request, 'polls/poll_result.html', {'poll': poll})
# -----------
# choice = Choice.objects.get(id=choice_id)
# vote = Vote(user=request.user, poll=poll, choice=choice)
# vote.save()
# return render(request, 'polls/poll_result.html', {'poll': poll})
# -----------
else:
messages.error(
request, "No choice selected", extra_tags='alert alert-warning alert-dismissible fade show')
return redirect("polls:detail", poll_id)
return render(request, 'polls/poll_result.html', {'poll': poll})
@login_required
def endpoll(request, poll_id):
poll = get_object_or_404(Poll, pk=poll_id)
if request.user != poll.owner:
return redirect('home')
if poll.active is True:
poll.active = False
poll.save()
return render(request, 'polls/poll_result.html', {'poll': poll})
else:
return render(request, 'polls/poll_result.html', {'poll': poll})
|
py | 1a3da7a6ffcd73542d42846ccc7d9afec0cfa13f | """Subclass of FDialog, which is generated by wxFormBuilder."""
import wx
import mainframe
# Implementing FDialog
class FDialogEvent( mainframe.FDialog ):
def __init__( self, parent ):
mainframe.FDialog.__init__( self, parent )
|
py | 1a3da818990c3e680c2c634e1cac670c4c692419 | # Copyright (C) 2017-2018 Intel Corporation
#
# SPDX-License-Identifier: MIT
import dpctl
import base_bs_erf
import numba_dppy
from math import log, sqrt, exp, erf
@numba_dppy.kernel
def black_scholes( nopt, price, strike, t, rate, vol, call, put):
mr = -rate
sig_sig_two = vol * vol * 2
i = numba_dppy.get_global_id(0)
P = price[i]
S = strike [i]
T = t [i]
a = log(P / S)
b = T * mr
z = T * sig_sig_two
c = 0.25 * z
y = 1./sqrt(z)
w1 = (a - b + c) * y
w2 = (a - b - c) * y
d1 = 0.5 + 0.5 * erf(w1)
d2 = 0.5 + 0.5 * erf(w2)
Se = exp(b) * S
r = P * d1 - Se * d2
call [i] = r
put [i] = r - P + Se
def black_scholes_driver(nopt, price, strike, t, rate, vol, call, put):
with dpctl.device_context("opencl:gpu"):
black_scholes[nopt,numba_dppy.DEFAULT_LOCAL_SIZE]( nopt, price, strike, t, rate, vol, call, put )
base_bs_erf.run("Numba@jit-loop-par", black_scholes_driver, nparr=True, pass_args=True)
|
py | 1a3da89006b00155e7d2785aab2c7586a4ba46c0 | from db import connection
cnx = connection()
cursor = cnx.cursor()
def execute(names, query, cursor=cursor):
print(query)
cursor.execute(query)
print('\t'.join(names))
for tpl in cursor:
print('\t'.join(str(s) for s in tpl))
print()
def where_clauses(no_forks=False, language=None):
where_clauses = []
if no_forks:
where_clauses.append("is_fork_project=0")
if language is not None:
where_clauses.append(f"main_language='{language}'")
if bool(where_clauses):
conjunction = ' AND '.join(where_clauses)
return f" WHERE {conjunction} "
else:
return ""
def q_count(no_forks=False, language=None):
q = "SELECT COUNT(*) FROM repo ";
q += where_clauses(no_forks, language)
return ["count"], q
def q_license_count(no_forks=False, language=None):
q = "SELECT license, COUNT(*) AS license_count FROM repo "
q += where_clauses(no_forks, language)
q += "GROUP BY license ORDER BY license_count DESC"
return ["license", "license_count"], q
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--language")
args = parser.parse_args()
print("-" * 10)
print("including forks")
print("-" * 10)
execute(*q_count(language=args.language))
execute(*q_license_count(language=args.language))
print("-" * 10)
print("no forks")
print("-" * 10)
execute(*q_count(no_forks=True, language=args.language))
execute(*q_license_count(no_forks=True, language=args.language))
cursor.close()
cnx.close()
|
py | 1a3da9358260d64c998ef75831252e31803c7097 | import pandas as pd
import requests
import us
from bs4 import BeautifulSoup
from can_tools.scrapers.base import CMU
from can_tools.scrapers.official.base import CountyDashboard
class ArizonaMaricopaVaccine(CountyDashboard):
"""
Fetch county level Covid-19 vaccination data from official Maricopa county website
"""
source = "https://www.maricopa.gov/5641/COVID-19-Vaccine"
source_name = "Maricopa County"
has_location = False
location_type = "county"
state_fips = int(us.states.lookup("Arizona").fips)
def fetch(self):
# Set url of website
url = "https://www.maricopa.gov/5641/COVID-19-Vaccine"
request = requests.get(url)
if not request.ok:
message = f"Could not request data from {url}"
raise ValueError(message)
return request.content
def normalize(self, data) -> pd.DataFrame:
# Read data into Beautiful Soup
bs = BeautifulSoup(data, "html.parser")
# Find the doses given
        doses = bs.find_all("h2", class_="dataNumber")[1].text.replace(",", "")
# Create data frame
df = pd.DataFrame(
{
"location_name": ["Maricopa"],
"total_vaccine_doses_administered": pd.to_numeric(doses),
}
)
# Create dictionary for columns to map
crename = {
"total_vaccine_doses_administered": CMU(
category="total_vaccine_doses_administered",
measurement="cumulative",
unit="doses",
),
}
# Move things into long format
df = df.melt(id_vars=["location_name"], value_vars=crename.keys()).dropna()
# Determine the category of each observation
out = self.extract_CMU(df, crename)
# Add rows that don't change
out["vintage"] = self._retrieve_vintage()
out["dt"] = self._retrieve_dt("US/Arizona")
return out
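# A minimal usage sketch, assuming the CountyDashboard base class allows
# no-argument construction (not verified here):
#
#     scraper = ArizonaMaricopaVaccine()
#     raw = scraper.fetch()        # HTML bytes of the county vaccine page
#     df = scraper.normalize(raw)  # long-format frame with CMU-tagged rows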
|
py | 1a3da991815fa7415eb0decc5817f43004dbbbf8 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Util class or function."""
from mindspore.train.serialization import load_checkpoint
import mindspore.nn as nn
class AverageMeter:
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f', tb_writer=None):
self.name = name
self.fmt = fmt
self.reset()
self.tb_writer = tb_writer
self.cur_step = 1
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
if self.tb_writer is not None:
self.tb_writer.add_scalar(self.name, self.val, self.cur_step)
self.cur_step += 1
def __str__(self):
fmtstr = '{name}:{avg' + self.fmt + '}'
return fmtstr.format(**self.__dict__)
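# A minimal usage sketch: AverageMeter keeps a count-weighted running average,
# for example of a per-batch loss.
#
#     meter = AverageMeter('loss', ':.4f')
#     meter.update(0.9, n=32)
#     meter.update(0.7, n=32)
#     meter.update(0.5, n=16)
#     str(meter)   # -> 'loss:0.7400'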
def load_backbone(net, ckpt_path, args):
"""Load darknet53 backbone checkpoint."""
param_dict = load_checkpoint(ckpt_path)
yolo_backbone_prefix = 'feature_map.backbone'
darknet_backbone_prefix = 'network.backbone'
find_param = []
not_found_param = []
for name, cell in net.cells_and_names():
if name.startswith(yolo_backbone_prefix):
name = name.replace(yolo_backbone_prefix, darknet_backbone_prefix)
if isinstance(cell, (nn.Conv2d, nn.Dense)):
darknet_weight = '{}.weight'.format(name)
darknet_bias = '{}.bias'.format(name)
if darknet_weight in param_dict:
cell.weight.default_input = param_dict[darknet_weight].data
find_param.append(darknet_weight)
else:
not_found_param.append(darknet_weight)
if darknet_bias in param_dict:
cell.bias.default_input = param_dict[darknet_bias].data
find_param.append(darknet_bias)
else:
not_found_param.append(darknet_bias)
elif isinstance(cell, (nn.BatchNorm2d, nn.BatchNorm1d)):
darknet_moving_mean = '{}.moving_mean'.format(name)
darknet_moving_variance = '{}.moving_variance'.format(name)
darknet_gamma = '{}.gamma'.format(name)
darknet_beta = '{}.beta'.format(name)
if darknet_moving_mean in param_dict:
cell.moving_mean.default_input = param_dict[darknet_moving_mean].data
find_param.append(darknet_moving_mean)
else:
not_found_param.append(darknet_moving_mean)
if darknet_moving_variance in param_dict:
cell.moving_variance.default_input = param_dict[darknet_moving_variance].data
find_param.append(darknet_moving_variance)
else:
not_found_param.append(darknet_moving_variance)
if darknet_gamma in param_dict:
cell.gamma.default_input = param_dict[darknet_gamma].data
find_param.append(darknet_gamma)
else:
not_found_param.append(darknet_gamma)
if darknet_beta in param_dict:
cell.beta.default_input = param_dict[darknet_beta].data
find_param.append(darknet_beta)
else:
not_found_param.append(darknet_beta)
args.logger.info('================found_param {}========='.format(len(find_param)))
args.logger.info(find_param)
args.logger.info('================not_found_param {}========='.format(len(not_found_param)))
args.logger.info(not_found_param)
args.logger.info('=====load {} successfully ====='.format(ckpt_path))
return net
def default_wd_filter(x):
"""default weight decay filter."""
parameter_name = x.name
if parameter_name.endswith('.bias'):
# all bias not using weight decay
return False
if parameter_name.endswith('.gamma'):
# bn weight bias not using weight decay, be carefully for now x not include BN
return False
if parameter_name.endswith('.beta'):
# bn weight bias not using weight decay, be carefully for now x not include BN
return False
return True
def get_param_groups(network):
"""Param groups for optimizer."""
decay_params = []
no_decay_params = []
for x in network.trainable_params():
parameter_name = x.name
if parameter_name.endswith('.bias'):
# all bias not using weight decay
no_decay_params.append(x)
elif parameter_name.endswith('.gamma'):
# bn weight bias not using weight decay, be carefully for now x not include BN
no_decay_params.append(x)
elif parameter_name.endswith('.beta'):
# bn weight bias not using weight decay, be carefully for now x not include BN
no_decay_params.append(x)
else:
decay_params.append(x)
return [{'params': no_decay_params, 'weight_decay': 0.0}, {'params': decay_params}]
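# A minimal usage sketch: the grouped parameters are typically passed straight to
# an optimizer. The optimizer choice and hyper-parameters below are assumptions
# made only for illustration.
#
#     params = get_param_groups(network)
#     opt = nn.Momentum(params, learning_rate=0.01, momentum=0.9, weight_decay=1e-4)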
class ShapeRecord:
"""Log image shape."""
def __init__(self):
self.shape_record = {
320: 0,
352: 0,
384: 0,
416: 0,
448: 0,
480: 0,
512: 0,
544: 0,
576: 0,
608: 0,
'total': 0
}
def set(self, shape):
if len(shape) > 1:
shape = shape[0]
shape = int(shape)
self.shape_record[shape] += 1
self.shape_record['total'] += 1
def show(self, logger):
for key in self.shape_record:
rate = self.shape_record[key] / float(self.shape_record['total'])
logger.info('shape {}: {:.2f}%'.format(key, rate*100))
|
py | 1a3daa249c1f6dbec3ef45f67c84779b36737c66 |
from pprint import pprint as print
from js9 import j
from .RaftServer import RaftServer
from .RaftCluster import RaftCluster
JSConfigBase = j.tools.configmanager.base_class_configs
class RaftServerFactory(JSConfigBase):
def __init__(self):
self.__jslocation__ = "j.servers.raftserver"
super(RaftServerFactory, self).__init__(RaftCluster)
def get_by_params(self, instance="main", secret="1233", members = "localhost:4441,localhost:4442,localhost:4443", cmd="j.servers.raftserver.example_server_class_get()"):
data = {}
data["secret_"] = secret
data["members"] = members
data ["cmd"] = cmd
return self.get(instance=instance, data=data, create=True)
def example_server_class_get(self):
return RaftServer
def start_local(self,nrservers=3,startport=4000,cmd="j.servers.raftserver.example_server_class_get()" ,secret="1233"):
"""
        start a local cluster of nrservers nodes (default 3), run in tmux
"""
members=""
for i in range(nrservers):
members+="localhost:%s,"%(startport+i)
members=members.rstrip(",")
cluster = self.get_by_params( instance="main", secret=secret, members =members,cmd=cmd)
cluster.start(background=True)
def test(self):
"""
js9 'j.servers.raftserver.test()'
"""
self.start_local(nrservers=4,startport=6000,cmd="j.servers.raftserver.example_server_class_get()")
def test_nopasswd(self):
"""
js9 'j.servers.raftserver.test_nopasswd()'
"""
self.start_local(nrservers=30,startport=6000,cmd="j.servers.raftserver.example_server_class_get()",secret="")
|
py | 1a3daafa10ad5b3b3a13d2fd0dab30efd5dfd503 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2020 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.db import migrations
from pipeline.models import PipelineInstance, PipelineTemplate
from pipeline.contrib.statistics.models import InstanceInPipeline, TemplateInPipeline
def load_data(apps, schema_editor):
    # Clear existing data
TemplateInPipeline.objects.all().delete()
InstanceInPipeline.objects.all().delete()
template_list = PipelineTemplate.objects.filter(is_deleted=False)
template_data = []
for template in template_list:
template_id = template.template_id
try:
result = statistics_total(template.data)
data = TemplateInPipeline(
template_id=template_id,
atom_total=result["atom_total"],
subprocess_total=result["subprocess_total"],
gateways_total=result["gateways_total"],
)
template_data.append(data)
except Exception:
pass
TemplateInPipeline.objects.bulk_create(template_data)
instance_list = PipelineInstance.objects.filter(is_deleted=False)
instance_data = []
for instance in instance_list:
instance_id = instance.instance_id
try:
result = statistics_total(instance.execution_data)
data = InstanceInPipeline(
instance_id=instance_id,
atom_total=result["atom_total"],
subprocess_total=result["subprocess_total"],
gateways_total=result["gateways_total"],
)
instance_data.append(data)
except Exception:
pass
InstanceInPipeline.objects.bulk_create(instance_data)
def statistics_total(pipeline_tree):
atom_total = 0
subprocess_total = 0
tree_activities = pipeline_tree["activities"]
    # Get the number of gateways
gateways_total = len(pipeline_tree["gateways"])
    # Iterate over the activities nodes
for activity in tree_activities:
activity_type = tree_activities[activity]["type"]
if activity_type == "ServiceActivity":
atom_total += 1
elif activity_type == "SubProcess":
subprocess_total += 1
return {"atom_total": atom_total, "subprocess_total": subprocess_total, "gateways_total": gateways_total}
class Migration(migrations.Migration):
dependencies = [("statistics", "0006_auto_20181115_1208")]
operations = [migrations.RunPython(load_data)]
|
py | 1a3dab73cc6a63c45e2ffb0c83687d9366678dfb | #!/usr/bin/env python
#
# Public Domain 2014-present MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# [TEST_TAGS]
# eviction:prepare
# [END_TAGS]
import wttest
from wtscenario import make_scenarios
# test_prepare12.py
# Test update restore of a page with prepared update.
class test_prepare12(wttest.WiredTigerTestCase):
conn_config = 'cache_size=2MB'
format_values = [
('column', dict(key_format='r', value_format='S')),
('column_fix', dict(key_format='r', value_format='8t')),
('row_integer', dict(key_format='i', value_format='S')),
]
scenarios = make_scenarios(format_values)
def test_prepare_update_restore(self):
uri = "table:test_prepare12"
format = 'key_format={},value_format={}'.format(self.key_format, self.value_format)
self.session.create(uri, format)
if self.value_format == '8t':
value_a = 97
value_b = 98
value_aaa = 65
else:
value_a = 'a'
value_b = 'b'
value_aaa = 'a' * 500
# Prepare a transaction
cursor = self.session.open_cursor(uri, None)
self.session.begin_transaction()
cursor[1] = value_a
self.session.prepare_transaction('prepare_timestamp=' + self.timestamp_str(1))
# Insert an uncommitted key
session2 = self.conn.open_session(None)
cursor2 = session2.open_cursor(uri, None)
session2.begin_transaction()
cursor2[2] = value_b
# Insert a bunch of other content to fill the database to trigger eviction.
session3 = self.conn.open_session(None)
cursor3 = session3.open_cursor(uri, None)
for i in range(3, 101):
session3.begin_transaction()
cursor3[i] = value_aaa
session3.commit_transaction()
# Commit the prepared update
self.session.commit_transaction('commit_timestamp=' + self.timestamp_str(1) + ',durable_timestamp=' + self.timestamp_str(2))
# Read the prepared update
self.session.begin_transaction('read_timestamp=' + self.timestamp_str(2))
self.assertEqual(cursor[1], value_a)
|
py | 1a3dacbf03dcc8ee276b44eab136ed6887a77668 | from django.db import models
from django.utils import timezone
class TaskList(models.Model):
    name = models.CharField(max_length=100)
taskid = models.IntegerField()
class Record(models.Model):
name = models.CharField(verbose_name="process name",max_length=40)
# 市町村名
start_time = models.DateTimeField(verbose_name="time process start",default=timezone.now)
#start_date = models.DateField()
status = models.CharField(verbose_name="process status",max_length = 10)
QT = models.CharField(verbose_name="query sets", max_length=100) |
py | 1a3daee68dbeb371828c9650900dba6be74b5d8c | # Generated by Django 3.2.2 on 2021-05-10 05:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('database', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='PreferenceForm',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, verbose_name='Form Name')),
('is_taking_responses', models.BooleanField(default=True)),
('is_active', models.BooleanField(default=True)),
('course_set', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='course_form', to='database.modelset')),
('student_set', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='student_form', to='database.modelset')),
],
),
migrations.CreateModel(
name='PreferenceFormEntry',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('student_name', models.CharField(max_length=100, verbose_name='Student Name')),
('email', models.EmailField(max_length=254, verbose_name='Student Email')),
('courses', models.ManyToManyField(to='database.Course')),
('preference_form', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='entries', to='datacollection.preferenceform')),
],
),
]
|
py | 1a3db0161c58edbcf0f9b4a9d9fa900f22b2d008 | from setuptools import find_packages, setup
import taggit
with open("README.rst") as f:
readme = f.read()
setup(
name="django-taggit",
version=".".join(str(i) for i in taggit.VERSION),
description="django-taggit is a reusable Django application for simple tagging.",
long_description=readme,
author="Alex Gaynor",
author_email="[email protected]",
url="https://github.com/jazzband/django-taggit/tree/master",
packages=find_packages(exclude=("tests*",)),
package_data={"taggit": ["locale/*/LC_MESSAGES/*"]},
license="BSD",
python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*",
install_requires=["Django>=1.11"],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Framework :: Django",
"Framework :: Django :: 1.11",
"Framework :: Django :: 2.0",
"Framework :: Django :: 2.1",
"Framework :: Django :: 2.2",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
include_package_data=True,
zip_safe=False,
)
|
py | 1a3db01b59c2fe8e058d7a8bc62977e52a7a42cb | import collections
import glob
import itertools
import logging
import os
from dxtbx.imageset import ImageSequence
from dxtbx.model.experiment_list import (
BeamComparison,
DetectorComparison,
ExperimentList,
ExperimentListFactory,
GoniometerComparison,
)
from dxtbx.sequence_filenames import locate_files_matching_template_string
from dials.command_line.dials_import import ManualGeometryUpdater
from dials.util.options import geometry_phil_scope
from scitbx.array_family import flex
from xia2.Handlers.Phil import PhilIndex
logger = logging.getLogger("xia2.Schema")
class _ImagesetCache(dict):
pass
imageset_cache = _ImagesetCache()
def longest_common_substring(s1, s2):
m = [[0] * (1 + len(s2)) for i in range(1 + len(s1))]
longest, x_longest = 0, 0
for x in range(1, 1 + len(s1)):
for y in range(1, 1 + len(s2)):
if s1[x - 1] == s2[y - 1]:
m[x][y] = m[x - 1][y - 1] + 1
if m[x][y] > longest:
longest = m[x][y]
x_longest = x
else:
m[x][y] = 0
return s1[x_longest - longest : x_longest]
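# Illustrative example (not part of the original module; the filenames are
# made up): the helper returns the longest run of characters shared by the two
# strings, which is how a template is matched to a candidate master file.
#
#     longest_common_substring("insulin_1_0001.cbf", "insulin_1_master.h5")
#     # -> "insulin_1_"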
def load_imagesets(
template,
directory,
id_image=None,
image_range=None,
use_cache=True,
reversephi=False,
):
global imageset_cache
from xia2.Applications.xia2setup import known_hdf5_extensions
full_template_path = os.path.join(directory, template)
if full_template_path not in imageset_cache or not use_cache:
params = PhilIndex.params.xia2.settings
compare_beam = BeamComparison(
wavelength_tolerance=params.input.tolerance.beam.wavelength,
direction_tolerance=params.input.tolerance.beam.direction,
polarization_normal_tolerance=params.input.tolerance.beam.polarization_normal,
polarization_fraction_tolerance=params.input.tolerance.beam.polarization_fraction,
)
compare_detector = DetectorComparison(
fast_axis_tolerance=params.input.tolerance.detector.fast_axis,
slow_axis_tolerance=params.input.tolerance.detector.slow_axis,
origin_tolerance=params.input.tolerance.detector.origin,
)
compare_goniometer = GoniometerComparison(
rotation_axis_tolerance=params.input.tolerance.goniometer.rotation_axis,
fixed_rotation_tolerance=params.input.tolerance.goniometer.fixed_rotation,
setting_rotation_tolerance=params.input.tolerance.goniometer.setting_rotation,
)
scan_tolerance = params.input.tolerance.scan.oscillation
# If diamond anvil cell data, always use dynamic shadowing
high_pressure = PhilIndex.params.dials.high_pressure.correction
format_kwargs = {
"dynamic_shadowing": params.input.format.dynamic_shadowing or high_pressure,
"multi_panel": params.input.format.multi_panel,
}
if os.path.splitext(full_template_path)[-1] in known_hdf5_extensions:
# if we are passed the correct file, use this, else look for a master
# file (i.e. something_master.h5)
if os.path.exists(full_template_path) and os.path.isfile(
full_template_path
):
master_file = full_template_path
else:
g = glob.glob(os.path.join(directory, "*_master.h5"))
master_file = None
for p in g:
substr = longest_common_substring(template, p)
if substr:
if master_file is None or (
len(substr)
> len(longest_common_substring(template, master_file))
):
master_file = p
if master_file is None:
raise RuntimeError("Can't find master file for %s" % full_template_path)
unhandled = []
experiments = ExperimentListFactory.from_filenames(
[master_file],
unhandled=unhandled,
compare_beam=compare_beam,
compare_detector=compare_detector,
compare_goniometer=compare_goniometer,
scan_tolerance=scan_tolerance,
format_kwargs=format_kwargs,
)
assert len(unhandled) == 0, (
"unhandled image files identified: %s" % unhandled
)
else:
params = PhilIndex.get_python_object()
read_all_image_headers = params.xia2.settings.read_all_image_headers
if read_all_image_headers:
paths = sorted(
locate_files_matching_template_string(full_template_path)
)
unhandled = []
experiments = ExperimentListFactory.from_filenames(
paths,
unhandled=unhandled,
compare_beam=compare_beam,
compare_detector=compare_detector,
compare_goniometer=compare_goniometer,
scan_tolerance=scan_tolerance,
format_kwargs=format_kwargs,
)
assert len(unhandled) == 0, (
"unhandled image files identified: %s" % unhandled
)
else:
from xia2.Handlers.CommandLine import CommandLine
experiments = ExperimentList()
start_ends = CommandLine.get_start_ends(full_template_path)
if not start_ends:
start_ends.append(None)
for start_end in start_ends:
experiments.extend(
ExperimentList.from_templates(
[full_template_path],
format_kwargs=format_kwargs,
image_range=start_end,
)
)
imagesets = [
iset for iset in experiments.imagesets() if isinstance(iset, ImageSequence)
]
assert len(imagesets) > 0, "no imageset found"
imageset_cache[full_template_path] = collections.OrderedDict()
if reversephi:
for imageset in imagesets:
goniometer = imageset.get_goniometer()
goniometer.set_rotation_axis(
tuple(-g for g in goniometer.get_rotation_axis())
)
reference_geometry = PhilIndex.params.xia2.settings.input.reference_geometry
if reference_geometry is not None and len(reference_geometry) > 0:
update_with_reference_geometry(imagesets, reference_geometry)
# Update the geometry
params = PhilIndex.params.xia2.settings
update_geometry = []
# Then add manual geometry
work_phil = geometry_phil_scope.format(params.input)
diff_phil = geometry_phil_scope.fetch_diff(source=work_phil)
if diff_phil.as_str() != "":
update_geometry.append(ManualGeometryUpdater(params.input))
imageset_list = []
for imageset in imagesets:
for updater in update_geometry:
imageset = updater(imageset)
imageset_list.append(imageset)
imagesets = imageset_list
for imageset in imagesets:
scan = imageset.get_scan()
exposure_times = scan.get_exposure_times()
epochs = scan.get_epochs()
if exposure_times.all_eq(0) or exposure_times[0] == 0:
exposure_times = flex.double(exposure_times.size(), 1)
scan.set_exposure_times(exposure_times)
elif not exposure_times.all_gt(0):
exposure_times = flex.double(exposure_times.size(), exposure_times[0])
scan.set_exposure_times(exposure_times)
if epochs.size() > 1 and not epochs.all_gt(0):
if epochs[0] == 0:
epochs[0] = 1
for i in range(1, epochs.size()):
epochs[i] = epochs[i - 1] + exposure_times[i - 1]
scan.set_epochs(epochs)
_id_image = scan.get_image_range()[0]
imageset_cache[full_template_path][_id_image] = imageset
if id_image is not None:
return [imageset_cache[full_template_path][id_image]]
elif image_range is not None:
for imageset in imageset_cache[full_template_path].values():
scan = imageset.get_scan()
scan_image_range = scan.get_image_range()
if (
image_range[0] >= scan_image_range[0]
and image_range[1] <= scan_image_range[1]
):
b0 = scan.get_batch_offset()
i0 = image_range[0] - scan_image_range[0] + b0
i1 = image_range[1] - scan_image_range[0] + b0
imagesets = [imageset[i0 : i1 + 1]]
assert len(imagesets[0]) == image_range[1] - image_range[0] + 1, len(
imagesets[0]
)
return imagesets
return list(imageset_cache[full_template_path].values())
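# Illustrative call (hypothetical template, directory and image range; not
# taken from the original module):
#
#     imagesets = load_imagesets(
#         "insulin_1_####.cbf", "/data/insulin", image_range=(1, 90)
#     )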
def update_with_reference_geometry(imagesets, reference_geometry_list):
assert reference_geometry_list is not None
assert len(reference_geometry_list) >= 1
reference_components = load_reference_geometries(reference_geometry_list)
for imageset in imagesets:
reference_geometry = find_relevant_reference_geometry(
imageset, reference_components
)
imageset.set_beam(reference_geometry["beam"])
imageset.set_detector(reference_geometry["detector"])
def load_reference_geometries(geometry_file_list):
logger.debug("Collecting reference instrument models.")
ref_geoms = {
# Note that 'index' is the index of the experiment in the expt list file,
# as per dials.show, rather than the UID string of the experiment.
(expt.detector, expt.beam, f, index)
for f in geometry_file_list
for index, expt in enumerate(ExperimentList.from_file(f, check_format=False))
}
logger.debug("Removing duplicate reference geometries.")
duplicates = set()
for a, b in filter(duplicates.isdisjoint, itertools.combinations(ref_geoms, 2)):
if compare_geometries(a[0], b[0]):
logger.debug(f"Experiment {b[3]} of {b[2]} is a duplicate.")
duplicates.add(b)
ref_geoms -= duplicates
n = len(ref_geoms)
logger.debug(f"Found {n} unique reference geometr{'ies' if n != 1 else 'y'}.")
for geometry in ref_geoms:
logger.debug(f"Experiment {geometry[3]} of {geometry[2]} is unique.")
return [{"detector": geometry[0], "beam": geometry[1]} for geometry in ref_geoms]
def compare_geometries(detectorA, detectorB):
return detectorA.is_similar_to(
detectorB,
fast_axis_tolerance=0.1,
slow_axis_tolerance=0.1,
origin_tolerance=10,
ignore_trusted_range=True,
)
def find_relevant_reference_geometry(imageset, geometry_list):
for geometry in geometry_list:
if compare_geometries(geometry["detector"], imageset.get_detector()):
break
else:
raise Exception("No appropriate reference geometry found")
return geometry
|
py | 1a3db045c5a9a3b78129cd3188c1563c4e9298a5 | '''
Homebrew for Mac OS X
'''
# Import salt libs
import salt.utils
from salt.modules.yumpkg import _compare_versions
def __virtual__():
'''
Confine this module to Mac OS with Homebrew.
'''
if salt.utils.which('brew') and __grains__['os'] == 'MacOS':
return 'pkg'
def list_pkgs(*args):
'''
List the packages currently installed in a dict::
{'<package_name>': '<version>'}
CLI Example::
salt '*' pkg.list_pkgs
'''
cmd = 'brew list --versions {0}'.format(' '.join(args))
result_dict = {}
for line in __salt__['cmd.run'](cmd).splitlines():
(pkg, version) = line.split(' ')[0:2]
result_dict[pkg] = version
return result_dict
def version(name):
'''
Returns a version if the package is installed, else returns an empty string
CLI Example::
salt '*' pkg.version <package name>
'''
pkgs = list_pkgs(name)
if name in pkgs:
return pkgs[name]
else:
return ''
def remove(pkgs):
'''
Removes packages with ``brew uninstall``
    Return the output of the ``brew uninstall`` command:
CLI Example::
salt '*' pkg.remove <package,package,package>
'''
formulas = ' '.join(pkgs.split(','))
cmd = 'brew uninstall {0}'.format(formulas)
return __salt__['cmd.run'](cmd)
def install(pkgs, refresh=False, repo='', skip_verify=False, **kwargs):
'''
Install the passed package(s) with ``brew install``
pkgs
The names of the packages to be installed
Return a dict containing the new package names and versions::
{'<package>': {'old': '<old-version>',
                       'new': '<new-version>'}}
CLI Example::
salt '*' pkg.install 'package package package'
'''
if ',' in pkgs:
pkgs = pkgs.split(',')
else:
pkgs = pkgs.split(' ')
old = list_pkgs(*pkgs)
formulas = ' '.join(pkgs)
homebrew_prefix = __salt__['cmd.run']('brew --prefix')
user = __salt__['file.get_user'](homebrew_prefix)
cmd = 'brew install {0}'.format(formulas)
if user != __opts__['user']:
__salt__['cmd.run'](cmd, runas=user)
else:
__salt__['cmd.run'](cmd)
new = list_pkgs(*pkgs)
return _compare_versions(old, new)
def list_upgrades():
'''
Check whether or not an upgrade is available for all packages
CLI Example::
salt '*' pkg.list_upgrades
'''
cmd = 'brew outdated'
return __salt__['cmd.run'](cmd).splitlines()
def upgrade_available(pkg):
'''
Check whether or not an upgrade is available for a given package
CLI Example::
salt '*' pkg.upgrade_available <package name>
'''
return pkg in list_upgrades()
|
py | 1a3db08236aba772dc2cd79afd5c1a7b39d5555c | class Escritor:
def __init__(self, nome):
self.__nome = nome
self.__ferramenta = None
@property
def nome(self):
return self.__nome
@property
def ferramenta(self):
return self.__ferramenta
@ferramenta.setter
def ferramenta(self, ferramenta):
self.__ferramenta = ferramenta
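# Example usage (illustrative sketch; the writer name and tool are made up):
if __name__ == "__main__":
    autor = Escritor("Machado de Assis")
    autor.ferramenta = "caneta"
    print(autor.nome, "->", autor.ferramenta)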
|
py | 1a3db0a67095f4d2f7b4467f53baf06af3e1bc9d | from datetime import datetime
SKILLS = ['overall', 'attack', 'defence', 'strength', 'hitpoints',
'ranged', 'prayer', 'magic', 'cooking', 'woodcutting',
'fletching', 'fishing', 'firemaking', 'crafting', 'smithing',
          'mining', 'herblore', 'agility', 'thieving', 'slayer',
'farming', 'hunter']
class Skill:
def __init__(self, rank: int, level: int, experience: int):
self.rank = int(rank)
self.level = int(level)
self.experience = int(experience)
def get_encodable(self):
return {
'rank': self.rank,
'level': self.level,
'experience': self.experience,
}
class HighScore:
def __init__(self, account_id: str, id: str = None,
created_at: datetime = None,
**kwargs: Skill):
self.account_id = account_id
self.id = id
self.created_at = created_at
self._skills = dict()
for name, skill in kwargs.items():
if name not in SKILLS:
raise AttributeError('{key} is not a valid skill'.format(
key=name
))
setattr(self, name, skill)
@property
def skills(self):
return {skill: getattr(self, skill) for skill in SKILLS}
def __setattr__(self, key: str, value):
if key in SKILLS:
if not isinstance(value, Skill):
raise AttributeError('A skill must be an instance of {}'
.format(Skill.__name__))
self._skills[key] = value
super().__setattr__(key, value)
def __getattr__(self, item: str):
if item in SKILLS:
if item not in self._skills:
return None
return self._skills[item]
return super().__getattribute__(item)
def get_encodable(self):
skills = {name: skill.get_encodable() for name, skill in
self.skills.items() if skill is not None}
return {
'account_id': self.account_id,
'id': self.id,
'created_at': self.created_at.isoformat() \
if self.created_at else None,
'skills': skills,
}
def calc_xp_sum(self):
total = 0
for name, skill in self.skills.items():
if skill is None:
continue
if skill.experience < 1:
continue
total += skill.experience
return total
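# Example usage (illustrative sketch; the account id and figures are made up):
if __name__ == "__main__":
    score = HighScore(
        account_id="abc-123",
        attack=Skill(rank=120000, level=60, experience=273742),
        magic=Skill(rank=95000, level=55, experience=166636),
    )
    print(score.calc_xp_sum())  # 440378
    print(score.get_encodable()["skills"]["attack"]["level"])  # 60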
|