ext | sha | content
---|---|---
py
|
1a5db7bc017e503529cfada0cf27b904eb3412e2
|
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : GUC
Case Name : 使用gs_guc set方法设置参数partition_lock_upgrade_timeout为3000 ,
观察预期结果
Description :
1.查询partition_lock_upgrade_timeout默认值
2.修改参数值为3000并重启数据库
3.查询修改后的参数值
4.恢复参数默认值
Expect :
1.显示默认值为1800
2.设置成功
3.显示3000
4.默认值恢复成功
History :
"""
import unittest
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
LOG = Logger()
commonsh = CommonSH('dbuser')
class ClientConnection(unittest.TestCase):
def setUp(self):
LOG.info(
'-----Opengauss_Function_Guc_ClientConnection_Case0194 start----')
self.constant = Constant()
def test_partition_lock_upgrade_timeout(self):
LOG.info('--Step 1: check the default value--')
sql_cmd = commonsh.execut_db_sql('show '
'partition_lock_upgrade_timeout;')
LOG.info(sql_cmd)
self.assertEqual("1800", sql_cmd.split("\n")[-2].strip())
LOG.info('--Step 2: set the parameter value to 3000 and restart the database--')
msg = commonsh.execute_gsguc('set',
self.constant.GSGUC_SUCCESS_MSG,
'partition_lock_upgrade_timeout = 3000')
LOG.info(msg)
self.assertTrue(msg)
msg = commonsh.restart_db_cluster()
LOG.info(msg)
status = commonsh.get_db_cluster_status()
self.assertTrue("Degraded" in status or "Normal" in status)
LOG.info('--Step 3: query the modified parameter value--')
sql_cmd = commonsh.execut_db_sql('show'
' partition_lock_upgrade_timeout;')
LOG.info(sql_cmd)
self.assertEqual("3000", sql_cmd.split("\n")[-2].strip())
def tearDown(self):
LOG.info('--Step 4: restore the default value--')
sql_cmd = commonsh.execut_db_sql('show '
'partition_lock_upgrade_timeout;')
LOG.info(sql_cmd)
if "1800" != sql_cmd.splitlines()[-2].strip():
msg = commonsh.execute_gsguc('set',
self.constant.GSGUC_SUCCESS_MSG,
'partition_lock_upgrade_timeout=1800')
LOG.info(msg)
msg = commonsh.restart_db_cluster()
LOG.info(msg)
status = commonsh.get_db_cluster_status()
self.assertTrue("Degraded" in status or "Normal" in status)
LOG.info(
'-Opengauss_Function_Guc_ClientConnection_Case0194 finished-----')
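# For reference, a rough sketch of the command that commonsh.execute_gsguc()
# is assumed to wrap (exact flags depend on the deployment; illustration only):
#   gs_guc set -N all -I all -c "partition_lock_upgrade_timeout = 3000"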
|
py
|
1a5db7d7decb43416357a4aeecb3e6d4826a01de
|
from django.conf.urls import patterns, url, include
urlpatterns = patterns('',
url(r'^auth/', include('djoser.urls.authtoken')),
)
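# Note: django.conf.urls.patterns() was removed in Django 1.10. A rough
# equivalent on modern Django (sketch, assuming djoser is installed) would be:
#
# from django.urls import include, path
#
# urlpatterns = [
#     path('auth/', include('djoser.urls.authtoken')),
# ]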
|
py
|
1a5db8812bd108997ff0e8111108d80bcd4adf97
|
"""FindDockerStackFiles
Crawls the fetched application registry directory (from FetchAppRegistry)
and locates all docker-stack.yml files"""
__author__ = '[email protected]'
import os
from modules.steps.base_pipeline_step import BasePipelineStep
from modules.util import environment, data_defs
class FindDockerStackFiles(BasePipelineStep):
def __init__(self):
BasePipelineStep.__init__(self)
self.registry_root = None
def get_required_env_variables(self):
return [environment.REGISTRY_SUB_DIRECTORY]
def get_required_data_keys(self):
return []
def run_step(self, pipeline_data):
self.registry_root = environment.get_registry_path()
pipeline_data[data_defs.STACK_FILES] = self.walk_repository()
return pipeline_data
def walk_repository(self):
stack_files = []
for dirpath, _, files in os.walk(self.registry_root):
for file in files:
if file == 'docker-stack.yml':
stack_files.append(os.path.join(dirpath, file))
self.log.debug('Found %s docker stack files', len(stack_files))
return stack_files
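# Illustrative usage sketch (assumes REGISTRY_SUB_DIRECTORY is set in the
# environment and the registry has already been fetched by FetchAppRegistry):
#
# step = FindDockerStackFiles()
# pipeline_data = step.run_step({})
# print(pipeline_data[data_defs.STACK_FILES])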
|
py
|
1a5dba31725c74534cd23be812a46a6e0b592260
|
# Ensures true division for the / operator; needed for the Adafruit lib
from __future__ import division
import time
import sys
import RPi.GPIO as GPIO
import Adafruit_PCA9685
# References:
# https://github.com/adafruit/Adafruit_Python_PCA9685/blob/master/examples/simpletest.py
# https://learn.adafruit.com/16-channel-pwm-servo-driver/library-reference
# https://howtomechatronics.com/how-it-works/how-servo-motors-work-how-to-control-servos-using-arduino/
# https://www.adeept.com/learn/tutorial-249.html
# https://www.adeept.com/learn/tutorial-252.html
# An understanding of how the code in this file works can be gained by surfing the above links
class DriveTrain:
def __init__(self):
# Right now, we only use this to control the turn servo
# on the drivetrain. Eventually, it will have to be moved to
# a global/shared context to control the claw servos.
self.turn_pwm = Adafruit_PCA9685.PCA9685()
# 50Hz PWM frequency => servo expects updates every 1/50Hz = 20ms
self.turn_pwm.set_pwm_freq(50)
# Pin numbers for back wheels (forward/backward)
self.motor_A_pin1 = 26
self.motor_A_pin2 = 21
self.motor_A_en = 4
self.motor_B_pin1 = 27
self.motor_B_pin2 = 18
self.motor_B_en = 17
# Just declarations
self.motor_pwm_A = 0
self.motor_pwm_B = 0
# Constants for turning servo
self.initPos = 300
self.maxPos = 560
self.minPos = 100
self.angleRange = 180
self.driveSetup()
self.turnSetup()
def driveSetup(self):
GPIO.setwarnings(False)
# Broadcom chip-specific pin numbers
GPIO.setmode(GPIO.BCM)
GPIO.setup(self.motor_A_pin1, GPIO.OUT)
GPIO.setup(self.motor_A_pin2, GPIO.OUT)
GPIO.setup(self.motor_A_en, GPIO.OUT)
GPIO.setup(self.motor_B_pin1, GPIO.OUT)
GPIO.setup(self.motor_B_pin2, GPIO.OUT)
GPIO.setup(self.motor_B_en, GPIO.OUT)
self.driveHalt()
# Enclose in try/except/pass if this doesn't work
self.motor_pwm_A = GPIO.PWM(self.motor_A_en, 1000)
self.motor_pwm_B = GPIO.PWM(self.motor_B_en, 1000)
def driveHalt(self):
GPIO.output(self.motor_A_pin1, GPIO.LOW)
GPIO.output(self.motor_A_pin2, GPIO.LOW)
GPIO.output(self.motor_A_en, GPIO.LOW)
GPIO.output(self.motor_B_pin1, GPIO.LOW)
GPIO.output(self.motor_B_pin2, GPIO.LOW)
GPIO.output(self.motor_B_en, GPIO.LOW)
self.turn_pwm.set_pwm(0, 0, self.initPos)
def turnSetup(self, initPos = 300, moveTo = 1):
if initPos > self.minPos and initPos < self.maxPos:
if moveTo:
# First arg is ID/channel of the motor - in this case 0
self.turn_pwm.set_pwm(0, 0, initPos)
else:
strErrorMsg = "Drivetrain: Invalid input position" + str(initPos) + ", minPos = " + str(self.minPos) + ", maxPos = " + str(self.maxPos)
print(strErrorMsg)
def moveSpeed(self, speed, direction):
# The correct combination of LOW/HIGH pin settings was found by lifting the bot
# and trying until it worked as intended
if direction == "backward":
GPIO.output(self.motor_A_pin1, GPIO.LOW)
GPIO.output(self.motor_A_pin2, GPIO.HIGH)
self.motor_pwm_A.start(0)
self.motor_pwm_A.ChangeDutyCycle(speed)
GPIO.output(self.motor_B_pin1, GPIO.LOW)
GPIO.output(self.motor_B_pin2, GPIO.HIGH)
self.motor_pwm_B.start(0)
self.motor_pwm_B.ChangeDutyCycle(speed)
elif direction == "forward":
GPIO.output(self.motor_A_pin1, GPIO.HIGH)
GPIO.output(self.motor_A_pin2, GPIO.LOW)
self.motor_pwm_A.start(100)
self.motor_pwm_A.ChangeDutyCycle(speed)
GPIO.output(self.motor_B_pin1, GPIO.HIGH)
GPIO.output(self.motor_B_pin2, GPIO.LOW)
self.motor_pwm_B.start(100)
self.motor_pwm_B.ChangeDutyCycle(speed)
def turnAngle(self, angle):
# Positive input is left, negative input is right
pwmOut = int((self.maxPos - self.minPos)/self.angleRange*angle)
setPos = int(self.initPos + pwmOut)
if setPos > self.maxPos: setPos = self.maxPos
elif setPos < self.minPos: setPos = self.minPos
self.turn_pwm.set_pwm(0, 0, setPos)
def moveAbsoluteDelay(self, speed, angle, timestep):
# Clamp these values on this side to ensure no hardware damage of any sort.
if speed < -100: speed = -100
elif speed > 100: speed = 100
if angle < -60: angle = -60
elif angle > 60: angle = 60
if speed == 0:
self.curAngle = 0
self.driveHalt()
time.sleep(timestep)
return
self.turnAngle(angle)
if speed < 0:
self.moveSpeed(-speed, "backward")
else:
self.moveSpeed(speed, "forward")
time.sleep(timestep)
return
def destroy(self):
# Add logic to de-initialize the turn servo
self.driveHalt()
GPIO.cleanup()
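# Minimal usage sketch (assumes a Raspberry Pi with the PCA9685 and the motor
# driver wired to the pins configured above); drives forward briefly, then
# cleans up:
if __name__ == '__main__':
    drivetrain = DriveTrain()
    try:
        # Half speed, wheels straight, for one second.
        drivetrain.moveAbsoluteDelay(50, 0, 1)
    finally:
        drivetrain.destroy()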
|
py
|
1a5dba4512ac66146b145a066f6503c76473f740
|
from .spreadsheets import spreadsheet_to_plan_list # noqa: F401
from ._version import get_versions
__version__ = get_versions()["version"]
del get_versions
|
py
|
1a5dbac52e4e5f941da74720bab1059c752c3a19
|
# coding: utf-8
# -----------------------------------------------------------------------------------
# <copyright company="Aspose Pty Ltd" file="EmfLoadOptions.py">
# Copyright (c) 2003-2021 Aspose Pty Ltd
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import pprint
import re # noqa: F401
import six
from groupdocs_conversion_cloud.models import ImageLoadOptions
class EmfLoadOptions(ImageLoadOptions):
"""
Emf load options
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self, **kwargs): # noqa: E501
"""Initializes new instance of EmfLoadOptions""" # noqa: E501
base = super(EmfLoadOptions, self)
base.__init__(**kwargs)
self.swagger_types.update(base.swagger_types)
self.attribute_map.update(base.attribute_map)
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, EmfLoadOptions):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
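# Illustrative usage sketch: EmfLoadOptions defines no fields of its own, so
# to_dict() only returns whatever is inherited from ImageLoadOptions.
#
# options = EmfLoadOptions()
# print(options.to_dict())
# print(repr(options))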
|
py
|
1a5dbc0c73214a3e4a386d69b69ad4948f3162a7
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
sys.path.append(os.path.abspath('../../scratchcloud'))
# -- Project information -----------------------------------------------------
project = 'scratchcloud'
copyright = '2021, yuwex'
author = 'yuwex'
# The full version, including alpha/beta/rc tags
release = '0.0.1'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
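# Typical local build invocation for a layout like this (the paths are an
# assumption about the project structure; adjust as needed):
#   sphinx-build -b html docs/source docs/build/html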
|
py
|
1a5dbc38f60b68f9ccb68e243f88c51ca9870825
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(rc={'figure.figsize':(18,12)})
checkPValue = lambda p,threshold=0.05: "--> Null(H0) hypothesis rejected" if p < threshold else "--> We cannot reject the null hypothesis"
from scipy.stats import t,ttest_1samp
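# Note: get_pvalue() below relies on the notebook-level dataframes
# `neighbourhood_group_description` and `nyc_abnb` being defined before it is
# called; they are not created in this file.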
def get_pvalue(item):
neighbourhood_group_mean = neighbourhood_group_description[(neighbourhood_group_description['neighbourhood_group']==item.neighbourhood_group[0])
& (neighbourhood_group_description['room_type']=="Entire home/apt")]['price']['mean']
sample = nyc_abnb[(nyc_abnb['neighbourhood']==item.neighbourhood[0])
& (nyc_abnb['room_type']=="Entire home/apt") ]
f,pval = ttest_1samp(sample['price'] ,neighbourhood_group_mean)
return pval[0]
class PDF(object):
def __init__(self, pdf, size=(200,200)):
self.pdf = pdf
self.size = size
def _repr_html_(self):
return '<iframe src={0} width={1[0]} height={1[1]}></iframe>'.format(self.pdf, self.size)
def _repr_latex_(self):
return r'\includegraphics[width=1.0\textwidth]{{{0}}}'.format(self.pdf)
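# Illustrative use of the checkPValue helper above (p-values are made up):
# print(checkPValue(0.03))  # --> Null(H0) hypothesis rejected
# print(checkPValue(0.20))  # --> We cannot reject the null hypothesis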
|
py
|
1a5dbcf6df61e71a767f3b0e4e4bd3cdcc664855
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import unittest
from transformers.tokenization_xlnet import (XLNetTokenizer, SPIECE_UNDERLINE)
from .tokenization_tests_commons import CommonTestCases
SAMPLE_VOCAB = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'fixtures/test_sentencepiece.model')
class XLNetTokenizationTest(CommonTestCases.CommonTokenizerTester):
tokenizer_class = XLNetTokenizer
def setUp(self):
super(XLNetTokenizationTest, self).setUp()
# We have a SentencePiece fixture for testing
tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname)
def get_tokenizer(self, **kwargs):
return XLNetTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def get_input_output_texts(self):
input_text = u"This is a test"
output_text = u"This is a test"
return input_text, output_text
def test_full_tokenizer(self):
tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokens = tokenizer.tokenize(u'This is a test')
self.assertListEqual(tokens, [u'▁This', u'▁is', u'▁a', u'▁t', u'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])
tokens = tokenizer.tokenize(u"I was born in 92000, and this is falsé.")
self.assertListEqual(tokens, [SPIECE_UNDERLINE + u'I', SPIECE_UNDERLINE + u'was', SPIECE_UNDERLINE + u'b',
u'or', u'n', SPIECE_UNDERLINE + u'in', SPIECE_UNDERLINE + u'',
u'9', u'2', u'0', u'0', u'0', u',', SPIECE_UNDERLINE + u'and', SPIECE_UNDERLINE + u'this',
SPIECE_UNDERLINE + u'is', SPIECE_UNDERLINE + u'f', u'al', u's', u'é', u'.'])
ids = tokenizer.convert_tokens_to_ids(tokens)
self.assertListEqual(
ids, [8, 21, 84, 55, 24, 19, 7, 0,
602, 347, 347, 347, 3, 12, 66,
46, 72, 80, 6, 0, 4])
back_tokens = tokenizer.convert_ids_to_tokens(ids)
self.assertListEqual(back_tokens, [SPIECE_UNDERLINE + u'I', SPIECE_UNDERLINE + u'was', SPIECE_UNDERLINE + u'b',
u'or', u'n', SPIECE_UNDERLINE + u'in',
SPIECE_UNDERLINE + u'', u'<unk>', u'2', u'0', u'0', u'0', u',',
SPIECE_UNDERLINE + u'and', SPIECE_UNDERLINE + u'this',
SPIECE_UNDERLINE + u'is', SPIECE_UNDERLINE + u'f', u'al', u's',
u'<unk>', u'.'])
def test_tokenizer_lower(self):
tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
tokens = tokenizer.tokenize(u"I was born in 92000, and this is falsé.")
self.assertListEqual(tokens, [SPIECE_UNDERLINE + u'', u'i', SPIECE_UNDERLINE + u'was', SPIECE_UNDERLINE + u'b',
u'or', u'n', SPIECE_UNDERLINE + u'in', SPIECE_UNDERLINE + u'',
u'9', u'2', u'0', u'0', u'0', u',', SPIECE_UNDERLINE + u'and', SPIECE_UNDERLINE + u'this',
SPIECE_UNDERLINE + u'is', SPIECE_UNDERLINE + u'f', u'al', u'se', u'.'])
self.assertListEqual(tokenizer.tokenize(u"H\u00E9llo"), [u"▁he", u"ll", u"o"])
def test_tokenizer_no_lower(self):
tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
tokens = tokenizer.tokenize(u"I was born in 92000, and this is falsé.")
self.assertListEqual(tokens, [SPIECE_UNDERLINE + u'I', SPIECE_UNDERLINE + u'was', SPIECE_UNDERLINE + u'b', u'or',
u'n', SPIECE_UNDERLINE + u'in', SPIECE_UNDERLINE + u'',
u'9', u'2', u'0', u'0', u'0', u',', SPIECE_UNDERLINE + u'and', SPIECE_UNDERLINE + u'this',
SPIECE_UNDERLINE + u'is', SPIECE_UNDERLINE + u'f', u'al', u'se', u'.'])
def test_sequence_builders(self):
tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
text = tokenizer.encode("sequence builders")
text_2 = tokenizer.encode("multi-sequence build")
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_2 + [4, 3]
if __name__ == '__main__':
unittest.main()
|
py
|
1a5dbd26a835aae3b3db335dfd5104f104202da8
|
# This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from GridCal.Engine.Devices.transformer import TransformerType
# from math import sqrt
def test_transformer_type():
Vhv = 21 # primary voltage in kV
Vlv = 0.42 # secondary voltage kV
Sn = 0.25 # nominal power in MVA
Pcu = 2.35 # short circuit power (copper losses) kW
Pfe = 0.27 # no load power (iron losses) kW
I0 = 1.0 # no-load current in %
Vsc = 4.6 # short-circuit voltage in %
obj = TransformerType(hv_nominal_voltage=Vhv,
lv_nominal_voltage=Vlv,
nominal_power=Sn,
copper_losses=Pcu,
short_circuit_voltage=Vsc,
iron_losses=Pfe,
no_load_current=I0,
gr_hv1=0.5, gx_hv1=0.5)
Sbase = 100
z_series, y_shunt = obj.get_impedances(VH=Vhv, VL=Vlv, Sbase=Sbase)
assert np.allclose(z_series, 3.76+18.01j, rtol=0.01)
assert np.allclose(y_shunt, 2.6532597915358445e-06-2.456722029199863e-05j, rtol=0.01)
if __name__ == '__main__':
# template_from_impedances()
test_transformer_type()
|
py
|
1a5dbe65464b458fbdb76fa99b9a7f28198ba17f
|
#!/usr/bin/env python
"""Common representation for ImageCollection and FeatureCollection.
This class is never intended to be instantiated by the user.
"""
# Using lowercase function naming to match the JavaScript names.
# pylint: disable=g-bad-name
from . import apifunction
from . import deprecation
from . import ee_exception
from . import element
from . import filter # pylint: disable=redefined-builtin
class Collection(element.Element):
"""Base class for ImageCollection and FeatureCollection."""
_initialized = False
def __init__(self, func, args, opt_varName=None):
"""Constructs a collection by initializing its ComputedObject."""
super(Collection, self).__init__(func, args, opt_varName)
@classmethod
def initialize(cls):
"""Imports API functions to this class."""
if not cls._initialized:
apifunction.ApiFunction.importApi(cls, 'Collection', 'Collection')
apifunction.ApiFunction.importApi(
cls, 'AggregateFeatureCollection', 'Collection', 'aggregate_')
cls._initialized = True
@classmethod
def reset(cls):
"""Removes imported API functions from this class.
Also resets to 0 the serial ID used for mapping Python functions.
"""
apifunction.ApiFunction.clearApi(cls)
cls._initialized = False
def filter(self, new_filter):
"""Apply a filter to this collection.
Args:
new_filter: Filter to add to this collection.
Returns:
The filtered collection object.
"""
if not new_filter:
raise ee_exception.EEException('Empty filters.')
return self._cast(apifunction.ApiFunction.call_(
'Collection.filter', self, new_filter))
@deprecation.CanUseDeprecated
def filterMetadata(self, name, operator, value):
"""Shortcut to add a metadata filter to a collection.
This is equivalent to self.filter(Filter().metadata(...)).
Args:
name: Name of a property to filter.
operator: Name of a comparison operator as defined
by FilterCollection. Possible values are: "equals", "less_than",
"greater_than", "not_equals", "not_less_than", "not_greater_than",
"starts_with", "ends_with", "not_starts_with", "not_ends_with",
"contains", "not_contains".
value: The value to compare against.
Returns:
The filtered collection.
"""
return self.filter(filter.Filter.metadata_(name, operator, value))
def filterBounds(self, geometry):
"""Shortcut to add a geometry filter to a collection.
Items in the collection with a footprint that fails to intersect
the given geometry will be excluded when the collection is evaluated.
This is equivalent to self.filter(Filter().geometry(...)).
Args:
geometry: The boundary to filter to either as a GeoJSON geometry,
or a FeatureCollection, from which a geometry will be extracted.
Returns:
The filter object.
"""
return self.filter(filter.Filter.geometry(geometry))
def filterDate(self, start, opt_end=None):
"""Shortcut to filter a collection with a date range.
Items in the collection with a time_start property that doesn't
fall between the start and end dates will be excluded.
This is equivalent to self.filter(Filter().date(...)).
Args:
start: The start date as a Date object, a string representation of
a date, or milliseconds since epoch.
opt_end: The end date as a Date object, a string representation of
a date, or milliseconds since epoch.
Returns:
The filter object.
"""
return self.filter(filter.Filter.date(start, opt_end))
def getInfo(self):
"""Returns all the known information about this collection.
This function makes a REST call to retrieve all the known information
about this collection.
Returns:
The return contents vary but will include at least:
features: an array containing metadata about the items in the
collection that passed all filters.
properties: a dictionary containing the collection's metadata
properties.
"""
return super(Collection, self).getInfo()
def limit(self, maximum, opt_property=None, opt_ascending=None):
"""Limit a collection to the specified number of elements.
This limits a collection to the specified number of elements, optionally
sorting them by a specified property first.
Args:
maximum: The number to limit the collection to.
opt_property: The property to sort by, if sorting.
opt_ascending: Whether to sort in ascending or descending order.
The default is true (ascending).
Returns:
The collection.
"""
args = {'collection': self, 'limit': maximum}
if opt_property is not None:
args['key'] = opt_property
if opt_ascending is not None:
args['ascending'] = opt_ascending
return self._cast(
apifunction.ApiFunction.apply_('Collection.limit', args))
def sort(self, prop, opt_ascending=None):
"""Sort a collection by the specified property.
Args:
prop: The property to sort by.
opt_ascending: Whether to sort in ascending or descending
order. The default is true (ascending).
Returns:
The collection.
"""
args = {'collection': self, 'key': prop}
if opt_ascending is not None:
args['ascending'] = opt_ascending
return self._cast(
apifunction.ApiFunction.apply_('Collection.limit', args))
@staticmethod
def name():
return 'Collection'
@staticmethod
def elementType():
"""Returns the type of the collection's elements."""
return element.Element
def map(self, algorithm, opt_dropNulls=None):
"""Maps an algorithm over a collection.
Args:
algorithm: The operation to map over the images or features of the
collection, a Python function that receives an image or features and
returns one. The function is called only once and the result is
captured as a description, so it cannot perform imperative operations
or rely on external state.
opt_dropNulls: If true, the mapped algorithm is allowed to return nulls,
and the elements for which it returns nulls will be dropped.
Returns:
The mapped collection.
Raises:
ee_exception.EEException: if algorithm is not a function.
"""
element_type = self.elementType()
with_cast = lambda e: algorithm(element_type(e))
return self._cast(apifunction.ApiFunction.call_(
'Collection.map', self, with_cast, opt_dropNulls))
def iterate(self, algorithm, first=None):
"""Iterates over a collection with an algorithm.
Applies a user-supplied function to each element of a collection. The
user-supplied function is given two arguments: the current element, and
the value returned by the previous call to iterate() or the first argument,
for the first iteration. The result is the value returned by the final
call to the user-supplied function.
Args:
algorithm: The function to apply to each element. Must take two
arguments - an element of the collection and the value from the
previous iteration.
first: The initial state.
Returns:
The result of the Collection.iterate() call.
Raises:
ee_exception.EEException: if algorithm is not a function.
"""
element_type = self.elementType()
with_cast = lambda e, prev: algorithm(element_type(e), prev)
return apifunction.ApiFunction.call_(
'Collection.iterate', self, with_cast, first)
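# Illustrative usage through the public Earth Engine client (sketch; requires
# an authenticated session, and the collection ID is just an example):
#
# import ee
# ee.Initialize()
# recent = (ee.ImageCollection('LANDSAT/LC08/C02/T1')
#           .filterDate('2021-01-01', '2021-12-31')
#           .filterBounds(ee.Geometry.Point(-122.3, 37.9))
#           .limit(10, 'CLOUD_COVER'))
# print(recent.size().getInfo())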
|
py
|
1a5dbef34eae75d1b17be0c3f9e43684a014b79f
|
from .base import DiagnosticBase
from .diagnostic_8gig import diagnostic8Gig
from .diagnostic_20gig import diagnostic20Gig
from .diagnostic_maxsize import diagnosticMaxSize
from .diagnostic_network import diagnosticNetwork
class allDiagnostics(DiagnosticBase):
def getName(self):
"""
Returns the human-readable name of the diagnostic
"""
return "Run all available diagnostics"
def getDescription(self):
"""
Returns a description of what the diagnostic does
"""
return "This diagnostic runs all available diagnostics in sequence."
def run(self, logger, args=[]):
"""
Runs the diagnostic
"""
# Run all available diagnostics in turn, storing the results
results = []
diagnostics = [
diagnostic8Gig(),
diagnostic20Gig(),
diagnosticMaxSize(),
diagnosticNetwork(),
]
for index, diagnostic in enumerate(diagnostics):
# Run the diagnostic and report its result
logger.info(
'[all] Running individual diagnostic: "{}"'.format(
diagnostic.getName()
),
True,
)
results.append(diagnostic.run(logger))
logger.info(
"[all] Individual diagnostic result: {}".format(
"passed" if results[-1] == True else "failed"
),
False,
)
# Print a newline after the last diagnostic has run
if index == len(diagnostics) - 1:
print()
# Only report success if all diagnostics succeeded
return False not in results
|
py
|
1a5dbeffde6443d7a93b3d300aa731de3035b568
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
import requests
from bs4 import BeautifulSoup
if __name__ == '__main__':
# cookie = '''UM_distinctid=15b3c3debef665-0537941edddf5-1d396853-13c680-15b3c3debf09e1; cisession=927cedc3368ef3642aa8f5a4fcb916f6fa20c0f1; CNZZDATA1000201968=237985065-1491360396-null%7C1491371262; Hm_lvt_f805f7762a9a237a0deac37015e9f6d9=1491364343,1491364573,1491372252,1491372262; Hm_lpvt_f805f7762a9a237a0deac37015e9f6d9=1491372900'''
# header = {
# 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36',
# 'Connection': 'keep-alive',
# 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
# 'Cookie': cookie}
# url = 'https://kankandou.com/book/view/22353.html'
# wbdata = requests.get(url, headers=header).text
# soup = BeautifulSoup(wbdata, 'lxml')
# print(soup)
cookie = {
'UM_distinctid': '15b3c3debef665-0537941edddf5-1d396853-13c680-15b3c3debf09e1',
'cisession': '927cedc3368ef3642aa8f5a4fcb916f6fa20c0f1',
'CNZZDATA1000201968': '237985065-1491360396-null%7C1491371262',
'Hm_lvt_f805f7762a9a237a0deac37015e9f6d9': '1491364343, 1491364573, 1491372252, 1491372262',
'Hm_lpvt_f805f7762a9a237a0deac37015e9f6d9': '1491372900'
}
url = 'https://kankandou.com/book/view/22353.html'
wbdata = requests.get(url, cookies=cookie).text
soup = BeautifulSoup(wbdata, 'lxml')
print(soup)
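# From here one would typically extract specific fields from the parsed page,
# e.g. (the selector is an assumption about the page structure):
# print(soup.find('title').get_text(strip=True))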
|
py
|
1a5dbfab6e479696f0a3f9a569c24f6ce8a85edc
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage Check Point Firewall (c) 2019
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: cp_mgmt_simple_gateway
short_description: Manages simple-gateway objects on Check Point over Web Services API
description:
- Manages simple-gateway objects on Check Point devices including creating, updating and removing objects.
- All operations are performed over Web Services API.
version_added: "2.9"
author: "Or Soffer (@chkp-orso)"
options:
name:
description:
- Object name.
type: str
required: True
ip_address:
description:
- IPv4 or IPv6 address. If both addresses are required use ipv4-address and ipv6-address fields explicitly.
type: str
ipv4_address:
description:
- IPv4 address.
type: str
ipv6_address:
description:
- IPv6 address.
type: str
anti_bot:
description:
- Anti-Bot blade enabled.
type: bool
anti_virus:
description:
- Anti-Virus blade enabled.
type: bool
application_control:
description:
- Application Control blade enabled.
type: bool
content_awareness:
description:
- Content Awareness blade enabled.
type: bool
firewall:
description:
- Firewall blade enabled.
type: bool
firewall_settings:
description:
- N/A
type: dict
suboptions:
auto_calculate_connections_hash_table_size_and_memory_pool:
description:
- N/A
type: bool
auto_maximum_limit_for_concurrent_connections:
description:
- N/A
type: bool
connections_hash_size:
description:
- N/A
type: int
maximum_limit_for_concurrent_connections:
description:
- N/A
type: int
maximum_memory_pool_size:
description:
- N/A
type: int
memory_pool_size:
description:
- N/A
type: int
interfaces:
description:
- Network interfaces. When a gateway is updated with new interfaces, the existing interfaces are removed.
type: list
suboptions:
name:
description:
- Object name.
type: str
anti_spoofing:
description:
- N/A
type: bool
anti_spoofing_settings:
description:
- N/A
type: dict
suboptions:
action:
description:
- If packets will be rejected (the Prevent option) or whether the packets will be monitored (the Detect option).
type: str
choices: ['prevent', 'detect']
ip_address:
description:
- IPv4 or IPv6 address. If both addresses are required use ipv4-address and ipv6-address fields explicitly.
type: str
ipv4_address:
description:
- IPv4 address.
type: str
ipv6_address:
description:
- IPv6 address.
type: str
network_mask:
description:
- IPv4 or IPv6 network mask. If both masks are required use ipv4-network-mask and ipv6-network-mask fields explicitly. Instead of
providing mask itself it is possible to specify IPv4 or IPv6 mask length in mask-length field. If both masks length are required use
ipv4-mask-length and ipv6-mask-length fields explicitly.
type: str
ipv4_network_mask:
description:
- IPv4 network address.
type: str
ipv6_network_mask:
description:
- IPv6 network address.
type: str
mask_length:
description:
- IPv4 or IPv6 network mask length.
type: str
ipv4_mask_length:
description:
- IPv4 network mask length.
type: str
ipv6_mask_length:
description:
- IPv6 network mask length.
type: str
security_zone:
description:
- N/A
type: bool
security_zone_settings:
description:
- N/A
type: dict
suboptions:
auto_calculated:
description:
- Security Zone is calculated according to where the interface leads to.
type: bool
specific_zone:
description:
- Security Zone specified manually.
type: str
tags:
description:
- Collection of tag identifiers.
type: list
topology:
description:
- N/A
type: str
choices: ['automatic', 'external', 'internal']
topology_settings:
description:
- N/A
type: dict
suboptions:
interface_leads_to_dmz:
description:
- Whether this interface leads to demilitarized zone (perimeter network).
type: bool
ip_address_behind_this_interface:
description:
- N/A
type: str
choices: ['not defined', 'network defined by the interface ip and net mask', 'network defined by routing', 'specific']
specific_network:
description:
- Network behind this interface.
type: str
color:
description:
- Color of the object. Should be one of existing colors.
type: str
choices: ['aquamarine', 'black', 'blue', 'crete blue', 'burlywood', 'cyan', 'dark green', 'khaki', 'orchid', 'dark orange',
'dark sea green', 'pink', 'turquoise', 'dark blue', 'firebrick', 'brown', 'forest green', 'gold', 'dark gold', 'gray', 'dark gray',
'light green', 'lemon chiffon', 'coral', 'sea green', 'sky blue', 'magenta', 'purple', 'slate blue', 'violet red', 'navy blue', 'olive',
'orange', 'red', 'sienna', 'yellow']
comments:
description:
- Comments string.
type: str
details_level:
description:
- The level of detail for some of the fields in the response can vary from showing only the UID value of the object to a fully detailed
representation of the object.
type: str
choices: ['uid', 'standard', 'full']
ignore_warnings:
description:
- Apply changes ignoring warnings.
type: bool
ignore_errors:
description:
- Apply changes ignoring errors. You won't be able to publish such changes. If the ignore-warnings flag was omitted, warnings will also be ignored.
type: bool
ips:
description:
- Intrusion Prevention System blade enabled.
type: bool
logs_settings:
description:
- N/A
type: dict
suboptions:
alert_when_free_disk_space_below:
description:
- N/A
type: bool
alert_when_free_disk_space_below_threshold:
description:
- N/A
type: int
alert_when_free_disk_space_below_type:
description:
- N/A
type: str
choices: ['none', 'log', 'popup alert', 'mail alert', 'snmp trap alert', 'user defined alert no.1', 'user defined alert no.2',
'user defined alert no.3']
before_delete_keep_logs_from_the_last_days:
description:
- N/A
type: bool
before_delete_keep_logs_from_the_last_days_threshold:
description:
- N/A
type: int
before_delete_run_script:
description:
- N/A
type: bool
before_delete_run_script_command:
description:
- N/A
type: str
delete_index_files_older_than_days:
description:
- N/A
type: bool
delete_index_files_older_than_days_threshold:
description:
- N/A
type: int
delete_index_files_when_index_size_above:
description:
- N/A
type: bool
delete_index_files_when_index_size_above_threshold:
description:
- N/A
type: int
delete_when_free_disk_space_below:
description:
- N/A
type: bool
delete_when_free_disk_space_below_threshold:
description:
- N/A
type: int
detect_new_citrix_ica_application_names:
description:
- N/A
type: bool
forward_logs_to_log_server:
description:
- N/A
type: bool
forward_logs_to_log_server_name:
description:
- N/A
type: str
forward_logs_to_log_server_schedule_name:
description:
- N/A
type: str
free_disk_space_metrics:
description:
- N/A
type: str
choices: ['mbytes', 'percent']
perform_log_rotate_before_log_forwarding:
description:
- N/A
type: bool
reject_connections_when_free_disk_space_below_threshold:
description:
- N/A
type: bool
reserve_for_packet_capture_metrics:
description:
- N/A
type: str
choices: ['percent', 'mbytes']
reserve_for_packet_capture_threshold:
description:
- N/A
type: int
rotate_log_by_file_size:
description:
- N/A
type: bool
rotate_log_file_size_threshold:
description:
- N/A
type: int
rotate_log_on_schedule:
description:
- N/A
type: bool
rotate_log_schedule_name:
description:
- N/A
type: str
stop_logging_when_free_disk_space_below:
description:
- N/A
type: bool
stop_logging_when_free_disk_space_below_threshold:
description:
- N/A
type: int
turn_on_qos_logging:
description:
- N/A
type: bool
update_account_log_every:
description:
- N/A
type: int
one_time_password:
description:
- N/A
type: str
os_name:
description:
- Gateway platform operating system.
type: str
save_logs_locally:
description:
- Save logs locally on the gateway.
type: bool
send_alerts_to_server:
description:
- Server(s) to send alerts to.
type: list
send_logs_to_backup_server:
description:
- Backup server(s) to send logs to.
type: list
send_logs_to_server:
description:
- Server(s) to send logs to.
type: list
tags:
description:
- Collection of tag identifiers.
type: list
threat_emulation:
description:
- Threat Emulation blade enabled.
type: bool
threat_extraction:
description:
- Threat Extraction blade enabled.
type: bool
url_filtering:
description:
- URL Filtering blade enabled.
type: bool
version:
description:
- Gateway platform version.
type: str
vpn:
description:
- VPN blade enabled.
type: bool
vpn_settings:
description:
- Gateway VPN settings.
type: dict
suboptions:
maximum_concurrent_ike_negotiations:
description:
- N/A
type: int
maximum_concurrent_tunnels:
description:
- N/A
type: int
color:
description:
- Color of the object. Should be one of existing colors.
type: str
choices: ['aquamarine', 'black', 'blue', 'crete blue', 'burlywood', 'cyan', 'dark green', 'khaki', 'orchid', 'dark orange', 'dark sea green',
'pink', 'turquoise', 'dark blue', 'firebrick', 'brown', 'forest green', 'gold', 'dark gold', 'gray', 'dark gray', 'light green', 'lemon chiffon',
'coral', 'sea green', 'sky blue', 'magenta', 'purple', 'slate blue', 'violet red', 'navy blue', 'olive', 'orange', 'red', 'sienna', 'yellow']
comments:
description:
- Comments string.
type: str
details_level:
description:
- The level of detail for some of the fields in the response can vary from showing only the UID value of the object to a fully detailed
representation of the object.
type: str
choices: ['uid', 'standard', 'full']
groups:
description:
- Collection of group identifiers.
type: list
ignore_warnings:
description:
- Apply changes ignoring warnings.
type: bool
ignore_errors:
description:
- Apply changes ignoring errors. You won't be able to publish such changes. If the ignore-warnings flag was omitted, warnings will also be ignored.
type: bool
extends_documentation_fragment: checkpoint_objects
"""
EXAMPLES = """
- name: add-simple-gateway
cp_mgmt_simple_gateway:
ip_address: 192.0.2.1
name: gw1
state: present
- name: set-simple-gateway
cp_mgmt_simple_gateway:
anti_bot: true
anti_virus: true
application_control: true
ips: true
name: test_gateway
state: present
threat_emulation: true
url_filtering: true
- name: delete-simple-gateway
cp_mgmt_simple_gateway:
name: gw1
state: absent
"""
RETURN = """
cp_mgmt_simple_gateway:
description: The checkpoint object created or updated.
returned: always, except when deleting the object.
type: dict
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.checkpoint.checkpoint import checkpoint_argument_spec_for_objects, api_call
def main():
argument_spec = dict(
name=dict(type='str', required=True),
ip_address=dict(type='str'),
ipv4_address=dict(type='str'),
ipv6_address=dict(type='str'),
anti_bot=dict(type='bool'),
anti_virus=dict(type='bool'),
application_control=dict(type='bool'),
content_awareness=dict(type='bool'),
firewall=dict(type='bool'),
firewall_settings=dict(type='dict', options=dict(
auto_calculate_connections_hash_table_size_and_memory_pool=dict(type='bool'),
auto_maximum_limit_for_concurrent_connections=dict(type='bool'),
connections_hash_size=dict(type='int'),
maximum_limit_for_concurrent_connections=dict(type='int'),
maximum_memory_pool_size=dict(type='int'),
memory_pool_size=dict(type='int')
)),
interfaces=dict(type='list', options=dict(
name=dict(type='str'),
anti_spoofing=dict(type='bool'),
anti_spoofing_settings=dict(type='dict', options=dict(
action=dict(type='str', choices=['prevent', 'detect'])
)),
ip_address=dict(type='str'),
ipv4_address=dict(type='str'),
ipv6_address=dict(type='str'),
network_mask=dict(type='str'),
ipv4_network_mask=dict(type='str'),
ipv6_network_mask=dict(type='str'),
mask_length=dict(type='str'),
ipv4_mask_length=dict(type='str'),
ipv6_mask_length=dict(type='str'),
security_zone=dict(type='bool'),
security_zone_settings=dict(type='dict', options=dict(
auto_calculated=dict(type='bool'),
specific_zone=dict(type='str')
)),
tags=dict(type='list'),
topology=dict(type='str', choices=['automatic', 'external', 'internal']),
topology_settings=dict(type='dict', options=dict(
interface_leads_to_dmz=dict(type='bool'),
ip_address_behind_this_interface=dict(type='str', choices=['not defined', 'network defined by the interface ip and net mask',
'network defined by routing', 'specific']),
specific_network=dict(type='str')
)),
color=dict(type='str', choices=['aquamarine', 'black', 'blue', 'crete blue', 'burlywood', 'cyan',
'dark green', 'khaki', 'orchid', 'dark orange', 'dark sea green', 'pink', 'turquoise', 'dark blue',
'firebrick',
'brown', 'forest green', 'gold', 'dark gold', 'gray', 'dark gray', 'light green', 'lemon chiffon',
'coral',
'sea green', 'sky blue', 'magenta', 'purple', 'slate blue', 'violet red', 'navy blue', 'olive', 'orange',
'red',
'sienna', 'yellow']),
comments=dict(type='str'),
details_level=dict(type='str', choices=['uid', 'standard', 'full']),
ignore_warnings=dict(type='bool'),
ignore_errors=dict(type='bool')
)),
ips=dict(type='bool'),
logs_settings=dict(type='dict', options=dict(
alert_when_free_disk_space_below=dict(type='bool'),
alert_when_free_disk_space_below_threshold=dict(type='int'),
alert_when_free_disk_space_below_type=dict(type='str', choices=['none',
'log', 'popup alert', 'mail alert', 'snmp trap alert',
'user defined alert no.1',
'user defined alert no.2', 'user defined alert no.3']),
before_delete_keep_logs_from_the_last_days=dict(type='bool'),
before_delete_keep_logs_from_the_last_days_threshold=dict(type='int'),
before_delete_run_script=dict(type='bool'),
before_delete_run_script_command=dict(type='str'),
delete_index_files_older_than_days=dict(type='bool'),
delete_index_files_older_than_days_threshold=dict(type='int'),
delete_index_files_when_index_size_above=dict(type='bool'),
delete_index_files_when_index_size_above_threshold=dict(type='int'),
delete_when_free_disk_space_below=dict(type='bool'),
delete_when_free_disk_space_below_threshold=dict(type='int'),
detect_new_citrix_ica_application_names=dict(type='bool'),
forward_logs_to_log_server=dict(type='bool'),
forward_logs_to_log_server_name=dict(type='str'),
forward_logs_to_log_server_schedule_name=dict(type='str'),
free_disk_space_metrics=dict(type='str', choices=['mbytes', 'percent']),
perform_log_rotate_before_log_forwarding=dict(type='bool'),
reject_connections_when_free_disk_space_below_threshold=dict(type='bool'),
reserve_for_packet_capture_metrics=dict(type='str', choices=['percent', 'mbytes']),
reserve_for_packet_capture_threshold=dict(type='int'),
rotate_log_by_file_size=dict(type='bool'),
rotate_log_file_size_threshold=dict(type='int'),
rotate_log_on_schedule=dict(type='bool'),
rotate_log_schedule_name=dict(type='str'),
stop_logging_when_free_disk_space_below=dict(type='bool'),
stop_logging_when_free_disk_space_below_threshold=dict(type='int'),
turn_on_qos_logging=dict(type='bool'),
update_account_log_every=dict(type='int')
)),
one_time_password=dict(type='str'),
os_name=dict(type='str'),
save_logs_locally=dict(type='bool'),
send_alerts_to_server=dict(type='list'),
send_logs_to_backup_server=dict(type='list'),
send_logs_to_server=dict(type='list'),
tags=dict(type='list'),
threat_emulation=dict(type='bool'),
threat_extraction=dict(type='bool'),
url_filtering=dict(type='bool'),
version=dict(type='str'),
vpn=dict(type='bool'),
vpn_settings=dict(type='dict', options=dict(
maximum_concurrent_ike_negotiations=dict(type='int'),
maximum_concurrent_tunnels=dict(type='int')
)),
color=dict(type='str', choices=['aquamarine', 'black', 'blue', 'crete blue', 'burlywood', 'cyan', 'dark green',
'khaki', 'orchid', 'dark orange', 'dark sea green', 'pink', 'turquoise', 'dark blue', 'firebrick', 'brown',
'forest green', 'gold', 'dark gold', 'gray', 'dark gray', 'light green', 'lemon chiffon', 'coral',
'sea green',
'sky blue', 'magenta', 'purple', 'slate blue', 'violet red', 'navy blue', 'olive', 'orange', 'red', 'sienna',
'yellow']),
comments=dict(type='str'),
details_level=dict(type='str', choices=['uid', 'standard', 'full']),
groups=dict(type='list'),
ignore_warnings=dict(type='bool'),
ignore_errors=dict(type='bool')
)
argument_spec.update(checkpoint_argument_spec_for_objects)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
api_call_object = 'simple-gateway'
result = api_call(module, api_call_object)
module.exit_json(**result)
if __name__ == '__main__':
main()
|
py
|
1a5dc00c7fe54180e7c7cb1d453da82ccc14705a
|
"""
This simple example shows how you could use the MLflow REST API to create new
runs inside an experiment and log parameters/metrics. Using the MLflow REST API
instead of the MLflow library can be useful when embedding tracking in an
application where you don't want to depend on the whole MLflow library, or when
making your own HTTP requests from another programming language (not Python).
For more details on MLflow REST API endpoints check the following page:
https://www.mlflow.org/docs/latest/rest-api.html
"""
import argparse
import os
import time
import requests
_DEFAULT_USER_ID = "unknown"
class MLflowTrackingRestApi:
def __init__(self, hostname, port, experiment_id):
self.base_url = f'http://{hostname}:{str(port)}/api/2.0/preview/mlflow'
self.experiment_id = experiment_id
self.run_id = self.create_run()
def create_run(self):
"""Create a new run for tracking."""
url = f'{self.base_url}/runs/create'
# user_id is deprecated and will be removed from the API in a future release
payload = {
"experiment_id": self.experiment_id,
"start_time": int(time.time() * 1000),
"user_id": _get_user_id(),
}
r = requests.post(url, json=payload)
run_id = None
if r.status_code == 200:
run_id = r.json()["run"]["info"]["run_uuid"]
else:
print("Creating run failed!")
return run_id
def list_experiments(self):
"""Get all experiments."""
url = f'{self.base_url}/experiments/list'
r = requests.get(url)
return r.json()["experiments"] if r.status_code == 200 else None
def log_param(self, param):
"""Log a parameter dict for the given run."""
url = f'{self.base_url}/runs/log-parameter'
payload = {"run_uuid": self.run_id, "key": param["key"], "value": param["value"]}
r = requests.post(url, json=payload)
return r.status_code
def log_metric(self, metric):
"""Log a metric dict for the given run."""
url = f'{self.base_url}/runs/log-metric'
payload = {"run_uuid": self.run_id, "key": metric["key"], "value": metric["value"]}
r = requests.post(url, json=payload)
return r.status_code
def _get_user_id():
"""Get the ID of the user for the current run."""
try:
import pwd
return pwd.getpwuid(os.getuid())[0]
except ImportError:
return _DEFAULT_USER_ID
if __name__ == "__main__":
# Command-line arguments
parser = argparse.ArgumentParser(description="MLflow REST API Example")
parser.add_argument(
"--hostname",
type=str,
default="localhost",
dest="hostname",
help="MLflow server hostname/ip (default: localhost)",
)
parser.add_argument(
"--port",
type=int,
default=5000,
dest="port",
help="MLflow server port number (default: 5000)",
)
parser.add_argument(
"--experiment-id",
type=int,
default=0,
dest="experiment_id",
help="Experiment ID (default: 0)",
)
print("Running mlflow_tracking_rest_api.py")
args = parser.parse_args()
mlflow_rest = MLflowTrackingRestApi(args.hostname, args.port, args.experiment_id)
# Parameter is a key/val pair (str types)
param = {"key": "alpha", "value": "0.1980"}
status_code = mlflow_rest.log_param(param)
if status_code == 200:
print(
"Successfully logged parameter: {} with value: {}".format(param["key"], param["value"])
)
else:
print("Logging parameter failed!")
# Metric is a key/val pair (key/val have str/float types)
metric = {"key": "precision", "value": 0.769}
status_code = mlflow_rest.log_metric(metric)
if status_code == 200:
print(
"Successfully logged parameter: {} with value: {}".format(
metric["key"], metric["value"]
)
)
else:
print("Logging metric failed!")
|
py
|
1a5dc084deb0abdcf9546a3891f982662aa8620d
|
from fuse.server.immunespace.dispatcher import GetObject
import json
import os
import pytest
import numpy as np
# this takes about 20s to return
# go get a session id and group objectId from immunespace for your user for this to work:
# https://www.immunespace.org/security/externalToolsView.view?returnUrl=%2Fproject%2FStudies%2Fbegin.view%3F
#g_debug = True
g_debug = False
def test_GetObject():
if os.getenv('TEST_LIBRARY') == "0":
pytest.skip("Only testing docker container")
objectId = os.getenv('GROUP')
username = os.getenv('USERNAME')
sess = os.getenv('APIKEY')
obj = {
"id": objectId,
"resourceType": "eset",
"resource": GetObject(objectId,sess,username)
}
with open('tests/expected/test_1.json', 'r', encoding='utf-8') as f:
expected = json.load(f)
#make smaller chunks for easier debugging
if(g_debug):
max_subjs=3
max_pheno=4
max_genes=5
obj["resource"]["exprs"] = np.array(obj["resource"]["exprs"])[0:max_genes,0:max_subjs].tolist() # 3 genes, 2 subjects
obj["resource"]["featureNames"] = np.array(obj["resource"]["featureNames"])[0:max_genes].tolist()
obj["resource"]["pData"] = np.array(obj["resource"]["pData"])[0:max_pheno,0:max_subjs].tolist() # 4 phenoetypes, 2 subjects
expected["resource"]["exprs"] = np.array(expected["resource"]["exprs"])[0:max_genes,0:max_subjs].tolist() # 3 genes, 2 subjects
expected["resource"]["featureNames"] = np.array(expected["resource"]["featureNames"])[0:max_genes].tolist()
expected["resource"]["pData"] = np.array(expected["resource"]["pData"])[0:max_pheno,0:max_subjs].tolist() # 4 phenoetypes, 2 subjects
# Uncomment this to capture output:
#with open('tests/test_1.out.json', 'w', encoding='utf-8') as f:
# json.dump(obj, f, ensure_ascii=False, indent=4, sort_keys=True)
objs = json.dumps(obj, ensure_ascii=False, indent=4, sort_keys=True)
expecteds = json.dumps(expected, ensure_ascii=False, indent=4, sort_keys=True)
if(g_debug):
print("obj:")
print(obj["resource"]["exprs"])
#print("expected:")
#print(expected["resource"]["exprs"])
# xxx sort the keys, then copy this to test_func.py
assert objs == expecteds
|
py
|
1a5dc0e602cfdf5b80dd36c4d68c54aff12b51a7
|
from src.soma import soma
def test_deve_retornar_a_soma_entre_dois_numeros():
result = soma(1, 2)
expected = 3
assert result == expected
|
py
|
1a5dc153307595a97acf15aef3393ef9725b11ba
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from typing import List, Optional, Union
import numpy as np
from habitat import get_config as get_task_config
from habitat.config import Config as CN
DEFAULT_CONFIG_DIR = "configs/"
CONFIG_FILE_SEPARATOR = ","
# -----------------------------------------------------------------------------
# EXPERIMENT CONFIG
# -----------------------------------------------------------------------------
_C = CN()
# task config can be a list of configs like "A.yaml,B.yaml"
_C.BASE_TASK_CONFIG_PATH = "configs/tasks/pointnav.yaml"
_C.TASK_CONFIG = CN() # task_config will be stored as a config node
_C.CMD_TRAILING_OPTS = [] # store command line options as list of strings
_C.TRAINER_NAME = "ppo"
_C.ENV_NAME = "NavRLEnv"
_C.SIMULATOR_GPU_ID = 0
_C.TORCH_GPU_ID = 0
_C.VIDEO_OPTION = ["disk", "tensorboard"]
_C.TENSORBOARD_DIR = "tb"
_C.WRITER_TYPE = "tb"
_C.VIDEO_DIR = "video_dir"
_C.VIDEO_FPS = 10
_C.VIDEO_RENDER_TOP_DOWN = True
_C.VIDEO_RENDER_ALL_INFO = False
_C.TEST_EPISODE_COUNT = -1
_C.EVAL_CKPT_PATH_DIR = "data/checkpoints" # path to ckpt or path to ckpts dir
_C.NUM_ENVIRONMENTS = 16
_C.NUM_PROCESSES = -1 # deprecated
_C.SENSORS = ["RGB_SENSOR", "DEPTH_SENSOR"]
_C.CHECKPOINT_FOLDER = "data/checkpoints"
_C.NUM_UPDATES = 10000
_C.NUM_CHECKPOINTS = 10
# Number of model updates between checkpoints
_C.CHECKPOINT_INTERVAL = -1
_C.TOTAL_NUM_STEPS = -1.0
_C.LOG_INTERVAL = 10
_C.LOG_FILE = "train.log"
_C.FORCE_BLIND_POLICY = False
_C.VERBOSE = True
_C.EVAL_KEYS_TO_INCLUDE_IN_NAME = []
# For our use case, the CPU-side work is mainly memory copies
# and nothing of substantive compute. PyTorch has been making
# more and more memory copies parallel, but that just ends up
# slowing those down dramatically and reducing our perf.
# This forces it to be single threaded. The default
# value is left as false as it's different than how
# PyTorch normally behaves, but all configs we provide
# set it to true and yours likely should too
_C.FORCE_TORCH_SINGLE_THREADED = False
# -----------------------------------------------------------------------------
# Weights and Biases config
# -----------------------------------------------------------------------------
_C.WB = CN()
# The name of the project on W&B.
_C.WB.PROJECT_NAME = ""
# Logging entity (like your username or team name)
_C.WB.ENTITY = ""
# The group ID to assign to the run. Optional to specify.
_C.WB.GROUP = ""
# The run name to assign to the run. If not specified, W&B will randomly assign a name.
_C.WB.RUN_NAME = ""
# -----------------------------------------------------------------------------
# EVAL CONFIG
# -----------------------------------------------------------------------------
_C.EVAL = CN()
# The split to evaluate on
_C.EVAL.SPLIT = "val"
_C.EVAL.USE_CKPT_CONFIG = True
_C.EVAL.SHOULD_LOAD_CKPT = True
# -----------------------------------------------------------------------------
# REINFORCEMENT LEARNING (RL) ENVIRONMENT CONFIG
# -----------------------------------------------------------------------------
_C.RL = CN()
# -----------------------------------------------------------------------------
# preemption CONFIG
# -----------------------------------------------------------------------------
_C.RL.preemption = CN()
# Append the slurm job ID to the resume state filename if running a slurm job
# This is useful when you want runs from a different job that share the
# same checkpoint dir not to resume from each other's state.
_C.RL.preemption.append_slurm_job_id = False
# Number of gradient updates between saving the resume state
_C.RL.preemption.save_resume_state_interval = 100
# Save resume states only when running with slurm
# This is nice if you don't want debug jobs to resume
_C.RL.preemption.save_state_batch_only = False
# -----------------------------------------------------------------------------
# POLICY CONFIG
# -----------------------------------------------------------------------------
_C.RL.POLICY = CN()
_C.RL.POLICY.name = "PointNavResNetPolicy"
_C.RL.POLICY.action_distribution_type = "categorical" # or 'gaussian'
# If the list is empty, all keys will be included.
_C.RL.POLICY.include_visual_keys = []
_C.RL.GYM_OBS_KEYS = []
# For gaussian action distribution:
_C.RL.POLICY.ACTION_DIST = CN()
_C.RL.POLICY.ACTION_DIST.use_log_std = True
_C.RL.POLICY.ACTION_DIST.use_softplus = False
# If True, the std will be a parameter not conditioned on state
_C.RL.POLICY.ACTION_DIST.use_std_param = False
# If True, the std will be clamped to the specified min and max std values
_C.RL.POLICY.ACTION_DIST.clamp_std = True
_C.RL.POLICY.ACTION_DIST.min_std = 1e-6
_C.RL.POLICY.ACTION_DIST.max_std = 1
_C.RL.POLICY.ACTION_DIST.min_log_std = -5
_C.RL.POLICY.ACTION_DIST.max_log_std = 2
# For continuous action distributions (including gaussian):
_C.RL.POLICY.ACTION_DIST.action_activation = "tanh" # ['tanh', '']
# -----------------------------------------------------------------------------
# OBS_TRANSFORMS CONFIG
# -----------------------------------------------------------------------------
_C.RL.POLICY.OBS_TRANSFORMS = CN()
_C.RL.POLICY.OBS_TRANSFORMS.ENABLED_TRANSFORMS = tuple()
_C.RL.POLICY.OBS_TRANSFORMS.CENTER_CROPPER = CN()
_C.RL.POLICY.OBS_TRANSFORMS.CENTER_CROPPER.HEIGHT = 256
_C.RL.POLICY.OBS_TRANSFORMS.CENTER_CROPPER.WIDTH = 256
_C.RL.POLICY.OBS_TRANSFORMS.RESIZE_SHORTEST_EDGE = CN()
_C.RL.POLICY.OBS_TRANSFORMS.RESIZE_SHORTEST_EDGE.SIZE = 256
_C.RL.POLICY.OBS_TRANSFORMS.CUBE2EQ = CN()
_C.RL.POLICY.OBS_TRANSFORMS.CUBE2EQ.HEIGHT = 256
_C.RL.POLICY.OBS_TRANSFORMS.CUBE2EQ.WIDTH = 512
_C.RL.POLICY.OBS_TRANSFORMS.CUBE2EQ.SENSOR_UUIDS = list()
_C.RL.POLICY.OBS_TRANSFORMS.CUBE2FISH = CN()
_C.RL.POLICY.OBS_TRANSFORMS.CUBE2FISH.HEIGHT = 256
_C.RL.POLICY.OBS_TRANSFORMS.CUBE2FISH.WIDTH = 256
_C.RL.POLICY.OBS_TRANSFORMS.CUBE2FISH.FOV = 180
_C.RL.POLICY.OBS_TRANSFORMS.CUBE2FISH.PARAMS = (0.2, 0.2, 0.2)
_C.RL.POLICY.OBS_TRANSFORMS.CUBE2FISH.SENSOR_UUIDS = list()
_C.RL.POLICY.OBS_TRANSFORMS.EQ2CUBE = CN()
_C.RL.POLICY.OBS_TRANSFORMS.EQ2CUBE.HEIGHT = 256
_C.RL.POLICY.OBS_TRANSFORMS.EQ2CUBE.WIDTH = 256
_C.RL.POLICY.OBS_TRANSFORMS.EQ2CUBE.SENSOR_UUIDS = list()
# -----------------------------------------------------------------------------
# PROXIMAL POLICY OPTIMIZATION (PPO)
# -----------------------------------------------------------------------------
_C.RL.PPO = CN()
_C.RL.PPO.clip_param = 0.2
_C.RL.PPO.ppo_epoch = 4
_C.RL.PPO.num_mini_batch = 2
_C.RL.PPO.value_loss_coef = 0.5
_C.RL.PPO.entropy_coef = 0.01
_C.RL.PPO.lr = 2.5e-4
_C.RL.PPO.eps = 1e-5
_C.RL.PPO.max_grad_norm = 0.5
_C.RL.PPO.num_steps = 5
_C.RL.PPO.use_gae = True
_C.RL.PPO.use_linear_lr_decay = False
_C.RL.PPO.use_linear_clip_decay = False
_C.RL.PPO.gamma = 0.99
_C.RL.PPO.tau = 0.95
_C.RL.PPO.reward_window_size = 50
_C.RL.PPO.use_normalized_advantage = False
_C.RL.PPO.hidden_size = 512
# Use double-buffered sampling. This typically helps
# when environment step time is similar to or larger than
# policy inference time during rollout generation.
# Note that this does not change the memory requirements.
_C.RL.PPO.use_double_buffered_sampler = False
# -----------------------------------------------------------------------------
# DECENTRALIZED DISTRIBUTED PROXIMAL POLICY OPTIMIZATION (DD-PPO)
# -----------------------------------------------------------------------------
_C.RL.DDPPO = CN()
_C.RL.DDPPO.sync_frac = 0.6
_C.RL.DDPPO.distrib_backend = "GLOO"
_C.RL.DDPPO.rnn_type = "GRU"
_C.RL.DDPPO.num_recurrent_layers = 1
_C.RL.DDPPO.backbone = "resnet18"
_C.RL.DDPPO.pretrained_weights = "data/ddppo-models/gibson-2plus-resnet50.pth"
# Loads pretrained weights
_C.RL.DDPPO.pretrained = False
# Loads just the visual encoder backbone weights
_C.RL.DDPPO.pretrained_encoder = False
# Whether or not the visual encoder backbone will be trained
_C.RL.DDPPO.train_encoder = True
# Whether or not to reset the critic linear layer
_C.RL.DDPPO.reset_critic = True
# Forces distributed mode for testing
_C.RL.DDPPO.force_distributed = False
# -----------------------------------------------------------------------------
# ORBSLAM2 BASELINE
# -----------------------------------------------------------------------------
_C.ORBSLAM2 = CN()
_C.ORBSLAM2.SLAM_VOCAB_PATH = "habitat_baselines/slambased/data/ORBvoc.txt"
_C.ORBSLAM2.SLAM_SETTINGS_PATH = (
"habitat_baselines/slambased/data/mp3d3_small1k.yaml"
)
_C.ORBSLAM2.MAP_CELL_SIZE = 0.1
_C.ORBSLAM2.MAP_SIZE = 40
_C.ORBSLAM2.CAMERA_HEIGHT = get_task_config().SIMULATOR.DEPTH_SENSOR.POSITION[
1
]
_C.ORBSLAM2.BETA = 100
_C.ORBSLAM2.H_OBSTACLE_MIN = 0.3 * _C.ORBSLAM2.CAMERA_HEIGHT
_C.ORBSLAM2.H_OBSTACLE_MAX = 1.0 * _C.ORBSLAM2.CAMERA_HEIGHT
_C.ORBSLAM2.D_OBSTACLE_MIN = 0.1
_C.ORBSLAM2.D_OBSTACLE_MAX = 4.0
_C.ORBSLAM2.PREPROCESS_MAP = True
_C.ORBSLAM2.MIN_PTS_IN_OBSTACLE = (
get_task_config().SIMULATOR.DEPTH_SENSOR.WIDTH / 2.0
)
_C.ORBSLAM2.ANGLE_TH = float(np.deg2rad(15))
_C.ORBSLAM2.DIST_REACHED_TH = 0.15
_C.ORBSLAM2.NEXT_WAYPOINT_TH = 0.5
_C.ORBSLAM2.NUM_ACTIONS = 3
_C.ORBSLAM2.DIST_TO_STOP = 0.05
_C.ORBSLAM2.PLANNER_MAX_STEPS = 500
_C.ORBSLAM2.DEPTH_DENORM = get_task_config().SIMULATOR.DEPTH_SENSOR.MAX_DEPTH
# -----------------------------------------------------------------------------
# PROFILING
# -----------------------------------------------------------------------------
_C.PROFILING = CN()
_C.PROFILING.CAPTURE_START_STEP = -1
_C.PROFILING.NUM_STEPS_TO_CAPTURE = -1
_C.register_renamed_key
def get_config(
config_paths: Optional[Union[List[str], str]] = None,
opts: Optional[list] = None,
) -> CN:
r"""Create a unified config with default values overwritten by values from
:ref:`config_paths` and overwritten by options from :ref:`opts`.
Args:
config_paths: List of config paths or string that contains comma
separated list of config paths.
        opts: Config options (keys, values) in a list (e.g., passed from
            the command line into the config). For example, ``opts = ['FOO.BAR',
            0.5]``. Argument can be used for parameter sweeping or quick tests.
"""
config = _C.clone()
if config_paths:
if isinstance(config_paths, str):
if CONFIG_FILE_SEPARATOR in config_paths:
config_paths = config_paths.split(CONFIG_FILE_SEPARATOR)
else:
config_paths = [config_paths]
for config_path in config_paths:
config.merge_from_file(config_path)
if opts:
for k, v in zip(opts[0::2], opts[1::2]):
if k == "BASE_TASK_CONFIG_PATH":
config.BASE_TASK_CONFIG_PATH = v
config.TASK_CONFIG = get_task_config(config.BASE_TASK_CONFIG_PATH)
# In case the config specifies overrides for the TASK_CONFIG, we
# remerge the files here
if config_paths:
for config_path in config_paths:
config.merge_from_file(config_path)
if opts:
config.CMD_TRAILING_OPTS = config.CMD_TRAILING_OPTS + opts
config.merge_from_list(config.CMD_TRAILING_OPTS)
if config.NUM_PROCESSES != -1:
warnings.warn(
"NUM_PROCESSES is depricated and will be removed in a future version."
" Use NUM_ENVIRONMENTS instead."
" Overwriting NUM_ENVIRONMENTS with NUM_PROCESSES for backwards compatibility."
)
config.NUM_ENVIRONMENTS = config.NUM_PROCESSES
config.freeze()
return config
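# Hedged usage sketch (not part of the original module). It assumes that
# habitat-lab's default task config ("configs/tasks/pointnav.yaml", see
# BASE_TASK_CONFIG_PATH above) is reachable from the working directory,
# since get_config() loads it before applying the overrides.
if __name__ == "__main__":
    example_cfg = get_config(opts=["NUM_ENVIRONMENTS", 8])
    print(example_cfg.NUM_ENVIRONMENTS)  # -> 8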
|
py
|
1a5dc1ea13d214a9d7f0b0f88e18a96a761938b9
|
# Minimal Sphinx configuration: register recommonmark's CommonMarkParser so
# that Markdown (.md) sources are parsed alongside reStructuredText (.rst).
from recommonmark.parser import CommonMarkParser
source_parsers = {
    '.md': CommonMarkParser,
}
source_suffix = ['.rst', '.md']
master_doc = 'index'
project = u'project-name'
|
py
|
1a5dc2233ff8b0034e7dc916dbfc2d2ade74e357
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import re
import json
# Clean item data: strip HTML tags from the revision comment
class WikiPipeline(object):
def process_item(self, item, spider):
if item['comment'] is not None:
item['comment'] = re.sub('<[^>]*>', '', item['comment'])
return item
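# Hedged illustration (added for clarity): the tag-stripping regex above turns
# '<b>minor</b> edit' into 'minor edit'.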
# Export items to a JSON-lines file
class JsonWriterPipeline(object):
    def __init__(self):
        # open in text mode so the JSON strings below can be written directly
        self.file = open('revision.json', 'w')
    def process_item(self, item, spider):
        line = json.dumps(dict(item)) + "\n"
        self.file.write(line)
        return item
    def close_spider(self, spider):
        # flush buffered lines and release the file handle when the crawl ends
        self.file.close()
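# Hedged example (not part of the original project): how these pipelines might
# be enabled in settings.py. The project module name "wiki" and the priority
# numbers are assumptions for illustration only.
#
#     ITEM_PIPELINES = {
#         'wiki.pipelines.WikiPipeline': 300,
#         'wiki.pipelines.JsonWriterPipeline': 800,
#     }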
|
py
|
1a5dc2662cd95f3a84ad3d778ab6dbcdac75b1c4
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
##
## solvers.py
##
## Created on: Nov 27, 2016
## Author: Alexey S. Ignatiev
## E-mail: [email protected]
##
"""
===============
List of classes
===============
.. autosummary::
:nosignatures:
SolverNames
Solver
Cadical
Gluecard3
Gluecard4
Glucose3
Glucose4
Lingeling
MapleChrono
MapleCM
Maplesat
Mergesat3
Minicard
Minisat22
MinisatGH
==================
Module description
==================
This module provides *incremental* access to a few modern SAT solvers. The
solvers supported by PySAT are:
- CaDiCaL (`rel-1.0.3 <https://github.com/arminbiere/cadical>`__)
- Glucose (`3.0 <http://www.labri.fr/perso/lsimon/glucose/>`__)
- Glucose (`4.1 <http://www.labri.fr/perso/lsimon/glucose/>`__)
- Lingeling (`bbc-9230380-160707 <http://fmv.jku.at/lingeling/>`__)
- MapleLCMDistChronoBT (`SAT competition 2018 version <http://sat2018.forsyte.tuwien.ac.at/solvers/main_and_glucose_hack/>`__)
- MapleCM (`SAT competition 2018 version <http://sat2018.forsyte.tuwien.ac.at/solvers/main_and_glucose_hack/>`__)
- Maplesat (`MapleCOMSPS_LRB <https://sites.google.com/a/gsd.uwaterloo.ca/maplesat/>`__)
- Mergesat (`3.0 <https://github.com/conp-solutions/mergesat>`__)
- Minicard (`1.2 <https://github.com/liffiton/minicard>`__)
- Minisat (`2.2 release <http://minisat.se/MiniSat.html>`__)
- Minisat (`GitHub version <https://github.com/niklasso/minisat>`__)
Additionally, PySAT includes the versions of :class:`Glucose3` and
:class:`Glucose4` that support native cardinality constraints, ported from
:class:`Minicard`:
- Gluecard3
- Gluecard4
All solvers can be accessed through a unified MiniSat-like [1]_ incremental
[2]_ interface described below.
.. [1] Niklas Eén, Niklas Sörensson. *An Extensible SAT-solver*. SAT 2003.
pp. 502-518
.. [2] Niklas Eén, Niklas Sörensson. *Temporal induction by incremental SAT
solving*. Electr. Notes Theor. Comput. Sci. 89(4). 2003. pp. 543-560
The module provides direct access to all supported solvers using the
corresponding classes :class:`Cadical`, :class:`Gluecard3`,
:class:`Gluecard4`, :class:`Glucose3`, :class:`Glucose4`,
:class:`Lingeling`, :class:`MapleChrono`, :class:`MapleCM`,
:class:`Maplesat`, :class:`Mergesat3`, :class:`Minicard`,
:class:`Minisat22`, and :class:`MinisatGH`. However, the solvers can also
be accessed through the common base class :class:`Solver` using the solver
``name`` argument. For example, both of the following pieces of code
create a copy of the :class:`Glucose3` solver:
.. code-block:: python
>>> from pysat.solvers import Glucose3, Solver
>>>
>>> g = Glucose3()
>>> g.delete()
>>>
>>> s = Solver(name='g3')
>>> s.delete()
The :mod:`pysat.solvers` module is designed to create and manipulate SAT
solvers as *oracles*, i.e. it does not give access to solvers' internal
parameters such as variable polarities or activities. PySAT provides a user
with the following basic SAT solving functionality:
- creating and deleting solver objects
- adding individual clauses and formulas to solver objects
- making SAT calls with or without assumptions
- propagating a given set of assumption literals
- setting preferred polarities for a (sub)set of variables
- extracting a model of a satisfiable input formula
- enumerating models of an input formula
- extracting an unsatisfiable core of an unsatisfiable formula
- extracting a `DRUP proof <http://www.cs.utexas.edu/~marijn/drup/>`__ logged by the solver
PySAT supports both non-incremental and incremental SAT solving.
Incrementality can be achieved with the use of the MiniSat-like
*assumption-based* interface [2]_. It can be helpful if multiple calls to a
SAT solver are needed for the same formula using different sets of
"assumptions", e.g. when doing consecutive SAT calls for formula
:math:`\mathcal{F}\land (a_{i_1}\land\ldots\land a_{i_1+j_1})` and
:math:`\mathcal{F}\land (a_{i_2}\land\ldots\land a_{i_2+j_2})`, where every
:math:`a_{l_k}` is an assumption literal.
There are several advantages of using assumptions: (1) it enables one to
*keep and reuse* the clauses learnt during previous SAT calls at a later
stage and (2) assumptions can be easily used to extract an *unsatisfiable
core* of the formula. A drawback of assumption-based SAT solving is that
the clauses learnt are longer (they typically contain many assumption
literals), which makes the SAT calls harder.
In PySAT, assumptions should be provided as a list of literals given to the
``solve()`` method:
.. code-block:: python
>>> from pysat.solvers import Solver
>>> s = Solver()
>>>
... # assume that solver s is fed with a formula
>>>
>>> s.solve() # a simple SAT call
True
>>>
>>> s.solve(assumptions=[1, -2, 3]) # a SAT call with assumption literals
False
>>> s.get_core() # extracting an unsatisfiable core
[3, 1]
In order to shorten the description of the module, the classes providing
direct access to the individual solvers, i.e. classes :class:`Cadical`,
:class:`Gluecard3`, :class:`Gluecard4`, :class:`Glucose3`,
:class:`Glucose4`, :class:`Lingeling`, :class:`MapleChrono`,
:class:`MapleCM`, :class:`Maplesat`, :class:`Mergesat3`,
:class:`Minicard`, :class:`Minisat22`, and :class:`MinisatGH`, are
**omitted**. They replicate the interface of the base class
:class:`Solver` and, thus, can be used the same exact way.
==============
Module details
==============
"""
#
#==============================================================================
from pysat._utils import MainThread
from pysat.formula import CNFPlus
import pysolvers
import signal
import tempfile
try: # for Python < 3.8
from time import clock as process_time
except ImportError: # for Python >= 3.8
from time import process_time
#
#==============================================================================
class NoSuchSolverError(Exception):
"""
This exception is raised when creating a new SAT solver whose name
does not match any name in :class:`SolverNames`. The list of *known*
solvers includes the names `'cadical'`, `'gluecard3'`, `'gluecard4'`,
`'glucose3'`, `'glucose4'`, `'lingeling'`, `'maplechrono'`,
`'maplecm'`, `'maplesat'`, `'mergesat3'`, `'minicard'`, `'minisat22'`,
and `'minisatgh'`.
"""
pass
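# Hedged usage sketch (not part of the original module): requesting an unknown
# backend name raises the exception above, e.g.
#
#     try:
#         s = Solver(name='no-such-solver')
#     except NoSuchSolverError:
#         print('unknown solver name')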
#
#==============================================================================
class SolverNames(object):
"""
This class serves to determine the solver requested by a user given a
string name. This allows for using several possible names for
specifying a solver.
.. code-block:: python
cadical = ('cd', 'cdl', 'cadical')
gluecard3 = ('gc3', 'gc30', 'gluecard3', 'gluecard30')
            gluecard4 = ('gc4', 'gc41', 'gluecard4', 'gluecard41')
glucose3 = ('g3', 'g30', 'glucose3', 'glucose30')
glucose4 = ('g4', 'g41', 'glucose4', 'glucose41')
lingeling = ('lgl', 'lingeling')
            maplechrono = ('mcb', 'chrono', 'chronobt', 'maplechrono')
maplecm = ('mcm', 'maplecm')
maplesat = ('mpl', 'maple', 'maplesat')
mergesat3 = ('mg3', 'mgs3', 'mergesat3', 'mergesat30')
minicard = ('mc', 'mcard', 'minicard')
minisat22 = ('m22', 'msat22', 'minisat22')
minisatcs = ('mcs', 'msat-cs', 'minisat-cs')
minisatgh = ('mgh', 'msat-gh', 'minisat-gh')
As a result, in order to select Glucose3, a user can specify the
solver's name: either ``'g3'``, ``'g30'``, ``'glucose3'``, or
``'glucose30'``. *Note that the capitalized versions of these names are
also allowed*.
"""
cadical = ('cd', 'cdl', 'cadical')
gluecard3 = ('gc3', 'gc30', 'gluecard3', 'gluecard30')
gluecard4 = ('gc4', 'gc41', 'gluecard4', 'gluecard41')
glucose3 = ('g3', 'g30', 'glucose3', 'glucose30')
glucose4 = ('g4', 'g41', 'glucose4', 'glucose41')
lingeling = ('lgl', 'lingeling')
maplechrono = ('mcb', 'chrono', 'chronobt', 'maplechrono')
maplecm = ('mcm', 'maplecm')
maplesat = ('mpl', 'maple', 'maplesat')
mergesat3 = ('mg3', 'mgs3', 'mergesat3', 'mergesat30')
minicard = ('mc', 'mcard', 'minicard')
minisat22 = ('m22', 'msat22', 'minisat22')
minisatcs = ('mcs', 'msat-cs', 'minisat-cs')
minisatgh = ('mgh', 'msat-gh', 'minisat-gh')
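# Hedged illustration (not part of the original module): Solver.new() lowercases
# the requested name and checks membership in the tuples above, so e.g.
#
#     'G30'.lower() in SolverNames.glucose3   # -> True, resolves to Glucose3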
#
#==============================================================================
class Solver(object):
"""
Main class for creating and manipulating a SAT solver. Any available
SAT solver can be accessed as an object of this class and so
:class:`Solver` can be seen as a wrapper for all supported solvers.
The constructor of :class:`Solver` has only one mandatory argument
``name``, while all the others are default. This means that explicit
solver constructors, e.g. :class:`Glucose3` or :class:`MinisatGH` etc.,
have only default arguments.
:param name: solver's name (see :class:`SolverNames`).
:param bootstrap_with: a list of clauses for solver initialization.
:param use_timer: whether or not to measure SAT solving time.
:type name: str
:type bootstrap_with: iterable(iterable(int))
:type use_timer: bool
The ``bootstrap_with`` argument is useful when there is an input CNF
formula to feed the solver with. The argument expects a list of
clauses, each clause being a list of literals, i.e. a list of integers.
        If set to ``True``, the ``use_timer`` parameter will make the solver
        accumulate the time spent on all SAT calls made with this solver and
        also keep the time of the last SAT call.
Once created and used, a solver must be deleted with the :meth:`delete`
method. Alternatively, if created using the ``with`` statement,
deletion is done automatically when the end of the ``with`` block is
reached.
Given the above, a couple of examples of solver creation are the
following:
.. code-block:: python
>>> from pysat.solvers import Solver, Minisat22
>>>
>>> s = Solver(name='g4')
>>> s.add_clause([-1, 2])
>>> s.add_clause([-1, -2])
>>> s.solve()
True
>>> print(s.get_model())
[-1, -2]
>>> s.delete()
>>>
>>> with Minisat22(bootstrap_with=[[-1, 2], [-1, -2]]) as m:
... m.solve()
True
... print(m.get_model())
[-1, -2]
Note that while all explicit solver classes necessarily have default
arguments ``bootstrap_with`` and ``use_timer``, solvers
:class:`Cadical`, :class:`Lingeling`, :class:`Gluecard3`,
:class:`Gluecard4`, :class:`Glucose3`, :class:`Glucose4`,
:class:`MapleChrono`, :class:`MapleCM`, and :class:`Maplesat` can have
        additional default arguments. One such argument is `DRUP
proof <http://www.cs.utexas.edu/~marijn/drup/>`__ logging. This can be
enabled by setting the ``with_proof`` argument to ``True`` (``False``
by default):
.. code-block:: python
>>> from pysat.solvers import Lingeling
>>> from pysat.examples.genhard import PHP
>>>
>>> cnf = PHP(nof_holes=2) # pigeonhole principle for 3 pigeons
>>>
>>> with Lingeling(bootstrap_with=cnf.clauses, with_proof=True) as l:
... l.solve()
False
... l.get_proof()
['-5 0', '6 0', '-2 0', '-4 0', '1 0', '3 0', '0']
Additionally, Glucose-based solvers, namely :class:`Glucose3`,
:class:`Glucose4`, :class:`Gluecard3`, and :class:`Gluecard4` have one
more default argument ``incr`` (``False`` by default), which enables
incrementality features introduced in Glucose3 [3]_. To summarize, the
additional arguments of Glucose are:
:param incr: enable the incrementality features of Glucose3 [3]_.
:param with_proof: enable proof logging in the `DRUP format <http://www.cs.utexas.edu/~marijn/drup/>`__.
:type incr: bool
:type with_proof: bool
.. [3] Gilles Audemard, Jean-Marie Lagniez, Laurent Simon. *Improving
Glucose for Incremental SAT Solving with Assumptions: Application
to MUS Extraction*. SAT 2013. pp. 309-317
"""
def __init__(self, name='m22', bootstrap_with=None, use_timer=False, **kwargs):
"""
Basic constructor.
"""
self.solver = None
self.new(name, bootstrap_with, use_timer, **kwargs)
def __enter__(self):
"""
'with' constructor.
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
'with' destructor.
"""
self.solver.delete()
self.solver = None
def new(self, name='m22', bootstrap_with=None, use_timer=False, **kwargs):
"""
The actual solver constructor invoked from ``__init__()``. Chooses
the solver to run, based on its name. See :class:`Solver` for the
parameters description.
:raises NoSuchSolverError: if there is no solver matching the given
name.
"""
# checking keyword arguments
kwallowed = set(['incr', 'with_proof'])
for a in kwargs:
if a not in kwallowed:
raise TypeError('Unexpected keyword argument \'{0}\''.format(a))
if not self.solver:
name_ = name.lower()
if name_ in SolverNames.cadical:
self.solver = Cadical(bootstrap_with, use_timer, **kwargs)
elif name_ in SolverNames.gluecard3:
self.solver = Gluecard3(bootstrap_with, use_timer, **kwargs)
elif name_ in SolverNames.gluecard4:
self.solver = Gluecard4(bootstrap_with, use_timer, **kwargs)
elif name_ in SolverNames.glucose3:
self.solver = Glucose3(bootstrap_with, use_timer, **kwargs)
elif name_ in SolverNames.glucose4:
self.solver = Glucose4(bootstrap_with, use_timer, **kwargs)
elif name_ in SolverNames.lingeling:
self.solver = Lingeling(bootstrap_with, use_timer, **kwargs)
elif name_ in SolverNames.maplechrono:
self.solver = MapleChrono(bootstrap_with, use_timer, **kwargs)
elif name_ in SolverNames.maplecm:
self.solver = MapleCM(bootstrap_with, use_timer, **kwargs)
elif name_ in SolverNames.maplesat:
self.solver = Maplesat(bootstrap_with, use_timer, **kwargs)
elif name_ in SolverNames.mergesat3:
self.solver = Mergesat3(bootstrap_with, use_timer)
elif name_ in SolverNames.minicard:
self.solver = Minicard(bootstrap_with, use_timer)
elif name_ in SolverNames.minisat22:
self.solver = Minisat22(bootstrap_with, use_timer)
elif name_ in SolverNames.minisatcs:
self.solver = MinisatCS(bootstrap_with, use_timer)
elif name_ in SolverNames.minisatgh:
self.solver = MinisatGH(bootstrap_with, use_timer)
else:
raise(NoSuchSolverError(name))
def delete(self):
"""
Solver destructor, which must be called explicitly if the solver
            is to be removed. This is not needed inside a ``with`` block.
"""
if self.solver:
self.solver.delete()
self.solver = None
def accum_stats(self):
"""
Get accumulated low-level stats from the solver. Currently, the
statistics includes the number of restarts, conflicts, decisions,
and propagations.
:rtype: dict.
Example:
.. code-block:: python
>>> from pysat.examples.genhard import PHP
>>> cnf = PHP(5)
>>> from pysat.solvers import Solver
>>> with Solver(bootstrap_with=cnf) as s:
... print(s.solve())
... print(s.accum_stats())
False
{'restarts': 2, 'conflicts': 201, 'decisions': 254, 'propagations': 2321}
"""
if self.solver:
return self.solver.accum_stats()
def solve(self, assumptions=[]):
"""
This method is used to check satisfiability of a CNF formula given
to the solver (see methods :meth:`add_clause` and
:meth:`append_formula`). Unless interrupted with SIGINT, the
method returns either ``True`` or ``False``.
Incremental SAT calls can be made with the use of assumption
literals. (**Note** that the ``assumptions`` argument is optional
and disabled by default.)
:param assumptions: a list of assumption literals.
:type assumptions: iterable(int)
:rtype: Boolean or ``None``.
Example:
.. code-block:: python
>>> from pysat.solvers import Solver
                >>> s = Solver(bootstrap_with=[[-1, 2], [-2, 3]])
>>> s.solve()
True
>>> s.solve(assumptions=[1, -3])
False
>>> s.delete()
"""
if self.solver:
return self.solver.solve(assumptions)
def solve_limited(self, assumptions=[], expect_interrupt=False):
"""
This method is used to check satisfiability of a CNF formula given
to the solver (see methods :meth:`add_clause` and
:meth:`append_formula`), taking into account the upper bounds on
the *number of conflicts* (see :meth:`conf_budget`) and the *number
of propagations* (see :meth:`prop_budget`). If the number of
conflicts or propagations is set to be larger than 0 then the
following SAT call done with :meth:`solve_limited` will not exceed
these values, i.e. it will be *incomplete*. Otherwise, such a call
will be identical to :meth:`solve`.
As soon as the given upper bound on the number of conflicts or
propagations is reached, the SAT call is dropped returning
``None``, i.e. *unknown*. ``None`` can also be returned if the call
is interrupted by SIGINT. Otherwise, the method returns ``True`` or
``False``.
**Note** that only MiniSat-like solvers support this functionality
(e.g. :class:`Cadical` and :class:`Lingeling` do not support it).
Incremental SAT calls can be made with the use of assumption
literals. (**Note** that the ``assumptions`` argument is optional
and disabled by default.)
**Note** that since SIGINT handling and :meth:`interrupt` are not
configured to work *together* at this point, additional input
parameter ``expect_interrupt`` is assumed to be given, indicating
what kind of interruption may happen during the execution of
:meth:`solve_limited`: whether a SIGINT signal or internal
:meth:`interrupt`. By default, a SIGINT signal handling is
assumed. If ``expect_interrupt`` is set to ``True`` and eventually
a SIGINT is received, the behavior is **undefined**.
:param assumptions: a list of assumption literals.
:param expect_interrupt: whether :meth:`interrupt` will be called
:type assumptions: iterable(int)
:type expect_interrupt: bool
:rtype: Boolean or ``None``.
Doing limited SAT calls can be of help if it is known that
*complete* SAT calls are too expensive. For instance, it can be
useful when minimizing unsatisfiable cores in MaxSAT (see
:meth:`pysat.examples.RC2.minimize_core` also shown below).
            Besides supporting deterministic interruption based on
:meth:`conf_budget` and/or :meth:`prop_budget`, limited SAT calls
support *deterministic* and *non-deterministic* interruption from
inside a Python script. See the :meth:`interrupt` and
:meth:`clear_interrupt` methods for details.
Usage example:
.. code-block:: python
... # assume that a SAT oracle is set up to contain an unsatisfiable
... # formula, and its core is stored in variable "core"
                oracle.conf_budget(1000) # getting at most 1000 conflicts per call
i = 0
while i < len(core):
to_test = core[:i] + core[(i + 1):]
# doing a limited call
if oracle.solve_limited(assumptions=to_test) == False:
core = to_test
else: # True or *unknown*
i += 1
"""
if self.solver:
return self.solver.solve_limited(assumptions, expect_interrupt)
def conf_budget(self, budget=-1):
"""
Set limit (i.e. the upper bound) on the number of conflicts in the
next limited SAT call (see :meth:`solve_limited`). The limit value
is given as a ``budget`` variable and is an integer greater than
``0``. If the budget is set to ``0`` or ``-1``, the upper bound on
the number of conflicts is disabled.
:param budget: the upper bound on the number of conflicts.
:type budget: int
Example:
.. code-block:: python
>>> from pysat.solvers import MinisatGH
>>> from pysat.examples.genhard import PHP
>>>
>>> cnf = PHP(nof_holes=20) # PHP20 is too hard for a SAT solver
>>> m = MinisatGH(bootstrap_with=cnf.clauses)
>>>
>>> m.conf_budget(2000) # getting at most 2000 conflicts
>>> print(m.solve_limited()) # making a limited oracle call
None
>>> m.delete()
"""
if self.solver:
self.solver.conf_budget(budget)
def prop_budget(self, budget=-1):
"""
Set limit (i.e. the upper bound) on the number of propagations in
the next limited SAT call (see :meth:`solve_limited`). The limit
value is given as a ``budget`` variable and is an integer greater
            than ``0``. If the budget is set to ``0`` or ``-1``, the upper
            bound on the number of propagations is disabled.
:param budget: the upper bound on the number of propagations.
:type budget: int
Example:
.. code-block:: python
>>> from pysat.solvers import MinisatGH
>>> from pysat.examples.genhard import Parity
>>>
>>> cnf = Parity(size=10) # too hard for a SAT solver
>>> m = MinisatGH(bootstrap_with=cnf.clauses)
>>>
>>> m.prop_budget(100000) # doing at most 100000 propagations
>>> print(m.solve_limited()) # making a limited oracle call
None
>>> m.delete()
"""
if self.solver:
self.solver.prop_budget(budget)
def interrupt(self):
"""
Interrupt the execution of the current *limited* SAT call (see
:meth:`solve_limited`). Can be used to enforce time limits using
timer objects. The interrupt must be cleared before performing
another SAT call (see :meth:`clear_interrupt`).
**Note** that this method can be called if limited SAT calls are
made with the option ``expect_interrupt`` set to ``True``.
Behaviour is **undefined** if used to interrupt a *non-limited*
SAT call (see :meth:`solve`).
Example:
.. code-block:: python
>>> from pysat.solvers import MinisatGH
>>> from pysat.examples.genhard import PHP
>>> from threading import Timer
>>>
>>> cnf = PHP(nof_holes=20) # PHP20 is too hard for a SAT solver
>>> m = MinisatGH(bootstrap_with=cnf.clauses)
>>>
>>> def interrupt(s):
>>> s.interrupt()
>>>
>>> timer = Timer(10, interrupt, [m])
>>> timer.start()
>>>
>>> print(m.solve_limited(expect_interrupt=True))
None
>>> m.delete()
"""
if self.solver:
self.solver.interrupt()
def clear_interrupt(self):
"""
Clears a previous interrupt. If a limited SAT call was interrupted
using the :meth:`interrupt` method, this method **must be called**
before calling the SAT solver again.
"""
if self.solver:
self.solver.clear_interrupt()
def propagate(self, assumptions=[], phase_saving=0):
"""
The method takes a list of assumption literals and does unit
propagation of each of these literals consecutively. A Boolean
status is returned followed by a list of assigned (assumed and also
            propagated) literals. The status is ``True`` if no conflict arose
during propagation. Otherwise, the status is ``False``.
Additionally, a user may specify an optional argument
``phase_saving`` (``0`` by default) to enable MiniSat-like phase
saving.
**Note** that only MiniSat-like solvers support this functionality
(e.g. :class:`Cadical` and :class:`Lingeling` do not support it).
:param assumptions: a list of assumption literals.
:param phase_saving: enable phase saving (can be ``0``, ``1``, and
``2``).
:type assumptions: iterable(int)
:type phase_saving: int
:rtype: tuple(bool, list(int)).
Usage example:
.. code-block:: python
>>> from pysat.solvers import Glucose3
>>> from pysat.card import *
>>>
>>> cnf = CardEnc.atmost(lits=range(1, 6), bound=1, encoding=EncType.pairwise)
>>> g = Glucose3(bootstrap_with=cnf.clauses)
>>>
>>> g.propagate(assumptions=[1])
(True, [1, -2, -3, -4, -5])
>>>
>>> g.add_clause([2])
>>> g.propagate(assumptions=[1])
(False, [])
>>>
>>> g.delete()
"""
if self.solver:
return self.solver.propagate(assumptions, phase_saving)
def set_phases(self, literals=[]):
"""
The method takes a list of literals as an argument and sets
*phases* (or MiniSat-like *polarities*) of the corresponding
variables respecting the literals. For example, if a given list of
literals is ``[1, -513]``, the solver will try to set variable
:math:`x_1` to true while setting :math:`x_{513}` to false.
**Note** that once these preferences are specified,
:class:`MinisatGH` and :class:`Lingeling` will always respect them
when branching on these variables. However, solvers
:class:`Glucose3`, :class:`Glucose4`, :class:`MapleChrono`,
:class:`MapleCM`, :class:`Maplesat`, :class:`Minisat22`, and
:class:`Minicard` can redefine the preferences in any of the
following SAT calls due to the phase saving heuristic.
Also **note** that :class:`Cadical` does not support this
functionality.
:param literals: a list of literals.
:type literals: iterable(int)
Usage example:
.. code-block:: python
>>> from pysat.solvers import Glucose3
>>>
>>> g = Glucose3(bootstrap_with=[[1, 2]])
>>> # the formula has 3 models: [-1, 2], [1, -2], [1, 2]
>>>
>>> g.set_phases(literals=[1, 2])
>>> g.solve()
True
>>> g.get_model()
[1, 2]
>>>
>>> g.delete()
"""
if self.solver:
return self.solver.set_phases(literals)
def get_status(self):
"""
The result of a previous SAT call is stored in an internal
variable and can be later obtained using this method.
:rtype: Boolean or ``None``.
``None`` is returned if a previous SAT call was interrupted.
"""
if self.solver:
return self.solver.get_status()
def get_model(self):
"""
The method is to be used for extracting a satisfying assignment for
a CNF formula given to the solver. A model is provided if a
previous SAT call returned ``True``. Otherwise, ``None`` is
reported.
:rtype: list(int) or ``None``.
Example:
.. code-block:: python
>>> from pysat.solvers import Solver
>>> s = Solver()
>>> s.add_clause([-1, 2])
>>> s.add_clause([-1, -2])
>>> s.add_clause([1, -2])
>>> s.solve()
True
>>> print(s.get_model())
[-1, -2]
>>> s.delete()
"""
if self.solver:
return self.solver.get_model()
def get_core(self):
"""
This method is to be used for extracting an unsatisfiable core in
the form of a subset of a given set of assumption literals, which
are responsible for unsatisfiability of the formula. This can be
done only if the previous SAT call returned ``False`` (*UNSAT*).
Otherwise, ``None`` is returned.
:rtype: list(int) or ``None``.
Usage example:
.. code-block:: python
>>> from pysat.solvers import Minisat22
>>> m = Minisat22()
>>> m.add_clause([-1, 2])
>>> m.add_clause([-2, 3])
>>> m.add_clause([-3, 4])
>>> m.solve(assumptions=[1, 2, 3, -4])
False
>>> print(m.get_core()) # literals 2 and 3 are not in the core
[-4, 1]
>>> m.delete()
"""
if self.solver:
return self.solver.get_core()
def get_proof(self):
"""
A DRUP proof can be extracted using this method if the solver was
set up to provide a proof. Otherwise, the method returns ``None``.
:rtype: list(str) or ``None``.
Example:
.. code-block:: python
>>> from pysat.solvers import Solver
>>> from pysat.examples.genhard import PHP
>>>
>>> cnf = PHP(nof_holes=3)
>>> with Solver(name='g4', with_proof=True) as g:
... g.append_formula(cnf.clauses)
... g.solve()
False
... print(g.get_proof())
['-8 4 1 0', '-10 0', '-2 0', '-4 0', '-8 0', '-6 0', '0']
"""
if self.solver:
return self.solver.get_proof()
def time(self):
"""
Get the time spent when doing the last SAT call. **Note** that the
time is measured only if the ``use_timer`` argument was previously
set to ``True`` when creating the solver (see :class:`Solver` for
details).
:rtype: float.
Example usage:
.. code-block:: python
>>> from pysat.solvers import Solver
>>> from pysat.examples.genhard import PHP
>>>
>>> cnf = PHP(nof_holes=10)
>>> with Solver(bootstrap_with=cnf.clauses, use_timer=True) as s:
... print(s.solve())
False
... print('{0:.2f}s'.format(s.time()))
150.16s
"""
if self.solver:
return self.solver.time()
def time_accum(self):
"""
Get the time spent for doing all SAT calls accumulated. **Note**
that the time is measured only if the ``use_timer`` argument was
previously set to ``True`` when creating the solver (see
:class:`Solver` for details).
:rtype: float.
Example usage:
.. code-block:: python
>>> from pysat.solvers import Solver
>>> from pysat.examples.genhard import PHP
>>>
>>> cnf = PHP(nof_holes=10)
>>> with Solver(bootstrap_with=cnf.clauses, use_timer=True) as s:
... print(s.solve(assumptions=[1]))
False
... print('{0:.2f}s'.format(s.time()))
1.76s
... print(s.solve(assumptions=[-1]))
False
... print('{0:.2f}s'.format(s.time()))
113.58s
... print('{0:.2f}s'.format(s.time_accum()))
115.34s
"""
if self.solver:
return self.solver.time_accum()
def nof_vars(self):
"""
This method returns the number of variables currently appearing in
the formula given to the solver.
:rtype: int.
Example:
.. code-block:: python
>>> s = Solver(bootstrap_with=[[-1, 2], [-2, 3]])
>>> s.nof_vars()
3
"""
if self.solver:
return self.solver.nof_vars()
def nof_clauses(self):
"""
This method returns the number of clauses currently appearing in
the formula given to the solver.
:rtype: int.
Example:
.. code-block:: python
>>> s = Solver(bootstrap_with=[[-1, 2], [-2, 3]])
>>> s.nof_clauses()
2
"""
if self.solver:
return self.solver.nof_clauses()
def enum_models(self, assumptions=[]):
"""
This method can be used to enumerate models of a CNF formula and
it performs as a standard Python iterator. The method can be
called without arguments but also with an argument
``assumptions``, which represents a list of literals to "assume".
.. warning::
Once finished, model enumeration results in the target formula
being *unsatisfiable*. This is because the enumeration process
*blocks* each previously computed model by adding a new
clause until no more models of the formula exist.
:param assumptions: a list of assumption literals.
:type assumptions: iterable(int)
:rtype: list(int).
Example:
.. code-block:: python
>>> with Solver(bootstrap_with=[[-1, 2], [-2, 3]]) as s:
... for m in s.enum_models():
... print(m)
[-1, -2, -3]
[-1, -2, 3]
[-1, 2, 3]
[1, 2, 3]
>>>
>>> with Solver(bootstrap_with=[[-1, 2], [-2, 3]]) as s:
... for m in s.enum_models(assumptions=[1]):
... print(m)
[1, 2, 3]
"""
if self.solver:
return self.solver.enum_models(assumptions)
def add_clause(self, clause, no_return=True):
"""
This method is used to add a single clause to the solver. An
optional argument ``no_return`` controls whether or not to check
the formula's satisfiability after adding the new clause.
:param clause: an iterable over literals.
:param no_return: check solver's internal formula and return the
result, if set to ``False``.
:type clause: iterable(int)
:type no_return: bool
:rtype: bool if ``no_return`` is set to ``False``.
Note that a clause can be either a ``list`` of integers or another
iterable type over integers, e.g. ``tuple`` or ``set`` among
others.
A usage example is the following:
.. code-block:: python
>>> s = Solver(bootstrap_with=[[-1, 2], [-1, -2]])
>>> s.add_clause([1], no_return=False)
False
"""
if self.solver:
res = self.solver.add_clause(clause, no_return)
if not no_return:
return res
def add_atmost(self, lits, k, no_return=True):
"""
This method is responsible for adding a new *native* AtMostK (see
:mod:`pysat.card`) constraint.
**Note that most of the solvers do not support native AtMostK
constraints**.
An AtMostK constraint is :math:`\sum_{i=1}^{n}{x_i}\leq k`. A
native AtMostK constraint should be given as a pair ``lits`` and
``k``, where ``lits`` is a list of literals in the sum.
:param lits: a list of literals.
:param k: upper bound on the number of satisfied literals
:param no_return: check solver's internal formula and return the
result, if set to ``False``.
:type lits: iterable(int)
:type k: int
:type no_return: bool
:rtype: bool if ``no_return`` is set to ``False``.
A usage example is the following:
.. code-block:: python
>>> s = Solver(name='mc', bootstrap_with=[[1], [2], [3]])
>>> s.add_atmost(lits=[1, 2, 3], k=2, no_return=False)
False
>>> # the AtMostK constraint is in conflict with initial unit clauses
"""
if self.solver:
res = self.solver.add_atmost(lits, k, no_return)
if not no_return:
return res
def append_formula(self, formula, no_return=True):
"""
This method can be used to add a given list of clauses into the
solver.
:param formula: a list of clauses.
:param no_return: check solver's internal formula and return the
result, if set to ``False``.
:type formula: iterable(iterable(int))
:type no_return: bool
The ``no_return`` argument is set to ``True`` by default.
:rtype: bool if ``no_return`` is set to ``False``.
.. code-block:: python
>>> cnf = CNF()
... # assume the formula contains clauses
>>> s = Solver()
>>> s.append_formula(cnf.clauses, no_return=False)
True
"""
if self.solver:
res = self.solver.append_formula(formula, no_return)
if not no_return:
return res
def supports_atmost(self):
"""
This method can be called to determine whether the solver supports
native AtMostK (see :mod:`pysat.card`) constraints.
:rtype: bool
A usage example is the following:
.. code-block:: python
>>> s = Solver(name='mc')
>>> s.supports_atmost()
True
>>> # there is support for AtMostK constraints in this solver
"""
if self.solver:
return self.solver.supports_atmost()
@staticmethod
def _proof_bin2text(bytes_):
"""
Auxiliary method to translate a proof specified in the binary DRUP
format to the text DRUP format.
:param bytes_: proof-trace as a sequence of bytes
:type bytes_: bytearray
:rtype: list(str)
"""
# necessary variables
proof, lits, lit, shift, newbeg = [], [], 0, 0, True
for byte in bytes_:
if newbeg:
# new clause; here, we expect either 'a' or 'd'
if byte == 100:
lits.append('d')
else:
assert byte == 97, 'clause should start with either \'a\' or \'d\''
newbeg = False
else:
# this is a byte of an actual literal
if byte:
lit |= (byte & 0x7f) << shift
shift += 7
if byte >> 7 == 0:
# MSB is zero => this is the last byte of the literal
lits.append(str((1 if lit % 2 == 0 else -1) * (lit >> 1)))
lit, shift = 0, 0
else:
# zero-byte indicates the end of clause
lits.append('0')
proof.append(' '.join(lits))
lits, newbeg = [], True
if not newbeg and not lits:
proof.append('0')
return proof
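# Hedged walk-through of the binary DRUP decoding above (added for clarity,
# not part of the original module): each added clause starts with byte 0x61
# ('a') and each deletion with 0x64 ('d'); a literal lit is encoded as the
# 7-bit variable-length integer 2*|lit| + (1 if lit < 0 else 0), and a zero
# byte terminates the clause. For example:
#
#     Solver._proof_bin2text(bytearray(b'a\x02\x05\x00'))  # -> ['1 -2 0']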
#
#==============================================================================
class Cadical(object):
"""
CaDiCaL SAT solver.
"""
def __init__(self, bootstrap_with=None, use_timer=False, incr=False,
with_proof=False):
"""
Basic constructor.
"""
if incr:
raise NotImplementedError('Incremental mode is not supported by CaDiCaL.')
self.cadical = None
self.status = None
self.prfile = None
self.new(bootstrap_with, use_timer, with_proof)
def __enter__(self):
"""
'with' constructor.
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
'with' destructor.
"""
self.delete()
self.cadical = None
def new(self, bootstrap_with=None, use_timer=False, with_proof=False):
"""
Actual constructor of the solver.
"""
if not self.cadical:
self.cadical = pysolvers.cadical_new()
if with_proof:
self.prfile = tempfile.TemporaryFile()
pysolvers.cadical_tracepr(self.cadical, self.prfile)
if bootstrap_with:
if type(bootstrap_with) == CNFPlus and bootstrap_with.atmosts:
raise NotImplementedError('Atmost constraints are not supported by CaDiCaL')
for clause in bootstrap_with:
self.add_clause(clause)
self.use_timer = use_timer
self.call_time = 0.0 # time spent for the last call to oracle
self.accu_time = 0.0 # time accumulated for all calls to oracle
def delete(self):
"""
Destructor.
"""
if self.cadical:
pysolvers.cadical_del(self.cadical, self.prfile)
self.cadical = None
if self.prfile:
self.prfile.close()
def solve(self, assumptions=[]):
"""
Solve internal formula.
"""
if self.cadical:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.cadical_solve(self.cadical, assumptions,
int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
self.prev_assumps = assumptions
return self.status
def solve_limited(self, assumptions=[], expect_interrupt=False):
"""
Solve internal formula using given budgets for conflicts and
propagations.
"""
raise NotImplementedError('Limited solve is currently unsupported by CaDiCaL.')
def conf_budget(self, budget):
"""
Set limit on the number of conflicts.
"""
raise NotImplementedError('Limited solve is currently unsupported by CaDiCaL.')
def prop_budget(self, budget):
"""
Set limit on the number of propagations.
"""
raise NotImplementedError('Limited solve is currently unsupported by CaDiCaL.')
def interrupt(self):
"""
Interrupt solver execution.
"""
raise NotImplementedError('Limited solve is currently unsupported by CaDiCaL.')
def clear_interrupt(self):
"""
Clears an interruption.
"""
raise NotImplementedError('Limited solve is currently unsupported by CaDiCaL.')
def propagate(self, assumptions=[], phase_saving=0):
"""
Propagate a given set of assumption literals.
"""
raise NotImplementedError('Simple literal propagation is not yet implemented for CaDiCaL.')
def set_phases(self, literals=[]):
"""
Sets polarities of a given list of variables.
"""
raise NotImplementedError('Setting preferred phases is not yet implemented for CaDiCaL.')
def get_status(self):
"""
Returns solver's status.
"""
if self.cadical:
return self.status
def get_model(self):
"""
Get a model if the formula was previously satisfied.
"""
if self.cadical and self.status == True:
model = pysolvers.cadical_model(self.cadical)
return model if model != None else []
def get_core(self):
"""
Get an unsatisfiable core if the formula was previously
unsatisfied.
"""
if self.cadical and self.status == False:
return pysolvers.cadical_core(self.cadical, self.prev_assumps)
def get_proof(self):
"""
Get a proof produced when deciding the formula.
"""
if self.cadical and self.prfile:
self.prfile.seek(0)
# stripping may cause issues here!
return Solver._proof_bin2text(bytearray(self.prfile.read()).strip())
def time(self):
"""
Get time spent for the last call to oracle.
"""
if self.cadical:
return self.call_time
def time_accum(self):
"""
Get time accumulated for all calls to oracle.
"""
if self.cadical:
return self.accu_time
def nof_vars(self):
"""
Get number of variables currently used by the solver.
"""
if self.cadical:
return pysolvers.cadical_nof_vars(self.cadical)
def nof_clauses(self):
"""
Get number of clauses currently used by the solver.
"""
if self.cadical:
return pysolvers.cadical_nof_cls(self.cadical)
def accum_stats(self):
"""
Get accumulated low-level stats from the solver. This includes
the number of restarts, conflicts, decisions and propagations.
"""
if self.cadical:
return pysolvers.cadical_acc_stats(self.cadical)
def enum_models(self, assumptions=[]):
"""
Iterate over models of the internal formula.
"""
if self.cadical:
done = False
while not done:
self.status = self.solve(assumptions=assumptions)
model = self.get_model()
if model is not None:
self.add_clause([-l for l in model]) # blocking model
yield model
else:
done = True
def add_clause(self, clause, no_return=True):
"""
Add a new clause to solver's internal formula.
"""
if self.cadical:
res = pysolvers.cadical_add_cl(self.cadical, clause)
if res == False:
self.status = False
if not no_return:
return res
def add_atmost(self, lits, k, no_return=True):
"""
Atmost constraints are not supported by CaDiCaL.
"""
raise NotImplementedError('Atmost constraints are not supported by CaDiCaL.')
def append_formula(self, formula, no_return=True):
"""
Appends list of clauses to solver's internal formula.
"""
if self.cadical:
res = None
if type(formula) == CNFPlus and formula.atmosts:
raise NotImplementedError('Atmost constraints are not supported by CaDiCaL')
for clause in formula:
res = self.add_clause(clause, no_return)
if not no_return and res == False:
return res
if not no_return:
return res
def supports_atmost(self):
"""
This method can be called to determine whether the solver supports
native AtMostK (see :mod:`pysat.card`) constraints.
"""
return False
#
#==============================================================================
class Gluecard3(object):
"""
Gluecard 3 SAT solver.
"""
def __init__(self, bootstrap_with=None, use_timer=False, incr=False,
with_proof=False):
"""
Basic constructor.
"""
self.gluecard = None
self.status = None
self.prfile = None
self.new(bootstrap_with, use_timer, incr, with_proof)
def __enter__(self):
"""
'with' constructor.
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
'with' destructor.
"""
self.delete()
self.gluecard = None
def new(self, bootstrap_with=None, use_timer=False, incr=False,
with_proof=False):
"""
Actual constructor of the solver.
"""
assert not incr or not with_proof, 'Incremental mode and proof tracing cannot be set together.'
if not self.gluecard:
self.gluecard = pysolvers.gluecard3_new()
if bootstrap_with:
for clause in bootstrap_with:
if len(clause) != 2 or isinstance(clause[0], int): # it is a clause
self.add_clause(clause)
else:
self.add_atmost(clause[0], clause[1])
self.use_timer = use_timer
self.call_time = 0.0 # time spent for the last call to oracle
self.accu_time = 0.0 # time accumulated for all calls to oracle
if incr:
pysolvers.gluecard3_setincr(self.gluecard)
if with_proof:
self.prfile = tempfile.TemporaryFile()
pysolvers.gluecard3_tracepr(self.gluecard, self.prfile)
def delete(self):
"""
Destructor.
"""
if self.gluecard:
pysolvers.gluecard3_del(self.gluecard)
self.gluecard = None
if self.prfile:
self.prfile.close()
def solve(self, assumptions=[]):
"""
Solve internal formula.
"""
if self.gluecard:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.gluecard3_solve(self.gluecard, assumptions,
int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def solve_limited(self, assumptions=[], expect_interrupt=False):
"""
Solve internal formula using given budgets for conflicts and
propagations.
"""
if self.gluecard:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.gluecard3_solve_lim(self.gluecard,
assumptions, int(MainThread.check()), int(expect_interrupt))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def conf_budget(self, budget):
"""
Set limit on the number of conflicts.
"""
if self.gluecard:
pysolvers.gluecard3_cbudget(self.gluecard, budget)
def prop_budget(self, budget):
"""
Set limit on the number of propagations.
"""
if self.gluecard:
pysolvers.gluecard3_pbudget(self.gluecard, budget)
def interrupt(self):
"""
Interrupt solver execution.
"""
if self.gluecard:
pysolvers.gluecard3_interrupt(self.gluecard)
def clear_interrupt(self):
"""
Clears an interruption.
"""
if self.gluecard:
pysolvers.gluecard3_clearint(self.gluecard)
def propagate(self, assumptions=[], phase_saving=0):
"""
Propagate a given set of assumption literals.
"""
if self.gluecard:
if self.use_timer:
start_time = process_time()
st, props = pysolvers.gluecard3_propagate(self.gluecard,
assumptions, phase_saving, int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return bool(st), props if props != None else []
def set_phases(self, literals=[]):
"""
Sets polarities of a given list of variables.
"""
if self.gluecard:
pysolvers.gluecard3_setphases(self.gluecard, literals)
def get_status(self):
"""
Returns solver's status.
"""
if self.gluecard:
return self.status
def get_model(self):
"""
Get a model if the formula was previously satisfied.
"""
if self.gluecard and self.status == True:
model = pysolvers.gluecard3_model(self.gluecard)
return model if model != None else []
def get_core(self):
"""
Get an unsatisfiable core if the formula was previously
unsatisfied.
"""
if self.gluecard and self.status == False:
return pysolvers.gluecard3_core(self.gluecard)
def get_proof(self):
"""
Get a proof produced when deciding the formula.
"""
if self.gluecard and self.prfile:
self.prfile.seek(0)
return [line.rstrip().decode('ascii') for line in self.prfile.readlines()]
def time(self):
"""
Get time spent for the last call to oracle.
"""
if self.gluecard:
return self.call_time
def time_accum(self):
"""
Get time accumulated for all calls to oracle.
"""
if self.gluecard:
return self.accu_time
def nof_vars(self):
"""
Get number of variables currently used by the solver.
"""
if self.gluecard:
return pysolvers.gluecard3_nof_vars(self.gluecard)
def nof_clauses(self):
"""
Get number of clauses currently used by the solver.
"""
if self.gluecard:
return pysolvers.gluecard3_nof_cls(self.gluecard)
def accum_stats(self):
"""
Get accumulated low-level stats from the solver. This includes
the number of restarts, conflicts, decisions and propagations.
"""
if self.gluecard:
return pysolvers.gluecard3_acc_stats(self.gluecard)
def enum_models(self, assumptions=[]):
"""
Iterate over models of the internal formula.
"""
if self.gluecard:
done = False
while not done:
self.status = self.solve(assumptions=assumptions)
model = self.get_model()
if model is not None:
self.add_clause([-l for l in model]) # blocking model
yield model
else:
done = True
def add_clause(self, clause, no_return=True):
"""
Add a new clause to solver's internal formula.
"""
if self.gluecard:
res = pysolvers.gluecard3_add_cl(self.gluecard, clause)
if res == False:
self.status = False
if not no_return:
return res
def add_atmost(self, lits, k, no_return=True):
"""
            Add a new *native* AtMostK (see :mod:`pysat.card`) constraint, which Gluecard supports natively.
"""
if self.gluecard:
res = pysolvers.gluecard3_add_am(self.gluecard, lits, k)
if res == False:
self.status = False
if not no_return:
return res
def append_formula(self, formula, no_return=True):
"""
Appends list of clauses to solver's internal formula.
"""
if self.gluecard:
res = None
# this loop should work for a list of clauses, CNF, and CNFPlus
for clause in formula:
if len(clause) != 2 or isinstance(clause[0], int): # it is a clause
res = self.add_clause(clause, no_return)
else:
res = self.add_atmost(clause[0], clause[1], no_return)
if not no_return and res == False:
return res
if not no_return:
return res
def supports_atmost(self):
"""
This method can be called to determine whether the solver supports
native AtMostK (see :mod:`pysat.card`) constraints.
"""
return True
#
#==============================================================================
class Gluecard4(object):
"""
Gluecard 4 SAT solver.
"""
def __init__(self, bootstrap_with=None, use_timer=False, incr=False,
with_proof=False):
"""
Basic constructor.
"""
self.gluecard = None
self.status = None
self.prfile = None
self.new(bootstrap_with, use_timer, incr, with_proof)
def __enter__(self):
"""
'with' constructor.
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
'with' destructor.
"""
self.delete()
self.gluecard = None
def new(self, bootstrap_with=None, use_timer=False, incr=False,
with_proof=False):
"""
Actual constructor of the solver.
"""
assert not incr or not with_proof, 'Incremental mode and proof tracing cannot be set together.'
if not self.gluecard:
self.gluecard = pysolvers.gluecard41_new()
if bootstrap_with:
for clause in bootstrap_with:
if len(clause) != 2 or isinstance(clause[0], int): # it is a clause
self.add_clause(clause)
else:
self.add_atmost(clause[0], clause[1])
self.use_timer = use_timer
self.call_time = 0.0 # time spent for the last call to oracle
self.accu_time = 0.0 # time accumulated for all calls to oracle
if incr:
pysolvers.gluecard41_setincr(self.gluecard)
if with_proof:
self.prfile = tempfile.TemporaryFile()
pysolvers.gluecard41_tracepr(self.gluecard, self.prfile)
def delete(self):
"""
Destructor.
"""
if self.gluecard:
pysolvers.gluecard41_del(self.gluecard)
self.gluecard = None
if self.prfile:
self.prfile.close()
def solve(self, assumptions=[]):
"""
Solve internal formula.
"""
if self.gluecard:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.gluecard41_solve(self.gluecard, assumptions,
int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def solve_limited(self, assumptions=[], expect_interrupt=False):
"""
Solve internal formula using given budgets for conflicts and
propagations.
"""
if self.gluecard:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.gluecard41_solve_lim(self.gluecard,
assumptions, int(MainThread.check()), int(expect_interrupt))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def conf_budget(self, budget):
"""
Set limit on the number of conflicts.
"""
if self.gluecard:
pysolvers.gluecard41_cbudget(self.gluecard, budget)
def prop_budget(self, budget):
"""
Set limit on the number of propagations.
"""
if self.gluecard:
pysolvers.gluecard41_pbudget(self.gluecard, budget)
def interrupt(self):
"""
Interrupt solver execution.
"""
if self.gluecard:
pysolvers.gluecard41_interrupt(self.gluecard)
def clear_interrupt(self):
"""
Clears an interruption.
"""
if self.gluecard:
pysolvers.gluecard41_clearint(self.gluecard)
def propagate(self, assumptions=[], phase_saving=0):
"""
Propagate a given set of assumption literals.
"""
if self.gluecard:
if self.use_timer:
start_time = process_time()
st, props = pysolvers.gluecard41_propagate(self.gluecard,
assumptions, phase_saving, int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return bool(st), props if props != None else []
def set_phases(self, literals=[]):
"""
Sets polarities of a given list of variables.
"""
if self.gluecard:
pysolvers.gluecard41_setphases(self.gluecard, literals)
def get_status(self):
"""
Returns solver's status.
"""
if self.gluecard:
return self.status
def get_model(self):
"""
Get a model if the formula was previously satisfied.
"""
if self.gluecard and self.status == True:
model = pysolvers.gluecard41_model(self.gluecard)
return model if model != None else []
def get_core(self):
"""
Get an unsatisfiable core if the formula was previously
unsatisfied.
"""
if self.gluecard and self.status == False:
return pysolvers.gluecard41_core(self.gluecard)
def get_proof(self):
"""
Get a proof produced when deciding the formula.
"""
if self.gluecard and self.prfile:
self.prfile.seek(0)
return [line.rstrip().decode('ascii') for line in self.prfile.readlines()]
def time(self):
"""
Get time spent for the last call to oracle.
"""
if self.gluecard:
return self.call_time
def time_accum(self):
"""
Get time accumulated for all calls to oracle.
"""
if self.gluecard:
return self.accu_time
def nof_vars(self):
"""
Get number of variables currently used by the solver.
"""
if self.gluecard:
return pysolvers.gluecard41_nof_vars(self.gluecard)
def nof_clauses(self):
"""
Get number of clauses currently used by the solver.
"""
if self.gluecard:
return pysolvers.gluecard41_nof_cls(self.gluecard)
def accum_stats(self):
"""
Get accumulated low-level stats from the solver. This includes
the number of restarts, conflicts, decisions and propagations.
"""
if self.gluecard:
return pysolvers.gluecard41_acc_stats(self.gluecard)
def enum_models(self, assumptions=[]):
"""
Iterate over models of the internal formula.
"""
if self.gluecard:
done = False
while not done:
self.status = self.solve(assumptions=assumptions)
model = self.get_model()
if model is not None:
self.add_clause([-l for l in model]) # blocking model
yield model
else:
done = True
def add_clause(self, clause, no_return=True):
"""
Add a new clause to solver's internal formula.
"""
if self.gluecard:
res = pysolvers.gluecard41_add_cl(self.gluecard, clause)
if res == False:
self.status = False
if not no_return:
return res
def add_atmost(self, lits, k, no_return=True):
"""
Add a new atmost constraint to solver's internal formula (Gluecard supports cardinality constraints natively).
"""
if self.gluecard:
res = pysolvers.gluecard41_add_am(self.gluecard, lits, k)
if res == False:
self.status = False
if not no_return:
return res
def append_formula(self, formula, no_return=True):
"""
Appends list of clauses to solver's internal formula.
"""
if self.gluecard:
res = None
# this loop should work for a list of clauses, CNF, and CNFPlus
for clause in formula:
if len(clause) != 2 or isinstance(clause[0], int): # it is a clause
res = self.add_clause(clause, no_return)
else:
res = self.add_atmost(clause[0], clause[1], no_return)
if not no_return and res == False:
return res
if not no_return:
return res
def supports_atmost(self):
"""
This method can be called to determine whether the solver supports
native AtMostK (see :mod:`pysat.card`) constraints.
"""
return True
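# Usage sketch added for documentation purposes (not part of the upstream
# module): Gluecard4 handles atmost constraints natively, so a cardinality
# bound can be added directly instead of being clause-encoded. The bootstrap
# clause, literals and bound below are arbitrary illustrative values.
def _example_gluecard4_atmost():
    with Gluecard4(bootstrap_with=[[1, 2]]) as solver:  # require x1 or x2
        solver.add_atmost([1, 2, 3], 1)                 # at most one of x1..x3
        if solver.solve():
            return solver.get_model()                   # e.g. [1, -2, -3]; the exact model may vary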
#
#==============================================================================
class Glucose3(object):
"""
Glucose 3 SAT solver.
"""
def __init__(self, bootstrap_with=None, use_timer=False, incr=False,
with_proof=False):
"""
Basic constructor.
"""
self.glucose = None
self.status = None
self.prfile = None
self.new(bootstrap_with, use_timer, incr, with_proof)
def __enter__(self):
"""
'with' constructor.
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
'with' destructor.
"""
self.delete()
self.glucose = None
def new(self, bootstrap_with=None, use_timer=False, incr=False,
with_proof=False):
"""
Actual constructor of the solver.
"""
assert not incr or not with_proof, 'Incremental mode and proof tracing cannot be set together.'
if not self.glucose:
self.glucose = pysolvers.glucose3_new()
if bootstrap_with:
if type(bootstrap_with) == CNFPlus and bootstrap_with.atmosts:
raise NotImplementedError('Atmost constraints are not supported by Glucose3')
for clause in bootstrap_with:
self.add_clause(clause)
self.use_timer = use_timer
self.call_time = 0.0 # time spent for the last call to oracle
self.accu_time = 0.0 # time accumulated for all calls to oracle
if incr:
pysolvers.glucose3_setincr(self.glucose)
if with_proof:
self.prfile = tempfile.TemporaryFile()
pysolvers.glucose3_tracepr(self.glucose, self.prfile)
def delete(self):
"""
Destructor.
"""
if self.glucose:
pysolvers.glucose3_del(self.glucose)
self.glucose = None
if self.prfile:
self.prfile.close()
def solve(self, assumptions=[]):
"""
Solve internal formula.
"""
if self.glucose:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.glucose3_solve(self.glucose, assumptions,
int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def solve_limited(self, assumptions=[], expect_interrupt=False):
"""
Solve internal formula using given budgets for conflicts and
propagations.
"""
if self.glucose:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.glucose3_solve_lim(self.glucose,
assumptions, int(MainThread.check()), int(expect_interrupt))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def conf_budget(self, budget):
"""
Set limit on the number of conflicts.
"""
if self.glucose:
pysolvers.glucose3_cbudget(self.glucose, budget)
def prop_budget(self, budget):
"""
Set limit on the number of propagations.
"""
if self.glucose:
pysolvers.glucose3_pbudget(self.glucose, budget)
def interrupt(self):
"""
Interrupt solver execution.
"""
if self.glucose:
pysolvers.glucose3_interrupt(self.glucose)
def clear_interrupt(self):
"""
Clears an interruption.
"""
if self.glucose:
pysolvers.glucose3_clearint(self.glucose)
def propagate(self, assumptions=[], phase_saving=0):
"""
Propagate a given set of assumption literals.
"""
if self.glucose:
if self.use_timer:
start_time = process_time()
st, props = pysolvers.glucose3_propagate(self.glucose,
assumptions, phase_saving, int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return bool(st), props if props != None else []
def set_phases(self, literals=[]):
"""
Sets polarities of a given list of variables.
"""
if self.glucose:
pysolvers.glucose3_setphases(self.glucose, literals)
def get_status(self):
"""
Returns solver's status.
"""
if self.glucose:
return self.status
def get_model(self):
"""
Get a model if the formula was previously satisfied.
"""
if self.glucose and self.status == True:
model = pysolvers.glucose3_model(self.glucose)
return model if model != None else []
def get_core(self):
"""
Get an unsatisfiable core if the formula was previously
unsatisfied.
"""
if self.glucose and self.status == False:
return pysolvers.glucose3_core(self.glucose)
def get_proof(self):
"""
Get a proof produced when deciding the formula.
"""
if self.glucose and self.prfile:
self.prfile.seek(0)
return [line.rstrip().decode('ascii') for line in self.prfile.readlines()]
def time(self):
"""
Get time spent for the last call to oracle.
"""
if self.glucose:
return self.call_time
def time_accum(self):
"""
Get time accumulated for all calls to oracle.
"""
if self.glucose:
return self.accu_time
def nof_vars(self):
"""
Get number of variables currently used by the solver.
"""
if self.glucose:
return pysolvers.glucose3_nof_vars(self.glucose)
def nof_clauses(self):
"""
Get number of clauses currently used by the solver.
"""
if self.glucose:
return pysolvers.glucose3_nof_cls(self.glucose)
def accum_stats(self):
"""
Get accumulated low-level stats from the solver. This includes
the number of restarts, conflicts, decisions and propagations.
"""
if self.glucose:
return pysolvers.glucose3_acc_stats(self.glucose)
def enum_models(self, assumptions=[]):
"""
Iterate over models of the internal formula.
"""
if self.glucose:
done = False
while not done:
self.status = self.solve(assumptions=assumptions)
model = self.get_model()
if model is not None:
self.add_clause([-l for l in model]) # blocking model
yield model
else:
done = True
def add_clause(self, clause, no_return=True):
"""
Add a new clause to solver's internal formula.
"""
if self.glucose:
res = pysolvers.glucose3_add_cl(self.glucose, clause)
if res == False:
self.status = False
if not no_return:
return res
def add_atmost(self, lits, k, no_return=True):
"""
Atmost constraints are not supported by Glucose.
"""
raise NotImplementedError('Atmost constraints are not supported by Glucose.')
def append_formula(self, formula, no_return=True):
"""
Appends list of clauses to solver's internal formula.
"""
if self.glucose:
res = None
if type(formula) == CNFPlus and formula.atmosts:
raise NotImplementedError('Atmost constraints are not supported by Glucose3')
for clause in formula:
res = self.add_clause(clause, no_return)
if not no_return and res == False:
return res
if not no_return:
return res
def supports_atmost(self):
"""
This method can be called to determine whether the solver supports
native AtMostK (see :mod:`pysat.card`) constraints.
"""
return False
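# Usage sketch added for documentation purposes (not part of the upstream
# module): DRUP proof logging with Glucose3. The solver has to be constructed
# with with_proof=True (and without incr=True, see the assert in new()); the
# clause set below is an arbitrary unsatisfiable example.
def _example_glucose3_proof():
    clauses = [[-1, 2], [-2, 3], [1], [-3]]             # unsatisfiable chain
    with Glucose3(bootstrap_with=clauses, with_proof=True) as solver:
        if solver.solve() == False:
            return solver.get_proof()                   # list of DRUP proof lines (strings)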
#
#==============================================================================
class Glucose4(object):
"""
Glucose 4.1 SAT solver.
"""
def __init__(self, bootstrap_with=None, use_timer=False, incr=False,
with_proof=False):
"""
Basic constructor.
"""
self.glucose = None
self.status = None
self.prfile = None
self.new(bootstrap_with, use_timer, incr, with_proof)
def __enter__(self):
"""
'with' constructor.
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
'with' destructor.
"""
self.delete()
self.glucose = None
def new(self, bootstrap_with=None, use_timer=False, incr=False,
with_proof=False):
"""
Actual constructor of the solver.
"""
assert not incr or not with_proof, 'Incremental mode and proof tracing cannot be set together.'
if not self.glucose:
self.glucose = pysolvers.glucose41_new()
if bootstrap_with:
if type(bootstrap_with) == CNFPlus and bootstrap_with.atmosts:
raise NotImplementedError('Atmost constraints are not supported by Glucose4')
for clause in bootstrap_with:
self.add_clause(clause)
self.use_timer = use_timer
self.call_time = 0.0 # time spent for the last call to oracle
self.accu_time = 0.0 # time accumulated for all calls to oracle
if incr:
pysolvers.glucose41_setincr(self.glucose)
if with_proof:
self.prfile = tempfile.TemporaryFile()
pysolvers.glucose41_tracepr(self.glucose, self.prfile)
def delete(self):
"""
Destructor.
"""
if self.glucose:
pysolvers.glucose41_del(self.glucose)
self.glucose = None
if self.prfile:
self.prfile.close()
def solve(self, assumptions=[]):
"""
Solve internal formula.
"""
if self.glucose:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.glucose41_solve(self.glucose, assumptions,
int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def solve_limited(self, assumptions=[], expect_interrupt=False):
"""
Solve internal formula using given budgets for conflicts and
propagations.
"""
if self.glucose:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.glucose41_solve_lim(self.glucose,
assumptions, int(MainThread.check()), int(expect_interrupt))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def conf_budget(self, budget):
"""
Set limit on the number of conflicts.
"""
if self.glucose:
pysolvers.glucose41_cbudget(self.glucose, budget)
def prop_budget(self, budget):
"""
Set limit on the number of propagations.
"""
if self.glucose:
pysolvers.glucose41_pbudget(self.glucose, budget)
def interrupt(self):
"""
Interrupt solver execution.
"""
if self.glucose:
pysolvers.glucose41_interrupt(self.glucose)
def clear_interrupt(self):
"""
Clears an interruption.
"""
if self.glucose:
pysolvers.glucose41_clearint(self.glucose)
def propagate(self, assumptions=[], phase_saving=0):
"""
Propagate a given set of assumption literals.
"""
if self.glucose:
if self.use_timer:
start_time = process_time()
st, props = pysolvers.glucose41_propagate(self.glucose,
assumptions, phase_saving, int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return bool(st), props if props != None else []
def set_phases(self, literals=[]):
"""
Sets polarities of a given list of variables.
"""
if self.glucose:
pysolvers.glucose41_setphases(self.glucose, literals)
def get_status(self):
"""
Returns solver's status.
"""
if self.glucose:
return self.status
def get_model(self):
"""
Get a model if the formula was previously satisfied.
"""
if self.glucose and self.status == True:
model = pysolvers.glucose41_model(self.glucose)
return model if model != None else []
def get_core(self):
"""
Get an unsatisfiable core if the formula was previously
unsatisfied.
"""
if self.glucose and self.status == False:
return pysolvers.glucose41_core(self.glucose)
def get_proof(self):
"""
Get a proof produced when deciding the formula.
"""
if self.glucose and self.prfile:
self.prfile.seek(0)
return [line.rstrip().decode('ascii') for line in self.prfile.readlines()]
def time(self):
"""
Get time spent for the last call to oracle.
"""
if self.glucose:
return self.call_time
def time_accum(self):
"""
Get time accumulated for all calls to oracle.
"""
if self.glucose:
return self.accu_time
def nof_vars(self):
"""
Get number of variables currently used by the solver.
"""
if self.glucose:
return pysolvers.glucose41_nof_vars(self.glucose)
def nof_clauses(self):
"""
Get number of clauses currently used by the solver.
"""
if self.glucose:
return pysolvers.glucose41_nof_cls(self.glucose)
def accum_stats(self):
"""
Get accumulated low-level stats from the solver. This includes
the number of restarts, conflicts, decisions and propagations.
"""
if self.glucose:
return pysolvers.glucose41_acc_stats(self.glucose)
def enum_models(self, assumptions=[]):
"""
Iterate over models of the internal formula.
"""
if self.glucose:
done = False
while not done:
self.status = self.solve(assumptions=assumptions)
model = self.get_model()
if model is not None:
self.add_clause([-l for l in model]) # blocking model
yield model
else:
done = True
def add_clause(self, clause, no_return=True):
"""
Add a new clause to solver's internal formula.
"""
if self.glucose:
res = pysolvers.glucose41_add_cl(self.glucose, clause)
if res == False:
self.status = False
if not no_return:
return res
def add_atmost(self, lits, k, no_return=True):
"""
Atmost constraints are not supported by Glucose.
"""
raise NotImplementedError('Atmost constraints are not supported by Glucose.')
def append_formula(self, formula, no_return=True):
"""
Appends list of clauses to solver's internal formula.
"""
if self.glucose:
res = None
if type(formula) == CNFPlus and formula.atmosts:
raise NotImplementedError('Atmost constraints are not supported by Glucose4')
for clause in formula:
res = self.add_clause(clause, no_return)
if not no_return and res == False:
return res
if not no_return:
return res
def supports_atmost(self):
"""
This method can be called to determine whether the solver supports
native AtMostK (see :mod:`pysat.card`) constraints.
"""
return False
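# Usage sketch added for documentation purposes (not part of the upstream
# module): budget-limited solving with Glucose4. Once a conflict budget is set,
# solve_limited() may give up before reaching an answer; in that case no
# definite True/False status is available.
def _example_glucose4_limited():
    with Glucose4(bootstrap_with=[[-1, 2], [-2, 3]]) as solver:
        solver.conf_budget(1000)          # stop after at most 1000 conflicts
        res = solver.solve_limited()      # True / False, or None if the budget ran out
        return res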
#
#==============================================================================
class Lingeling(object):
"""
Lingeling SAT solver.
"""
def __init__(self, bootstrap_with=None, use_timer=False, incr=False,
with_proof=False):
"""
Basic constructor.
"""
if incr:
raise NotImplementedError('Incremental mode is not supported by Lingeling.')
self.lingeling = None
self.status = None
self.prfile = None
self.new(bootstrap_with, use_timer, with_proof)
def __enter__(self):
"""
'with' constructor.
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
'with' destructor.
"""
self.delete()
self.lingeling = None
def new(self, bootstrap_with=None, use_timer=False, with_proof=False):
"""
Actual constructor of the solver.
"""
if not self.lingeling:
self.lingeling = pysolvers.lingeling_new()
if bootstrap_with:
if type(bootstrap_with) == CNFPlus and bootstrap_with.atmosts:
raise NotImplementedError('Atmost constraints are not supported by Lingeling')
for clause in bootstrap_with:
self.add_clause(clause)
self.use_timer = use_timer
self.call_time = 0.0 # time spent for the last call to oracle
self.accu_time = 0.0 # time accumulated for all calls to oracle
if with_proof:
self.prfile = tempfile.TemporaryFile()
pysolvers.lingeling_tracepr(self.lingeling, self.prfile)
def delete(self):
"""
Destructor.
"""
if self.lingeling:
pysolvers.lingeling_del(self.lingeling, self.prfile)
self.lingeling = None
if self.prfile:
self.prfile.close()
def solve(self, assumptions=[]):
"""
Solve internal formula.
"""
if self.lingeling:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.lingeling_solve(self.lingeling,
assumptions, int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
self.prev_assumps = assumptions
return self.status
def solve_limited(self, assumptions=[], expect_interrupt=False):
"""
Solve internal formula using given budgets for conflicts and
propagations.
"""
raise NotImplementedError('Limited solve is currently unsupported by Lingeling.')
def conf_budget(self, budget):
"""
Set limit on the number of conflicts.
"""
raise NotImplementedError('Limited solve is currently unsupported by Lingeling.')
def prop_budget(self, budget):
"""
Set limit on the number of propagations.
"""
raise NotImplementedError('Limited solve is currently unsupported by Lingeling.')
def interrupt(self):
"""
Interrupt solver execution.
"""
raise NotImplementedError('Limited solve is currently unsupported by Lingeling.')
def clear_interrupt(self):
"""
Clears an interruption.
"""
raise NotImplementedError('Limited solve is currently unsupported by Lingeling.')
def propagate(self, assumptions=[], phase_saving=0):
"""
Propagate a given set of assumption literals.
"""
raise NotImplementedError('Simple literal propagation is not yet implemented for Lingeling.')
def set_phases(self, literals=[]):
"""
Sets polarities of a given list of variables.
"""
if self.lingeling:
pysolvers.lingeling_setphases(self.lingeling, literals)
def get_status(self):
"""
Returns solver's status.
"""
if self.lingeling:
return self.status
def get_model(self):
"""
Get a model if the formula was previously satisfied.
"""
if self.lingeling and self.status == True:
model = pysolvers.lingeling_model(self.lingeling)
return model if model != None else []
def get_core(self):
"""
Get an unsatisfiable core if the formula was previously
unsatisfied.
"""
if self.lingeling and self.status == False:
return pysolvers.lingeling_core(self.lingeling, self.prev_assumps)
def get_proof(self):
"""
Get a proof produced when deciding the formula.
"""
if self.lingeling and self.prfile:
self.prfile.seek(0)
return [line.rstrip().decode('ascii') for line in self.prfile.readlines()]
def time(self):
"""
Get time spent for the last call to oracle.
"""
if self.lingeling:
return self.call_time
def time_accum(self):
"""
Get time accumulated for all calls to oracle.
"""
if self.lingeling:
return self.accu_time
def nof_vars(self):
"""
Get number of variables currently used by the solver.
"""
if self.lingeling:
return pysolvers.lingeling_nof_vars(self.lingeling)
def nof_clauses(self):
"""
Get number of clauses currently used by the solver.
"""
if self.lingeling:
return pysolvers.lingeling_nof_cls(self.lingeling)
def accum_stats(self):
"""
Get accumulated low-level stats from the solver. This includes
the number of restarts, conflicts, decisions and propagations.
"""
if self.lingeling:
return pysolvers.lingeling_acc_stats(self.lingeling)
def enum_models(self, assumptions=[]):
"""
Iterate over models of the internal formula.
"""
if self.lingeling:
done = False
while not done:
self.status = self.solve(assumptions=assumptions)
model = self.get_model()
if model is not None:
self.add_clause([-l for l in model]) # blocking model
yield model
else:
done = True
def add_clause(self, clause, no_return=True):
"""
Add a new clause to solver's internal formula.
"""
if self.lingeling:
pysolvers.lingeling_add_cl(self.lingeling, clause)
def add_atmost(self, lits, k, no_return=True):
"""
Atmost constraints are not supported by Lingeling.
"""
raise NotImplementedError('Atmost constraints are not supported by Lingeling.')
def append_formula(self, formula, no_return=True):
"""
Appends list of clauses to solver's internal formula.
"""
if self.lingeling:
if type(formula) == CNFPlus and formula.atmosts:
raise NotImplementedError('Atmost constraints are not supported by Lingeling')
for clause in formula:
self.add_clause(clause, no_return)
def supports_atmost(self):
"""
This method can be called to determine whether the solver supports
native AtMostK (see :mod:`pysat.card`) constraints.
"""
return False
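# Usage sketch added for documentation purposes (not part of the upstream
# module): extracting an unsatisfiable core of assumption literals with
# Lingeling. The clauses and assumptions are arbitrary; [1, -3] conflicts with
# the implication chain x1 -> x2 -> x3.
def _example_lingeling_core():
    with Lingeling(bootstrap_with=[[-1, 2], [-2, 3]]) as solver:
        if solver.solve(assumptions=[1, -3]) == False:
            return solver.get_core()      # subset of [1, -3] explaining unsatisfiability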
#
#==============================================================================
class MapleChrono(object):
"""
MapleLCMDistChronoBT SAT solver.
"""
def __init__(self, bootstrap_with=None, use_timer=False, incr=False,
with_proof=False):
"""
Basic constructor.
"""
if incr:
raise NotImplementedError('Incremental mode is not supported by MapleChrono.')
self.maplesat = None
self.status = None
self.prfile = None
self.new(bootstrap_with, use_timer, with_proof)
def __enter__(self):
"""
'with' constructor.
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
'with' destructor.
"""
self.delete()
self.maplesat = None
def new(self, bootstrap_with=None, use_timer=False, with_proof=False):
"""
Actual constructor of the solver.
"""
if not self.maplesat:
self.maplesat = pysolvers.maplechrono_new()
if bootstrap_with:
if type(bootstrap_with) == CNFPlus and bootstrap_with.atmosts:
raise NotImplementedError('Atmost constraints are not supported by MapleChrono')
for clause in bootstrap_with:
self.add_clause(clause)
self.use_timer = use_timer
self.call_time = 0.0 # time spent for the last call to oracle
self.accu_time = 0.0 # time accumulated for all calls to oracle
if with_proof:
self.prfile = tempfile.TemporaryFile()
pysolvers.maplechrono_tracepr(self.maplesat, self.prfile)
def delete(self):
"""
Destructor.
"""
if self.maplesat:
pysolvers.maplechrono_del(self.maplesat)
self.maplesat = None
if self.prfile:
self.prfile.close()
def solve(self, assumptions=[]):
"""
Solve internal formula.
"""
if self.maplesat:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.maplechrono_solve(self.maplesat,
assumptions, int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def solve_limited(self, assumptions=[], expect_interrupt=False):
"""
Solve internal formula using given budgets for conflicts and
propagations.
"""
if self.maplesat:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.maplechrono_solve_lim(self.maplesat,
assumptions, int(MainThread.check()), int(expect_interrupt))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def conf_budget(self, budget):
"""
Set limit on the number of conflicts.
"""
if self.maplesat:
pysolvers.maplechrono_cbudget(self.maplesat, budget)
def prop_budget(self, budget):
"""
Set limit on the number of propagations.
"""
if self.maplesat:
pysolvers.maplechrono_pbudget(self.maplesat, budget)
def interrupt(self):
"""
Interrupt solver execution.
"""
if self.maplesat:
pysolvers.maplechrono_interrupt(self.maplesat)
def clear_interrupt(self):
"""
Clears an interruption.
"""
if self.maplesat:
pysolvers.maplechrono_clearint(self.maplesat)
def propagate(self, assumptions=[], phase_saving=0):
"""
Propagate a given set of assumption literals.
"""
if self.maplesat:
if self.use_timer:
start_time = process_time()
st, props = pysolvers.maplechrono_propagate(self.maplesat,
assumptions, phase_saving, int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return bool(st), props if props != None else []
def set_phases(self, literals=[]):
"""
Sets polarities of a given list of variables.
"""
if self.maplesat:
pysolvers.maplechrono_setphases(self.maplesat, literals)
def get_status(self):
"""
Returns solver's status.
"""
if self.maplesat:
return self.status
def get_model(self):
"""
Get a model if the formula was previously satisfied.
"""
if self.maplesat and self.status == True:
model = pysolvers.maplechrono_model(self.maplesat)
return model if model != None else []
def get_core(self):
"""
Get an unsatisfiable core if the formula was previously
unsatisfied.
"""
if self.maplesat and self.status == False:
return pysolvers.maplechrono_core(self.maplesat)
def get_proof(self):
"""
Get a proof produced while deciding the formula.
"""
if self.maplesat and self.prfile:
self.prfile.seek(0)
return [line.rstrip().decode('ascii') for line in self.prfile.readlines()]
def time(self):
"""
Get time spent for the last call to oracle.
"""
if self.maplesat:
return self.call_time
def time_accum(self):
"""
Get time accumulated for all calls to oracle.
"""
if self.maplesat:
return self.accu_time
def nof_vars(self):
"""
Get number of variables currently used by the solver.
"""
if self.maplesat:
return pysolvers.maplechrono_nof_vars(self.maplesat)
def nof_clauses(self):
"""
Get number of clauses currently used by the solver.
"""
if self.maplesat:
return pysolvers.maplechrono_nof_cls(self.maplesat)
def accum_stats(self):
"""
Get accumulated low-level stats from the solver. This includes
the number of restarts, conflicts, decisions and propagations.
"""
if self.maplesat:
return pysolvers.maplechrono_acc_stats(self.maplesat)
def enum_models(self, assumptions=[]):
"""
Iterate over models of the internal formula.
"""
if self.maplesat:
done = False
while not done:
self.status = self.solve(assumptions=assumptions)
model = self.get_model()
if model is not None:
self.add_clause([-l for l in model]) # blocking model
yield model
else:
done = True
def add_clause(self, clause, no_return=True):
"""
Add a new clause to solver's internal formula.
"""
if self.maplesat:
res = pysolvers.maplechrono_add_cl(self.maplesat, clause)
if res == False:
self.status = False
if not no_return:
return res
def add_atmost(self, lits, k, no_return=True):
"""
Atmost constraints are not supported by MapleChrono.
"""
raise NotImplementedError('Atmost constraints are not supported by MapleChrono.')
def append_formula(self, formula, no_return=True):
"""
Appends list of clauses to solver's internal formula.
"""
if self.maplesat:
res = None
if type(formula) == CNFPlus and formula.atmosts:
raise NotImplementedError('Atmost constraints are not supported by MapleChrono')
for clause in formula:
res = self.add_clause(clause, no_return)
if not no_return and res == False:
return res
if not no_return:
return res
def supports_atmost(self):
"""
This method can be called to determine whether the solver supports
native AtMostK (see :mod:`pysat.card`) constraints.
"""
return False
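# Usage sketch added for documentation purposes (not part of the upstream
# module): unit propagation without full solving, using MapleChrono. The
# clauses and the assumption below are arbitrary illustrative values.
def _example_maplechrono_propagate():
    with MapleChrono(bootstrap_with=[[-1, 2], [-2, 3]]) as solver:
        ok, lits = solver.propagate(assumptions=[1])
        # ok is False only if propagation alone derives a conflict; lits holds
        # the propagated literals, e.g. [1, 2, 3] for this chain
        return ok, lits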
#
#==============================================================================
class MapleCM(object):
"""
MapleCM SAT solver.
"""
def __init__(self, bootstrap_with=None, use_timer=False, incr=False,
with_proof=False):
"""
Basic constructor.
"""
if incr:
raise NotImplementedError('Incremental mode is not supported by MapleCM.')
self.maplesat = None
self.status = None
self.prfile = None
self.new(bootstrap_with, use_timer, with_proof)
def __enter__(self):
"""
'with' constructor.
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
'with' destructor.
"""
self.delete()
self.maplesat = None
def new(self, bootstrap_with=None, use_timer=False, with_proof=False):
"""
Actual constructor of the solver.
"""
if not self.maplesat:
self.maplesat = pysolvers.maplecm_new()
if bootstrap_with:
if type(bootstrap_with) == CNFPlus and bootstrap_with.atmosts:
raise NotImplementedError('Atmost constraints are not supported by MapleCM')
for clause in bootstrap_with:
self.add_clause(clause)
self.use_timer = use_timer
self.call_time = 0.0 # time spent for the last call to oracle
self.accu_time = 0.0 # time accumulated for all calls to oracle
if with_proof:
self.prfile = tempfile.TemporaryFile()
pysolvers.maplecm_tracepr(self.maplesat, self.prfile)
def delete(self):
"""
Destructor.
"""
if self.maplesat:
pysolvers.maplecm_del(self.maplesat)
self.maplesat = None
if self.prfile:
self.prfile.close()
def solve(self, assumptions=[]):
"""
Solve internal formula.
"""
if self.maplesat:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.maplecm_solve(self.maplesat, assumptions,
int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def solve_limited(self, assumptions=[], expect_interrupt=False):
"""
Solve internal formula using given budgets for conflicts and
propagations.
"""
if self.maplesat:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.maplecm_solve_lim(self.maplesat,
assumptions, int(MainThread.check()), int(expect_interrupt))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def conf_budget(self, budget):
"""
Set limit on the number of conflicts.
"""
if self.maplesat:
pysolvers.maplecm_cbudget(self.maplesat, budget)
def prop_budget(self, budget):
"""
Set limit on the number of propagations.
"""
if self.maplesat:
pysolvers.maplecm_pbudget(self.maplesat, budget)
def interrupt(self):
"""
Interrupt solver execution.
"""
if self.maplesat:
pysolvers.maplecm_interrupt(self.maplesat)
def clear_interrupt(self):
"""
Clears an interruption.
"""
if self.maplesat:
pysolvers.maplecm_clearint(self.maplesat)
def propagate(self, assumptions=[], phase_saving=0):
"""
Propagate a given set of assumption literals.
"""
if self.maplesat:
if self.use_timer:
start_time = process_time()
st, props = pysolvers.maplecm_propagate(self.maplesat,
assumptions, phase_saving, int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return bool(st), props if props != None else []
def set_phases(self, literals=[]):
"""
Sets polarities of a given list of variables.
"""
if self.maplesat:
pysolvers.maplecm_setphases(self.maplesat, literals)
def get_status(self):
"""
Returns solver's status.
"""
if self.maplesat:
return self.status
def get_model(self):
"""
Get a model if the formula was previously satisfied.
"""
if self.maplesat and self.status == True:
model = pysolvers.maplecm_model(self.maplesat)
return model if model != None else []
def get_core(self):
"""
Get an unsatisfiable core if the formula was previously
unsatisfied.
"""
if self.maplesat and self.status == False:
return pysolvers.maplecm_core(self.maplesat)
def get_proof(self):
"""
Get a proof produced while deciding the formula.
"""
if self.maplesat and self.prfile:
self.prfile.seek(0)
return [line.rstrip().decode('ascii') for line in self.prfile.readlines()]
def time(self):
"""
Get time spent for the last call to oracle.
"""
if self.maplesat:
return self.call_time
def time_accum(self):
"""
Get time accumulated for all calls to oracle.
"""
if self.maplesat:
return self.accu_time
def nof_vars(self):
"""
Get number of variables currently used by the solver.
"""
if self.maplesat:
return pysolvers.maplecm_nof_vars(self.maplesat)
def nof_clauses(self):
"""
Get number of clauses currently used by the solver.
"""
if self.maplesat:
return pysolvers.maplecm_nof_cls(self.maplesat)
def accum_stats(self):
"""
Get accumulated low-level stats from the solver. This includes
the number of restarts, conflicts, decisions and propagations.
"""
if self.maplesat:
return pysolvers.maplecm_acc_stats(self.maplesat)
def enum_models(self, assumptions=[]):
"""
Iterate over models of the internal formula.
"""
if self.maplesat:
done = False
while not done:
self.status = self.solve(assumptions=assumptions)
model = self.get_model()
if model is not None:
self.add_clause([-l for l in model]) # blocking model
yield model
else:
done = True
def add_clause(self, clause, no_return=True):
"""
Add a new clause to solver's internal formula.
"""
if self.maplesat:
res = pysolvers.maplecm_add_cl(self.maplesat, clause)
if res == False:
self.status = False
if not no_return:
return res
def add_atmost(self, lits, k, no_return=True):
"""
Atmost constraints are not supported by MapleCM.
"""
raise NotImplementedError('Atmost constraints are not supported by MapleCM.')
def append_formula(self, formula, no_return=True):
"""
Appends list of clauses to solver's internal formula.
"""
if self.maplesat:
res = None
if type(formula) == CNFPlus and formula.atmosts:
raise NotImplementedError('Atmost constraints are not supported by MapleCM')
for clause in formula:
res = self.add_clause(clause, no_return)
if not no_return and res == False:
return res
if not no_return:
return res
def supports_atmost(self):
"""
This method can be called to determine whether the solver supports
native AtMostK (see :mod:`pysat.card`) constraints.
"""
return False
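# Usage sketch added for documentation purposes (not part of the upstream
# module): model enumeration with MapleCM. Each reported model is blocked by an
# added clause, so the solver is consumed by the loop; the formula below has
# exactly three models over variables 1 and 2.
def _example_maplecm_enum():
    with MapleCM(bootstrap_with=[[1, 2]]) as solver:
        return [model for model in solver.enum_models()]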
#
#==============================================================================
class Maplesat(object):
"""
MapleCOMSPS_LRB SAT solver.
"""
def __init__(self, bootstrap_with=None, use_timer=False, incr=False,
with_proof=False):
"""
Basic constructor.
"""
if incr:
raise NotImplementedError('Incremental mode is not supported by Maplesat.')
self.maplesat = None
self.status = None
self.prfile = None
self.new(bootstrap_with, use_timer, with_proof)
def __enter__(self):
"""
'with' constructor.
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
'with' destructor.
"""
self.delete()
self.maplesat = None
def new(self, bootstrap_with=None, use_timer=False, with_proof=False):
"""
Actual constructor of the solver.
"""
if not self.maplesat:
self.maplesat = pysolvers.maplesat_new()
if bootstrap_with:
if type(bootstrap_with) == CNFPlus and bootstrap_with.atmosts:
raise NotImplementedError('Atmost constraints are not supported by Maplesat')
for clause in bootstrap_with:
self.add_clause(clause)
self.use_timer = use_timer
self.call_time = 0.0 # time spent for the last call to oracle
self.accu_time = 0.0 # time accumulated for all calls to oracle
if with_proof:
self.prfile = tempfile.TemporaryFile()
pysolvers.maplesat_tracepr(self.maplesat, self.prfile)
def delete(self):
"""
Destructor.
"""
if self.maplesat:
pysolvers.maplesat_del(self.maplesat)
self.maplesat = None
if self.prfile:
self.prfile.close()
def solve(self, assumptions=[]):
"""
Solve internal formula.
"""
if self.maplesat:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.maplesat_solve(self.maplesat, assumptions,
int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def solve_limited(self, assumptions=[], expect_interrupt=False):
"""
Solve internal formula using given budgets for conflicts and
propagations.
"""
if self.maplesat:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.maplesat_solve_lim(self.maplesat,
assumptions, int(MainThread.check()), int(expect_interrupt))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def conf_budget(self, budget):
"""
Set limit on the number of conflicts.
"""
if self.maplesat:
pysolvers.maplesat_cbudget(self.maplesat, budget)
def prop_budget(self, budget):
"""
Set limit on the number of propagations.
"""
if self.maplesat:
pysolvers.maplesat_pbudget(self.maplesat, budget)
def interrupt(self):
"""
Interrupt solver execution.
"""
if self.maplesat:
pysolvers.maplesat_interrupt(self.maplesat)
def clear_interrupt(self):
"""
Clears an interruption.
"""
if self.maplesat:
pysolvers.maplesat_clearint(self.maplesat)
def propagate(self, assumptions=[], phase_saving=0):
"""
Propagate a given set of assumption literals.
"""
if self.maplesat:
if self.use_timer:
start_time = process_time()
st, props = pysolvers.maplesat_propagate(self.maplesat,
assumptions, phase_saving, int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return bool(st), props if props != None else []
def set_phases(self, literals=[]):
"""
Sets polarities of a given list of variables.
"""
if self.maplesat:
pysolvers.maplesat_setphases(self.maplesat, literals)
def get_status(self):
"""
Returns solver's status.
"""
if self.maplesat:
return self.status
def get_model(self):
"""
Get a model if the formula was previously satisfied.
"""
if self.maplesat and self.status == True:
model = pysolvers.maplesat_model(self.maplesat)
return model if model != None else []
def get_core(self):
"""
Get an unsatisfiable core if the formula was previously
unsatisfied.
"""
if self.maplesat and self.status == False:
return pysolvers.maplesat_core(self.maplesat)
def get_proof(self):
"""
Get a proof produced while deciding the formula.
"""
if self.maplesat and self.prfile:
self.prfile.seek(0)
return [line.rstrip().decode('ascii') for line in self.prfile.readlines()]
def time(self):
"""
Get time spent for the last call to oracle.
"""
if self.maplesat:
return self.call_time
def time_accum(self):
"""
Get time accumulated for all calls to oracle.
"""
if self.maplesat:
return self.accu_time
def nof_vars(self):
"""
Get number of variables currently used by the solver.
"""
if self.maplesat:
return pysolvers.maplesat_nof_vars(self.maplesat)
def nof_clauses(self):
"""
Get number of clauses currently used by the solver.
"""
if self.maplesat:
return pysolvers.maplesat_nof_cls(self.maplesat)
def accum_stats(self):
"""
Get accumulated low-level stats from the solver. This includes
the number of restarts, conflicts, decisions and propagations.
"""
if self.maplesat:
return pysolvers.maplesat_acc_stats(self.maplesat)
def enum_models(self, assumptions=[]):
"""
Iterate over models of the internal formula.
"""
if self.maplesat:
done = False
while not done:
self.status = self.solve(assumptions=assumptions)
model = self.get_model()
if model is not None:
self.add_clause([-l for l in model]) # blocking model
yield model
else:
done = True
def add_clause(self, clause, no_return=True):
"""
Add a new clause to solver's internal formula.
"""
if self.maplesat:
res = pysolvers.maplesat_add_cl(self.maplesat, clause)
if res == False:
self.status = False
if not no_return:
return res
def add_atmost(self, lits, k, no_return=True):
"""
Atmost constraints are not supported by Maplesat.
"""
raise NotImplementedError('Atmost constraints are not supported by Maplesat.')
def append_formula(self, formula, no_return=True):
"""
Appends list of clauses to solver's internal formula.
"""
if self.maplesat:
res = None
if type(formula) == CNFPlus and formula.atmosts:
raise NotImplementedError('Atmost constraints are not supported by Maplesat')
for clause in formula:
res = self.add_clause(clause, no_return)
if not no_return and res == False:
return res
if not no_return:
return res
def supports_atmost(self):
"""
This method can be called to determine whether the solver supports
native AtMostK (see :mod:`pysat.card`) constraints.
"""
return False
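# Usage sketch added for documentation purposes (not part of the upstream
# module): phase (polarity) preferences with Maplesat. Phases are only
# branching hints, so the returned model is not guaranteed to follow them.
def _example_maplesat_phases():
    with Maplesat(bootstrap_with=[[1, 2]]) as solver:
        solver.set_phases([-1, 2])        # prefer x1=False and x2=True
        if solver.solve():
            return solver.get_model()     # likely [-1, 2] here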
#
#==============================================================================
class Mergesat3(object):
"""
MergeSat 3 SAT solver.
"""
def __init__(self, bootstrap_with=None, use_timer=False):
"""
Basic constructor.
"""
self.mergesat = None
self.status = None
self.new(bootstrap_with, use_timer)
def __enter__(self):
"""
'with' constructor.
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
'with' destructor.
"""
self.delete()
self.mergesat = None
def new(self, bootstrap_with=None, use_timer=False):
"""
Actual constructor of the solver.
"""
if not self.mergesat:
self.mergesat = pysolvers.mergesat3_new()
if bootstrap_with:
if type(bootstrap_with) == CNFPlus and bootstrap_with.atmosts:
raise NotImplementedError('Atmost constraints are not supported by Mergesat3')
for clause in bootstrap_with:
self.add_clause(clause)
self.use_timer = use_timer
self.call_time = 0.0 # time spent for the last call to oracle
self.accu_time = 0.0 # time accumulated for all calls to oracle
def delete(self):
"""
Destructor.
"""
if self.mergesat:
pysolvers.mergesat3_del(self.mergesat)
self.mergesat = None
def solve(self, assumptions=[]):
"""
Solve internal formula.
"""
if self.mergesat:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.mergesat3_solve(self.mergesat, assumptions,
int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def solve_limited(self, assumptions=[], expect_interrupt=False):
"""
Solve internal formula using given budgets for conflicts and
propagations.
"""
if self.mergesat:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.mergesat3_solve_lim(self.mergesat,
assumptions, int(MainThread.check()), int(expect_interrupt))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def conf_budget(self, budget):
"""
Set limit on the number of conflicts.
"""
if self.mergesat:
pysolvers.mergesat3_cbudget(self.mergesat, budget)
def prop_budget(self, budget):
"""
Set limit on the number of propagations.
"""
if self.mergesat:
pysolvers.mergesat3_pbudget(self.mergesat, budget)
def interrupt(self):
"""
Interrupt solver execution.
"""
if self.mergesat:
pysolvers.mergesat3_interrupt(self.mergesat)
def clear_interrupt(self):
"""
Clears an interruption.
"""
if self.mergesat:
pysolvers.mergesat3_clearint(self.mergesat)
def propagate(self, assumptions=[], phase_saving=0):
"""
Propagate a given set of assumption literals.
"""
if self.mergesat:
if self.use_timer:
start_time = process_time()
st, props = pysolvers.mergesat3_propagate(self.mergesat,
assumptions, phase_saving, int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return bool(st), props if props != None else []
def set_phases(self, literals=[]):
"""
Sets polarities of a given list of variables.
"""
if self.mergesat:
pysolvers.mergesat3_setphases(self.mergesat, literals)
def get_status(self):
"""
Returns solver's status.
"""
if self.mergesat:
return self.status
def get_model(self):
"""
Get a model if the formula was previously satisfied.
"""
if self.mergesat and self.status == True:
model = pysolvers.mergesat3_model(self.mergesat)
return model if model != None else []
def get_core(self):
"""
Get an unsatisfiable core if the formula was previously
unsatisfied.
"""
if self.mergesat and self.status == False:
return pysolvers.mergesat3_core(self.mergesat)
def get_proof(self):
"""
Get a proof produced while deciding the formula.
"""
raise NotImplementedError('Proof tracing is currently unsupported by Mergesat3.')
def time(self):
"""
Get time spent for the last call to oracle.
"""
if self.mergesat:
return self.call_time
def time_accum(self):
"""
Get time accumulated for all calls to oracle.
"""
if self.mergesat:
return self.accu_time
def nof_vars(self):
"""
Get number of variables currently used by the solver.
"""
if self.mergesat:
return pysolvers.mergesat3_nof_vars(self.mergesat)
def nof_clauses(self):
"""
Get number of clauses currently used by the solver.
"""
if self.mergesat:
return pysolvers.mergesat3_nof_cls(self.mergesat)
def accum_stats(self):
"""
Get accumulated low-level stats from the solver. This includes
the number of restarts, conflicts, decisions and propagations.
"""
if self.mergesat:
return pysolvers.mergesat3_acc_stats(self.mergesat)
def enum_models(self, assumptions=[]):
"""
Iterate over models of the internal formula.
"""
if self.mergesat:
done = False
while not done:
self.status = self.solve(assumptions=assumptions)
model = self.get_model()
if model is not None:
self.add_clause([-l for l in model]) # blocking model
yield model
else:
done = True
def add_clause(self, clause, no_return=True):
"""
Add a new clause to solver's internal formula.
"""
if self.mergesat:
res = pysolvers.mergesat3_add_cl(self.mergesat, clause)
if res == False:
self.status = False
if not no_return:
return res
def add_atmost(self, lits, k, no_return=True):
"""
Atmost constraints are not supported by Mergesat3.
"""
raise NotImplementedError('Atmost constraints are not supported by Mergesat3.')
def append_formula(self, formula, no_return=True):
"""
Appends list of clauses to solver's internal formula.
"""
if self.mergesat:
res = None
if type(formula) == CNFPlus and formula.atmosts:
raise NotImplementedError('Atmost constraints are not supported by Mergesat3')
for clause in formula:
res = self.add_clause(clause, no_return)
if not no_return and res == False:
return res
if not no_return:
return res
def supports_atmost(self):
"""
This method can be called to determine whether the solver supports
native AtMostK (see :mod:`pysat.card`) constraints.
"""
return False
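# Usage sketch added for documentation purposes (not part of the upstream
# module): collecting runtime statistics from Mergesat3. With use_timer=True
# the wrapper also measures the CPU time spent inside the oracle.
def _example_mergesat3_stats():
    with Mergesat3(bootstrap_with=[[-1, 2], [-2, 3]], use_timer=True) as solver:
        solver.solve()
        return solver.accum_stats(), solver.time()  # low-level counters and last-call CPU time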
#
#==============================================================================
class Minicard(object):
"""
Minicard SAT solver.
"""
def __init__(self, bootstrap_with=None, use_timer=False):
"""
Basic constructor.
"""
self.minicard = None
self.status = None
self.new(bootstrap_with, use_timer)
def __enter__(self):
"""
'with' constructor.
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
'with' destructor.
"""
self.delete()
self.minicard = None
def new(self, bootstrap_with=None, use_timer=False):
"""
Actual constructor of the solver.
"""
if not self.minicard:
self.minicard = pysolvers.minicard_new()
if bootstrap_with:
for clause in bootstrap_with:
if len(clause) != 2 or isinstance(clause[0], int): # it is a clause
self.add_clause(clause)
else:
self.add_atmost(clause[0], clause[1])
self.use_timer = use_timer
self.call_time = 0.0 # time spent for the last call to oracle
self.accu_time = 0.0 # time accumulated for all calls to oracle
def delete(self):
"""
Destructor.
"""
if self.minicard:
pysolvers.minicard_del(self.minicard)
self.minicard = None
def solve(self, assumptions=[]):
"""
Solve internal formula.
"""
if self.minicard:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.minicard_solve(self.minicard, assumptions,
int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def solve_limited(self, assumptions=[], expect_interrupt=False):
"""
Solve internal formula using given budgets for conflicts and
propagations.
"""
if self.minicard:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.minicard_solve_lim(self.minicard,
assumptions, int(MainThread.check()), int(expect_interrupt))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def conf_budget(self, budget):
"""
Set limit on the number of conflicts.
"""
if self.minicard:
pysolvers.minicard_cbudget(self.minicard, budget)
def prop_budget(self, budget):
"""
Set limit on the number of propagations.
"""
if self.minicard:
pysolvers.minicard_pbudget(self.minicard, budget)
def interrupt(self):
"""
Interrupt solver execution.
"""
if self.minicard:
pysolvers.minicard_interrupt(self.minicard)
def clear_interrupt(self):
"""
Clears an interruption.
"""
if self.minicard:
pysolvers.minicard_clearint(self.minicard)
def propagate(self, assumptions=[], phase_saving=0):
"""
Propagate a given set of assumption literals.
"""
if self.minicard:
if self.use_timer:
start_time = process_time()
st, props = pysolvers.minicard_propagate(self.minicard,
assumptions, phase_saving, int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return bool(st), props if props != None else []
def set_phases(self, literals=[]):
"""
Sets polarities of a given list of variables.
"""
if self.minicard:
pysolvers.minicard_setphases(self.minicard, literals)
def get_status(self):
"""
Returns solver's status.
"""
if self.minicard:
return self.status
def get_model(self):
"""
Get a model if the formula was previously satisfied.
"""
if self.minicard and self.status == True:
model = pysolvers.minicard_model(self.minicard)
return model if model != None else []
def get_core(self):
"""
Get an unsatisfiable core if the formula was previously
unsatisfied.
"""
if self.minicard and self.status == False:
return pysolvers.minicard_core(self.minicard)
def get_proof(self):
"""
Get a proof produced while deciding the formula.
"""
raise NotImplementedError('Proof tracing is not supported by Minicard.')
def time(self):
"""
Get time spent for the last call to oracle.
"""
if self.minicard:
return self.call_time
def time_accum(self):
"""
Get time accumulated for all calls to oracle.
"""
if self.minicard:
return self.accu_time
def nof_vars(self):
"""
Get number of variables currently used by the solver.
"""
if self.minicard:
return pysolvers.minicard_nof_vars(self.minicard)
def nof_clauses(self):
"""
Get number of clauses currently used by the solver.
"""
if self.minicard:
return pysolvers.minicard_nof_cls(self.minicard)
def accum_stats(self):
"""
Get accumulated low-level stats from the solver. This includes
the number of restarts, conflicts, decisions and propagations.
"""
if self.minicard:
return pysolvers.minicard_acc_stats(self.minicard)
def enum_models(self, assumptions=[]):
"""
Iterate over models of the internal formula.
"""
if self.minicard:
done = False
while not done:
self.status = self.solve(assumptions=assumptions)
model = self.get_model()
if model is not None:
self.add_clause([-l for l in model]) # blocking model
yield model
else:
done = True
def add_clause(self, clause, no_return=True):
"""
Add a new clause to solver's internal formula.
"""
if self.minicard:
res = pysolvers.minicard_add_cl(self.minicard, clause)
if res == False:
self.status = False
if not no_return:
return res
def add_atmost(self, lits, k, no_return=True):
"""
Add a new atmost constraint to solver's internal formula.
"""
if self.minicard:
res = pysolvers.minicard_add_am(self.minicard, lits, k)
if res == False:
self.status = False
if not no_return:
return res
def append_formula(self, formula, no_return=True):
"""
Appends list of clauses to solver's internal formula.
"""
if self.minicard:
res = None
# this loop should work for a list of clauses, CNF, and CNFPlus
for clause in formula:
if len(clause) != 2 or isinstance(clause[0], int): # it is a clause
res = self.add_clause(clause, no_return)
else:
res = self.add_atmost(clause[0], clause[1], no_return)
if not no_return and res == False:
return res
if not no_return:
return res
def supports_atmost(self):
"""
This method can be called to determine whether the solver supports
native AtMostK (see :mod:`pysat.card`) constraints.
"""
return True
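# Usage sketch added for documentation purposes (not part of the upstream
# module): feeding Minicard a mixture of clauses and native atmost constraints.
# append_formula() distinguishes the two by the length-2 / first-element test
# used above; the concrete literals and bound are arbitrary.
def _example_minicard_mixed():
    formula = [[1, 2, 3], ([1, 2, 3], 1)]  # "at least one" clause + "at most one" bound
    with Minicard() as solver:
        solver.append_formula(formula)
        if solver.solve():
            return solver.get_model()      # exactly one of x1..x3 is true in any model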
#
#==============================================================================
class Minisat22(object):
"""
MiniSat 2.2 SAT solver.
"""
def __init__(self, bootstrap_with=None, use_timer=False):
"""
Basic constructor.
"""
self.minisat = None
self.status = None
self.new(bootstrap_with, use_timer)
def __enter__(self):
"""
'with' constructor.
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
'with' destructor.
"""
self.delete()
self.minisat = None
def new(self, bootstrap_with=None, use_timer=False):
"""
Actual constructor of the solver.
"""
if not self.minisat:
self.minisat = pysolvers.minisat22_new()
if bootstrap_with:
if type(bootstrap_with) == CNFPlus and bootstrap_with.atmosts:
raise NotImplementedError('Atmost constraints are not supported by MiniSat')
for clause in bootstrap_with:
self.add_clause(clause)
self.use_timer = use_timer
self.call_time = 0.0 # time spent for the last call to oracle
self.accu_time = 0.0 # time accumulated for all calls to oracle
def delete(self):
"""
Destructor.
"""
if self.minisat:
pysolvers.minisat22_del(self.minisat)
self.minisat = None
def solve(self, assumptions=[]):
"""
Solve internal formula.
"""
if self.minisat:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.minisat22_solve(self.minisat, assumptions,
int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def solve_limited(self, assumptions=[], expect_interrupt=False):
"""
Solve internal formula using given budgets for conflicts and
propagations.
"""
if self.minisat:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.minisat22_solve_lim(self.minisat,
assumptions, int(MainThread.check()), int(expect_interrupt))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def conf_budget(self, budget):
"""
Set limit on the number of conflicts.
"""
if self.minisat:
pysolvers.minisat22_cbudget(self.minisat, budget)
def prop_budget(self, budget):
"""
Set limit on the number of propagations.
"""
if self.minisat:
pysolvers.minisat22_pbudget(self.minisat, budget)
def interrupt(self):
"""
Interrupt solver execution.
"""
if self.minisat:
pysolvers.minisat22_interrupt(self.minisat)
def clear_interrupt(self):
"""
Clears an interruption.
"""
if self.minisat:
pysolvers.minisat22_clearint(self.minisat)
def propagate(self, assumptions=[], phase_saving=0):
"""
Propagate a given set of assumption literals.
"""
if self.minisat:
if self.use_timer:
start_time = process_time()
st, props = pysolvers.minisat22_propagate(self.minisat,
assumptions, phase_saving, int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return bool(st), props if props != None else []
def set_phases(self, literals=[]):
"""
Sets polarities of a given list of variables.
"""
if self.minisat:
pysolvers.minisat22_setphases(self.minisat, literals)
def get_status(self):
"""
Returns solver's status.
"""
if self.minisat:
return self.status
def get_model(self):
"""
Get a model if the formula was previously satisfied.
"""
if self.minisat and self.status == True:
model = pysolvers.minisat22_model(self.minisat)
return model if model != None else []
def get_core(self):
"""
Get an unsatisfiable core if the formula was previously
unsatisfied.
"""
if self.minisat and self.status == False:
return pysolvers.minisat22_core(self.minisat)
def get_proof(self):
"""
Get a proof produced while deciding the formula.
"""
raise NotImplementedError('Proof tracing is not supported by MiniSat.')
def time(self):
"""
Get time spent for the last call to oracle.
"""
if self.minisat:
return self.call_time
def time_accum(self):
"""
Get time accumulated for all calls to oracle.
"""
if self.minisat:
return self.accu_time
def nof_vars(self):
"""
Get number of variables currently used by the solver.
"""
if self.minisat:
return pysolvers.minisat22_nof_vars(self.minisat)
def nof_clauses(self):
"""
Get number of clauses currently used by the solver.
"""
if self.minisat:
return pysolvers.minisat22_nof_cls(self.minisat)
def accum_stats(self):
"""
Get accumulated low-level stats from the solver. This includes
the number of restarts, conflicts, decisions and propagations.
"""
if self.minisat:
return pysolvers.minisat22_acc_stats(self.minisat)
def enum_models(self, assumptions=[]):
"""
Iterate over models of the internal formula.
"""
if self.minisat:
done = False
while not done:
self.status = self.solve(assumptions=assumptions)
model = self.get_model()
if model is not None:
self.add_clause([-l for l in model]) # blocking model
yield model
else:
done = True
def add_clause(self, clause, no_return=True):
"""
Add a new clause to solver's internal formula.
"""
if self.minisat:
res = pysolvers.minisat22_add_cl(self.minisat, clause)
if res == False:
self.status = False
if not no_return:
return res
def add_atmost(self, lits, k, no_return=True):
"""
Atmost constraints are not supported by MiniSat.
"""
raise NotImplementedError('Atmost constraints are not supported by MiniSat.')
def append_formula(self, formula, no_return=True):
"""
Appends list of clauses to solver's internal formula.
"""
if self.minisat:
res = None
if type(formula) == CNFPlus and formula.atmosts:
raise NotImplementedError('Atmost constraints are not supported by MiniSat')
for clause in formula:
res = self.add_clause(clause, no_return)
if not no_return and res == False:
return res
if not no_return:
return res
def supports_atmost(self):
"""
This method can be called to determine whether the solver supports
native AtMostK (see :mod:`pysat.card`) constraints.
"""
return False
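# Usage sketch added for documentation purposes (not part of the upstream
# module): interrupting a long-running Minisat22 call from another thread. The
# 10-second timeout is an arbitrary choice; once the interrupt fires,
# solve_limited(expect_interrupt=True) is expected to return without an answer.
def _example_minisat22_interrupt(clauses):
    from threading import Timer
    with Minisat22(bootstrap_with=clauses) as solver:
        timer = Timer(10.0, solver.interrupt)
        timer.start()
        res = solver.solve_limited(expect_interrupt=True)
        timer.cancel()                     # not needed if solving finished in time
        solver.clear_interrupt()           # reset the flag before any further solving
        return res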
#
#==============================================================================
from pysat._utils import setup_pyx_import
class MinisatCsImporter:
Solver = None
@classmethod
def get(cls):
if cls.Solver is None:
with setup_pyx_import():
from solvers._minisatcs import Solver
cls.Solver = Solver
return cls
class MinisatCS:
def __init__(self, bootstrap_with=None, use_timer=False):
"""
Basic constructor.
"""
self.minisat = None
self.status = None
self.new(bootstrap_with, use_timer)
def __enter__(self):
"""
'with' constructor.
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
'with' destructor.
"""
self.delete()
self.minisat = None
def new(self, bootstrap_with=None, use_timer=False):
"""
Actual constructor of the solver.
"""
if not self.minisat:
self.minisat = MinisatCsImporter.get().Solver()
if bootstrap_with:
if type(bootstrap_with) == CNFPlus and bootstrap_with.atmosts:
raise NotImplementedError('Atmost constraints are not supported by MiniSat')
for clause in bootstrap_with:
self.add_clause(clause)
self.use_timer = use_timer
self.call_time = 0.0 # time spent for the last call to oracle
self.accu_time = 0.0 # time accumulated for all calls to oracle
def delete(self):
"""
Destructor.
"""
pass
def solve(self, assumptions=[]):
"""
Solve internal formula.
"""
if self.minisat:
if self.use_timer:
start_time = process_time()
self.status = self.minisat.solve(assumptions, None, False)
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def solve_limited(self, assumptions=[], expect_interrupt=False):
"""
Solve internal formula using given budgets for conflicts and
propagations.
"""
if expect_interrupt:
raise NotImplementedError('expect_interrupt=True is not supported by MinisatCS')
if self.minisat:
if self.use_timer:
start_time = process_time()
self.status = self.minisat.solve(assumptions, None, True)
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def conf_budget(self, budget):
"""
Set limit on the number of conflicts.
"""
if self.minisat:
self.minisat.set_conf_budget(budget)
def prop_budget(self, budget):
"""
Set limit on the number of propagations.
"""
if self.minisat:
self.minisat.set_prop_budget(budget)
def get_status(self):
"""
Returns solver's status.
"""
if self.minisat:
return self.status
def get_model(self):
"""
Get a model if the formula was previously satisfied.
"""
if self.minisat and self.status == True:
model = self.minisat.get_model()
return model if model != None else []
def time(self):
"""
Get time spent for the last call to oracle.
"""
if self.minisat:
return self.call_time
def time_accum(self):
"""
Get time accumulated for all calls to oracle.
"""
if self.minisat:
return self.accu_time
def add_clause(self, clause, no_return=True):
"""
Add a new clause to solver's internal formula.
"""
if self.minisat:
res = self.minisat.add_clause(clause)
if res == False:
self.status = False
if not no_return:
return res
#
#==============================================================================
class MinisatGH(object):
"""
MiniSat SAT solver (version from github).
"""
def __init__(self, bootstrap_with=None, use_timer=False):
"""
Basic constructor.
"""
self.minisat = None
self.status = None
self.new(bootstrap_with, use_timer)
def __enter__(self):
"""
'with' constructor.
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
'with' destructor.
"""
self.delete()
self.minisat = None
def new(self, bootstrap_with=None, use_timer=False):
"""
Actual constructor of the solver.
"""
if not self.minisat:
self.minisat = pysolvers.minisatgh_new()
if bootstrap_with:
if type(bootstrap_with) == CNFPlus and bootstrap_with.atmosts:
raise NotImplementedError('Atmost constraints are not supported by MiniSat')
for clause in bootstrap_with:
self.add_clause(clause)
self.use_timer = use_timer
self.call_time = 0.0 # time spent for the last call to oracle
self.accu_time = 0.0 # time accumulated for all calls to oracle
def delete(self):
"""
Destructor.
"""
if self.minisat:
pysolvers.minisatgh_del(self.minisat)
self.minisat = None
def solve(self, assumptions=[]):
"""
Solve internal formula.
"""
if self.minisat:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.minisatgh_solve(self.minisat, assumptions,
int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def solve_limited(self, assumptions=[], expect_interrupt=False):
"""
Solve internal formula using given budgets for conflicts and
propagations.
"""
if self.minisat:
if self.use_timer:
start_time = process_time()
self.status = pysolvers.minisatgh_solve_lim(self.minisat,
assumptions, int(MainThread.check()), int(expect_interrupt))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return self.status
def conf_budget(self, budget):
"""
Set limit on the number of conflicts.
"""
if self.minisat:
pysolvers.minisatgh_cbudget(self.minisat, budget)
def prop_budget(self, budget):
"""
Set limit on the number of propagations.
"""
if self.minisat:
pysolvers.minisatgh_pbudget(self.minisat, budget)
def interrupt(self):
"""
Interrupt solver execution.
"""
if self.minisat:
pysolvers.minisatgh_interrupt(self.minisat)
def clear_interrupt(self):
"""
Clears an interruption.
"""
if self.minisat:
pysolvers.minisatgh_clearint(self.minisat)
def propagate(self, assumptions=[], phase_saving=0):
"""
Propagate a given set of assumption literals.
"""
if self.minisat:
if self.use_timer:
start_time = process_time()
st, props = pysolvers.minisatgh_propagate(self.minisat,
assumptions, phase_saving, int(MainThread.check()))
if self.use_timer:
self.call_time = process_time() - start_time
self.accu_time += self.call_time
return bool(st), props if props != None else []
def set_phases(self, literals=[]):
"""
Sets polarities of a given list of variables.
"""
if self.minisat:
pysolvers.minisatgh_setphases(self.minisat, literals)
def get_status(self):
"""
Returns solver's status.
"""
if self.minisat:
return self.status
def get_model(self):
"""
Get a model if the formula was previously satisfied.
"""
if self.minisat and self.status == True:
model = pysolvers.minisatgh_model(self.minisat)
return model if model != None else []
def get_core(self):
"""
Get an unsatisfiable core if the formula was previously
unsatisfied.
"""
if self.minisat and self.status == False:
return pysolvers.minisatgh_core(self.minisat)
def get_proof(self):
"""
Get a proof produced while deciding the formula.
"""
raise NotImplementedError('Proof tracing is not supported by MiniSat.')
def time(self):
"""
Get time spent for the last call to oracle.
"""
if self.minisat:
return self.call_time
def time_accum(self):
"""
Get time accumulated for all calls to oracle.
"""
if self.minisat:
return self.accu_time
def nof_vars(self):
"""
Get number of variables currently used by the solver.
"""
if self.minisat:
return pysolvers.minisatgh_nof_vars(self.minisat)
def nof_clauses(self):
"""
Get number of clauses currently used by the solver.
"""
if self.minisat:
return pysolvers.minisatgh_nof_cls(self.minisat)
def accum_stats(self):
"""
Get accumulated low-level stats from the solver. This includes
the number of restarts, conflicts, decisions and propagations.
"""
if self.minisat:
return pysolvers.minisatgh_acc_stats(self.minisat)
def enum_models(self, assumptions=[]):
"""
Iterate over models of the internal formula.
"""
if self.minisat:
done = False
while not done:
self.status = self.solve(assumptions=assumptions)
model = self.get_model()
if model is not None:
self.add_clause([-l for l in model]) # blocking model
yield model
else:
done = True
def add_clause(self, clause, no_return=True):
"""
Add a new clause to solver's internal formula.
"""
if self.minisat:
res = pysolvers.minisatgh_add_cl(self.minisat, clause)
if res == False:
self.status = False
if not no_return:
return res
def add_atmost(self, lits, k, no_return=True):
"""
Atmost constraints are not supported by MiniSat.
"""
raise NotImplementedError('Atmost constraints are not supported by MiniSat.')
def append_formula(self, formula, no_return=True):
"""
Appends list of clauses to solver's internal formula.
"""
if self.minisat:
res = None
if type(formula) == CNFPlus and formula.atmosts:
raise NotImplementedError('Atmost constraints are not supported by MiniSat')
for clause in formula:
res = self.add_clause(clause, no_return)
if not no_return and res == False:
return res
if not no_return:
return res
def supports_atmost(self):
"""
This method can be called to determine whether the solver supports
native AtMostK (see :mod:`pysat.card`) constraints.
"""
return False
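# --- Usage sketch (illustrative addition, not part of the original module) ---
# The wrapper classes above share one incremental interface, so a quick smoke
# test can target any of them once the bundled `pysolvers` extension is built.
# The clauses and the assumption literal below are arbitrary example data.
if __name__ == '__main__':
    with MinisatGH(bootstrap_with=[[-1, 2], [-2, 3]], use_timer=True) as solver:
        print('satisfiable:', solver.solve(assumptions=[1]))
        print('model:', solver.get_model())
        print('solving time: {0:.6f}s'.format(solver.time()))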
|
py
|
1a5dc2f3aa384e3835c4440fec8bc760bd6d4256
|
# ***** BEGIN GPL LICENSE BLOCK *****
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENCE BLOCK *****
bl_info = {
"name": "Surface Heat Diffuse Skinning",
"author": "mesh online",
"version": (3, 4, 0),
"blender": (2, 80, 0),
"location": "View3D > UI > Mesh Online",
"description": "Surface Heat Diffuse Skinning",
"warning": "",
"wiki_url": "http://www.mesh-online.net/vhd.html",
"category": "Object"
}
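# Workflow note (descriptive addition): with one armature and at least one
# mesh selected in Object Mode, the operator below exports the meshes and
# bones to text files, runs the bundled `shd` binary in the background,
# reads the computed weights back into vertex groups, and finally parents
# the meshes to the armature.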
import bpy
import sys
import os
import time
import platform
from subprocess import PIPE, Popen
from threading import Thread
from bpy.props import *
from queue import Queue, Empty
class SFC_OT_ModalTimerOperator(bpy.types.Operator):
"""Operator which runs its self from a timer"""
bl_idname = "wm.surface_heat_diffuse"
bl_label = "Surface Heat Diffuse Skinning"
bl_options = {'REGISTER', 'UNDO'}
_timer = None
_pid = None
_queue = None
_objs = []
_permulation = []
_selected_indices = []
_selected_group_index_weights = []
_start_time = None
def write_bone_data(self, obj, filepath):
f = open(filepath, 'w', encoding='utf-8')
f.write("# surface heat diffuse bone export.\n")
amt = obj.data
bpy.ops.object.mode_set(mode='EDIT')
for bone in amt.edit_bones:
if bone.use_deform:
world_bone_head = obj.matrix_world @ bone.head
world_bone_tail = obj.matrix_world @ bone.tail
f.write("b,{},{:.6f},{:.6f},{:.6f},{:.6f},{:.6f},{:.6f}\n".format(
bone.name.replace(",", "\\;"), world_bone_head[0], world_bone_head[1], world_bone_head[2],
world_bone_tail[0], world_bone_tail[1], world_bone_tail[2]))
bpy.ops.object.mode_set(mode='OBJECT')
f.close()
def write_mesh_data(self, objs, filepath):
f = open(filepath, 'w', encoding='utf-8')
f.write("# surface heat diffuse mesh export.\n")
vertex_offset = 0
for obj in objs:
for v in obj.data.vertices:
world_v_co = obj.matrix_world @ v.co
f.write("v,{:.6f},{:.6f},{:.6f}\n".format(world_v_co[0], world_v_co[1], world_v_co[2]))
for poly in obj.data.polygons:
f.write("f");
for loop_ind in poly.loop_indices:
vert_ind = obj.data.loops[loop_ind].vertex_index
f.write(",{}".format(vertex_offset + vert_ind))
f.write("\n")
vertex_offset += len(obj.data.vertices)
f.close()
def read_weight_data(self, objs, filepath):
        # make permutation for all vertices
        vertex_offset = 0
for obj in objs:
for index in range(len(obj.data.vertices)):
self._permulation.append((vertex_offset + index, index, obj))
vertex_offset += len(obj.data.vertices)
if bpy.context.scene.surface_protect:
for index in range(len(objs)):
obj = objs[index]
# get selected vertex indices
self._selected_indices.append([i.index for i in obj.data.vertices if i.select])
self._selected_group_index_weights.append([])
# push protected vertices weight
for vert_ind in self._selected_indices[index]:
for g in obj.data.vertices[vert_ind].groups:
self._selected_group_index_weights[index].append((obj.vertex_groups[g.group].name, vert_ind, g.weight))
f = open(filepath, 'r', encoding='utf-8')
bones = []
for line in f:
if len(line) == 0:
continue
tokens = line.strip("\r\n").split(",")
if tokens[0] == "b":
group_name = tokens[1].replace("\\;", ",")
bones.append(group_name)
for obj in objs:
#check for existing group with the same name
if None != obj.vertex_groups.get(group_name):
group = obj.vertex_groups[group_name]
obj.vertex_groups.remove(group)
obj.vertex_groups.new(name = group_name)
if tokens[0] == "w":
group_name = bones[int(tokens[2])]
index = int(tokens[1])
vert_ind = self._permulation[index][1]
weight = float(tokens[3])
obj = self._permulation[index][2]
# protect vertices weight
if bpy.context.scene.surface_protect and vert_ind in self._selected_indices[objs.index(obj)]:
continue
obj.vertex_groups[group_name].add([vert_ind], weight, 'REPLACE')
f.close()
if bpy.context.scene.surface_protect:
for index in range(len(objs)):
obj = objs[index]
# pop protected vertices weight
for (group_name, vert_ind, weight) in self._selected_group_index_weights[index]:
obj.vertex_groups[group_name].add([vert_ind], weight, 'REPLACE')
def modal(self, context, event):
if event.type == 'ESC':
self._pid.terminate()
return self.cancel(context)
if event.type == 'TIMER':
# background task is still running
if None == self._pid.poll():
# read line without blocking
try: rawline = self._queue.get_nowait()
except Empty:
pass
else:
line = rawline.decode().strip("\r\n")
self.report({'INFO'}, line)
else:
# background task finished running
self.read_weight_data(self._objs, os.path.join(os.path.dirname(__file__), "data", "untitled-weight.txt"))
running_time = time.time() - self._start_time
self.report({'INFO'}, "".join(("Complete, ", "running time: ", \
str(int(running_time / 60))," minutes ", str(int(running_time % 60)), " seconds")))
# bind meshes to the armature
bpy.ops.object.parent_set(type='ARMATURE')
return self.cancel(context)
return {'RUNNING_MODAL'}
def execute(self, context):
arm_count = 0
obj_count = 0
for ob in bpy.context.selected_objects:
if 'ARMATURE' == ob.type:
arm_count += 1
if 'MESH' == ob.type:
obj_count += 1
if not (context.mode == 'OBJECT' and arm_count == 1 and obj_count >= 1):
self.report({'ERROR'}, "Please select one armature and at least one mesh in 'OBJECT' mode, then try again.")
return {'CANCELLED'}
self._objs = []
self._permulation = []
self._selected_indices = []
self._selected_group_index_weights = []
arm = None
objs = []
# get armature and mesh
for ob in bpy.context.selected_objects:
if 'ARMATURE' == ob.type:
arm = ob
if 'MESH' == ob.type:
objs.append(ob)
# sort meshes by name
        objs.sort(key=lambda obj: obj.name)
# save the reference for later use
self._objs = objs
for obj in objs:
# focus on the mesh
bpy.context.view_layer.objects.active = obj
# synchronize data
bpy.ops.object.mode_set(mode='OBJECT')
# write mesh data
self.write_mesh_data(objs, os.path.join(os.path.dirname(__file__), "data", "untitled-mesh.txt"))
# we must focus on the armature before we can write bone data
bpy.context.view_layer.objects.active = arm
# synchronize data
bpy.ops.object.mode_set(mode='OBJECT')
# write bone data
self.write_bone_data(arm, os.path.join(os.path.dirname(__file__), "data", "untitled-bone.txt"))
# do voxel skinning in background
ON_POSIX = 'posix' in sys.builtin_module_names
# chmod
if ON_POSIX:
os.chmod(os.path.join(os.path.dirname(__file__), "bin", platform.system(), "shd"), 0o755)
def enqueue_output(out, queue):
for line in iter(out.readline, b''):
queue.put(line)
out.close()
executable_path = None
if platform.system() == 'Windows':
if platform.machine().endswith('64'):
executable_path = os.path.join(os.path.dirname(__file__), "bin", platform.system(), "x64", "shd")
else:
executable_path = os.path.join(os.path.dirname(__file__), "bin", platform.system(), "x86", "shd")
else:
executable_path = os.path.join(os.path.dirname(__file__), "bin", platform.system(), "shd")
self._pid = Popen([executable_path,
"untitled-mesh.txt",
"untitled-bone.txt",
"untitled-weight.txt",
str(context.scene.surface_resolution),
str(context.scene.surface_loops),
str(context.scene.surface_samples),
str(context.scene.surface_influence),
str(context.scene.surface_falloff),
context.scene.surface_sharpness,
"y" if context.scene.detect_surface_solidify else "n"],
cwd = os.path.join(os.path.dirname(__file__), "data"),
stdout = PIPE,
bufsize = 1,
close_fds = ON_POSIX)
self._queue = Queue()
t = Thread(target=enqueue_output, args=(self._pid.stdout, self._queue))
t.daemon = True
t.start()
self._start_time = time.time()
# start timer to poll data
self._timer = context.window_manager.event_timer_add(0.1, window=context.window)
context.window_manager.modal_handler_add(self)
return {'RUNNING_MODAL'}
def cancel(self, context):
# remove timer
context.window_manager.event_timer_remove(self._timer)
self._objs = []
self._permulation = []
self._selected_indices = []
self._selected_group_index_weights = []
return {'CANCELLED'}
def init_properties():
bpy.types.Scene.surface_resolution = IntProperty(
name = "Voxel Resolution",
description = "Maximum voxel grid size",
default = 128,
min = 32,
max = 1024)
bpy.types.Scene.surface_loops = IntProperty(
name = "Diffuse Loops",
description = "Heat diffuse pass = Voxel Resolution * Diffuse Loops",
default = 5,
min = 1,
max = 9)
bpy.types.Scene.surface_samples = IntProperty(
name = "Sample Rays",
description = "Ray samples count",
default = 64,
min = 32,
max = 128)
bpy.types.Scene.surface_influence = IntProperty(
name = "Influence Bones",
description = "Max influence bones per vertex, please decrease the value (such as 4) for mobile devices",
default = 8,
min = 1,
max = 128)
bpy.types.Scene.surface_falloff = FloatProperty(
name = "Diffuse Falloff",
description = "Heat diffuse falloff",
default = 0.2,
min = 0.01,
max = 0.99)
bpy.types.Scene.surface_protect = BoolProperty(
name = "Protect Selected Vertex Weight",
description = "Protect selected vertex weight",
default = False)
bpy.types.Scene.surface_sharpness = EnumProperty(
name = "Edges",
description = "Edges",
items = [
('1','Soft','Soft Curvature'),
('2','Normal','Normal Curvature'),
('3','Sharp','Sharp Curvature'),
('4','Sharpest','Sharpest Curvature')],
default = '3')
bpy.types.Scene.detect_surface_solidify = BoolProperty(
name = "Detect Solidify",
description = "Detect solidified clothes, if you enable this option, make sure that all bones are in the charecter's volume, otherwise, the result may be wrong",
default = False)
def clear_properties():
props = ["surface_resolution",
"surface_samples",
"surface_falloff",
"surface_loops",
"surface_influence",
"surface_protect"]
for p in props:
if p in bpy.types.Scene.bl_rna.properties:
exec("del bpy.types.Scene." + p)
class SFC_PT_SurfaceHeatDiffuseSkinningPanel(bpy.types.Panel):
"""Creates a Panel in the Object properties window"""
bl_label = "Surface Heat Diffuse Skinning"
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
bl_category = 'Mesh Online'
@classmethod
def poll(self, context):
return True
def draw(self, context):
layout = self.layout
layout.prop(context.scene, 'surface_resolution', icon='BLENDER', toggle=True)
layout.prop(context.scene, 'surface_loops', icon='BLENDER', toggle=True)
layout.prop(context.scene, 'surface_samples', icon='BLENDER', toggle=True)
layout.prop(context.scene, 'surface_influence', icon='BLENDER', toggle=True)
layout.prop(context.scene, 'surface_falloff', icon='BLENDER', toggle=True)
layout.prop(context.scene, 'surface_sharpness')
layout.prop(context.scene, 'surface_protect')
layout.prop(context.scene, 'detect_surface_solidify')
row = layout.row()
row.operator("wm.surface_heat_diffuse")
def register():
bpy.utils.register_class(SFC_PT_SurfaceHeatDiffuseSkinningPanel)
bpy.utils.register_class(SFC_OT_ModalTimerOperator)
init_properties()
def unregister():
bpy.utils.unregister_class(SFC_PT_SurfaceHeatDiffuseSkinningPanel)
bpy.utils.unregister_class(SFC_OT_ModalTimerOperator)
clear_properties()
if __name__ == "__main__":
register()
|
py
|
1a5dc32c88aee77d35bae5487d1c6818b21e2941
|
from mock import Mock
from twisted.internet.defer import maybeDeferred, succeed
from synapse.events import FrozenEvent
from synapse.types import Requester, UserID
from synapse.util import Clock
from synapse.util.logcontext import LoggingContext
from tests import unittest
from tests.server import ThreadedMemoryReactorClock, setup_test_homeserver
class MessageAcceptTests(unittest.TestCase):
def setUp(self):
self.http_client = Mock()
self.reactor = ThreadedMemoryReactorClock()
self.hs_clock = Clock(self.reactor)
self.homeserver = setup_test_homeserver(
self.addCleanup,
http_client=self.http_client,
clock=self.hs_clock,
reactor=self.reactor,
)
user_id = UserID("us", "test")
our_user = Requester(user_id, None, False, None, None)
room_creator = self.homeserver.get_room_creation_handler()
room = room_creator.create_room(
our_user, room_creator.PRESETS_DICT["public_chat"], ratelimit=False
)
self.reactor.advance(0.1)
self.room_id = self.successResultOf(room)["room_id"]
# Figure out what the most recent event is
most_recent = self.successResultOf(
maybeDeferred(
self.homeserver.datastore.get_latest_event_ids_in_room, self.room_id
)
)[0]
join_event = FrozenEvent(
{
"room_id": self.room_id,
"sender": "@baduser:test.serv",
"state_key": "@baduser:test.serv",
"event_id": "$join:test.serv",
"depth": 1000,
"origin_server_ts": 1,
"type": "m.room.member",
"origin": "test.servx",
"content": {"membership": "join"},
"auth_events": [],
"prev_state": [(most_recent, {})],
"prev_events": [(most_recent, {})],
}
)
self.handler = self.homeserver.get_handlers().federation_handler
self.handler.do_auth = lambda *a, **b: succeed(True)
self.client = self.homeserver.get_federation_client()
self.client._check_sigs_and_hash_and_fetch = lambda dest, pdus, **k: succeed(
pdus
)
# Send the join, it should return None (which is not an error)
d = self.handler.on_receive_pdu(
"test.serv", join_event, sent_to_us_directly=True
)
self.reactor.advance(1)
self.assertEqual(self.successResultOf(d), None)
# Make sure we actually joined the room
self.assertEqual(
self.successResultOf(
maybeDeferred(
self.homeserver.datastore.get_latest_event_ids_in_room, self.room_id
)
)[0],
"$join:test.serv",
)
def test_cant_hide_direct_ancestors(self):
"""
If you send a message, you must be able to provide the direct
prev_events that said event references.
"""
def post_json(destination, path, data, headers=None, timeout=0):
# If it asks us for new missing events, give them NOTHING
if path.startswith("/_matrix/federation/v1/get_missing_events/"):
return {"events": []}
self.http_client.post_json = post_json
# Figure out what the most recent event is
most_recent = self.successResultOf(
maybeDeferred(
self.homeserver.datastore.get_latest_event_ids_in_room, self.room_id
)
)[0]
# Now lie about an event
lying_event = FrozenEvent(
{
"room_id": self.room_id,
"sender": "@baduser:test.serv",
"event_id": "one:test.serv",
"depth": 1000,
"origin_server_ts": 1,
"type": "m.room.message",
"origin": "test.serv",
"content": {"body": "hewwo?"},
"auth_events": [],
"prev_events": [("two:test.serv", {}), (most_recent, {})],
}
)
with LoggingContext(request="lying_event"):
d = self.handler.on_receive_pdu(
"test.serv", lying_event, sent_to_us_directly=True
)
# Step the reactor, so the database fetches come back
self.reactor.advance(1)
# on_receive_pdu should throw an error
failure = self.failureResultOf(d)
self.assertEqual(
failure.value.args[0],
(
"ERROR 403: Your server isn't divulging details about prev_events "
"referenced in this event."
),
)
# Make sure the invalid event isn't there
extrem = maybeDeferred(
self.homeserver.datastore.get_latest_event_ids_in_room, self.room_id
)
self.assertEqual(self.successResultOf(extrem)[0], "$join:test.serv")
|
py
|
1a5dc4c31e0ef0338cd3a8cc9ea247f0a81c5ffd
|
from chainer import cuda
from chainer import function
from chainer.utils import type_check
class FlipLR(function.Function):
"""Flip array in the left/right direction."""
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
x_type = in_types[0]
type_check.expect(
x_type.dtype.kind == 'f',
x_type.ndim >= 2)
def forward(self, inputs):
xp = cuda.get_array_module(*inputs)
return xp.fliplr(inputs[0]),
def backward(self, inputs, grads):
xp = cuda.get_array_module(*inputs)
return xp.fliplr(grads[0]),
def fliplr(a):
"""Flip array in the left/right direction.
Args:
        a (~chainer.Variable): Input variable.
Returns:
~chainer.Variable: Output variable.
"""
return FlipLR()(a)
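# --- Usage sketch (illustrative addition, not part of the original module) ---
# FlipLR expects a floating-point array with at least two dimensions; the toy
# input below is assumed purely for demonstration.
if __name__ == '__main__':
    import numpy
    x = numpy.arange(6, dtype=numpy.float32).reshape(2, 3)
    y = fliplr(x)
    print(y.data)  # each row of `x` reversed left/right, shape (2, 3)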
|
py
|
1a5dc4c33ec63fb1fc67caea2e7745049425aafd
|
#!/usr/bin/env python
import sys
from os import path
from setuptools import find_namespace_packages, setup
with open("arcade/version.py") as file:
exec(file.read())
def get_long_description() -> str:
fname = path.join(path.dirname(path.abspath(__file__)), "README.rst")
with open(fname, "r") as f:
return f.read()
setup(
name="arcade",
description="Arcade Game Development Library",
long_description=get_long_description(),
author="Paul Vincent Craven",
author_email="[email protected]",
license="MIT",
url="https://api.arcade.academy",
download_url="https://api.arcade.academy",
install_requires=[
"pyglet==2.0.dev13",
"pillow~=9.0.0",
"pymunk~=6.2.1",
"pytiled-parser==2.0.1",
],
extras_require={
"dev": [
"pytest",
"flake8",
"mypy",
"coverage",
"coveralls",
"pytest-mock",
"pytest-cov",
"sphinx",
"sphinx-sitemap",
"sphinx_rtd_theme",
"sphinx_copybutton",
"dirsync",
"wheel",
],
},
packages=find_namespace_packages(
include=["arcade", "arcade.*"],
exclude=[],
),
python_requires=">=3.7",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Software Development :: Libraries :: Python Modules",
],
include_package_data=True,
project_urls={
"Documentation": "https://api.arcade.academy/",
"Example Code": "https://api.arcade.academy/en/latest/examples/index.html",
"Issue Tracker": "https://github.com/pythonarcade/arcade/issues",
"Source": "https://github.com/pythonarcade/arcade",
"On-line Book": "https://learn.arcade.academy",
},
version=VERSION,
)
|
py
|
1a5dc52cb9a9d576186f02319921455744541fbb
|
from PIL import Image
from io import BytesIO
import requests
# Open the image file
img = Image.open('C:/Users/yanhao/Pictures/Saved Pictures/风景1.jpg')
# Open the image from a byte stream
r = requests.get('http://f.hiphotos.baidu.com/image/pic/item/b151f8198618367aa7f3cc7424738bd4b31ce525.jpg')
img1 = Image.open(BytesIO(r.content))
# Show the image
#img.show()
img1.show()
# Rotate by 90 degrees and show
img1.rotate(90).show()
|
py
|
1a5dc53df2785e4585ec7b69ca65e20e1ae95d95
|
# modify the globals
import config
import os, sys
from pathlib import Path
data_file_name = 'owid-covid-data.json'
config.CURRENT_DIR_STR = os.path.dirname(__file__)
config.DATA_FILE_STR = os.path.join(config.CURRENT_DIR_STR, 'data', data_file_name)
config.ARGO_PACKAGE_STR = os.path.join(config.CURRENT_DIR_STR, 'colchis')
config.DATA_FILE_PATH = Path(config.DATA_FILE_STR)
sys.path.append(config.DATA_FILE_PATH)
config.ARGO_PACKAGE_PATH = Path(config.ARGO_PACKAGE_STR)
sys.path.append(config.ARGO_PACKAGE_PATH)
|
py
|
1a5dc88d3d11705462faff5f4f8c87d2a374eeee
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ImageStorageProfile(Model):
"""Describes a storage profile.
:param os_disk: The OS disk.
:type os_disk: :class:`ImageOSDisk
<azure.mgmt.compute.compute.v2017_03_30.models.ImageOSDisk>`
:param data_disks: The data disks.
:type data_disks: list of :class:`ImageDataDisk
<azure.mgmt.compute.compute.v2017_03_30.models.ImageDataDisk>`
"""
_validation = {
'os_disk': {'required': True},
}
_attribute_map = {
'os_disk': {'key': 'osDisk', 'type': 'ImageOSDisk'},
'data_disks': {'key': 'dataDisks', 'type': '[ImageDataDisk]'},
}
def __init__(self, os_disk, data_disks=None):
self.os_disk = os_disk
self.data_disks = data_disks
|
py
|
1a5dc891e8e4e2b2f6ad97e1913d2d6f607d2072
|
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class RevocationStatus(object):
"""
The current revocation status of the certificate or certificate authority (CA).
"""
#: A constant which can be used with the revocation_reason property of a RevocationStatus.
#: This constant has a value of "UNSPECIFIED"
REVOCATION_REASON_UNSPECIFIED = "UNSPECIFIED"
#: A constant which can be used with the revocation_reason property of a RevocationStatus.
#: This constant has a value of "KEY_COMPROMISE"
REVOCATION_REASON_KEY_COMPROMISE = "KEY_COMPROMISE"
#: A constant which can be used with the revocation_reason property of a RevocationStatus.
#: This constant has a value of "CA_COMPROMISE"
REVOCATION_REASON_CA_COMPROMISE = "CA_COMPROMISE"
#: A constant which can be used with the revocation_reason property of a RevocationStatus.
#: This constant has a value of "AFFILIATION_CHANGED"
REVOCATION_REASON_AFFILIATION_CHANGED = "AFFILIATION_CHANGED"
#: A constant which can be used with the revocation_reason property of a RevocationStatus.
#: This constant has a value of "SUPERSEDED"
REVOCATION_REASON_SUPERSEDED = "SUPERSEDED"
#: A constant which can be used with the revocation_reason property of a RevocationStatus.
#: This constant has a value of "CESSATION_OF_OPERATION"
REVOCATION_REASON_CESSATION_OF_OPERATION = "CESSATION_OF_OPERATION"
#: A constant which can be used with the revocation_reason property of a RevocationStatus.
#: This constant has a value of "PRIVILEGE_WITHDRAWN"
REVOCATION_REASON_PRIVILEGE_WITHDRAWN = "PRIVILEGE_WITHDRAWN"
#: A constant which can be used with the revocation_reason property of a RevocationStatus.
#: This constant has a value of "AA_COMPROMISE"
REVOCATION_REASON_AA_COMPROMISE = "AA_COMPROMISE"
def __init__(self, **kwargs):
"""
Initializes a new RevocationStatus object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param time_revoked:
The value to assign to the time_revoked property of this RevocationStatus.
:type time_revoked: datetime
:param revocation_reason:
The value to assign to the revocation_reason property of this RevocationStatus.
Allowed values for this property are: "UNSPECIFIED", "KEY_COMPROMISE", "CA_COMPROMISE", "AFFILIATION_CHANGED", "SUPERSEDED", "CESSATION_OF_OPERATION", "PRIVILEGE_WITHDRAWN", "AA_COMPROMISE", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type revocation_reason: str
"""
self.swagger_types = {
'time_revoked': 'datetime',
'revocation_reason': 'str'
}
self.attribute_map = {
'time_revoked': 'timeRevoked',
'revocation_reason': 'revocationReason'
}
self._time_revoked = None
self._revocation_reason = None
@property
def time_revoked(self):
"""
**[Required]** Gets the time_revoked of this RevocationStatus.
The time when the certificate or CA was revoked.
:return: The time_revoked of this RevocationStatus.
:rtype: datetime
"""
return self._time_revoked
@time_revoked.setter
def time_revoked(self, time_revoked):
"""
Sets the time_revoked of this RevocationStatus.
The time when the certificate or CA was revoked.
:param time_revoked: The time_revoked of this RevocationStatus.
:type: datetime
"""
self._time_revoked = time_revoked
@property
def revocation_reason(self):
"""
**[Required]** Gets the revocation_reason of this RevocationStatus.
The reason that the certificate or CA was revoked.
Allowed values for this property are: "UNSPECIFIED", "KEY_COMPROMISE", "CA_COMPROMISE", "AFFILIATION_CHANGED", "SUPERSEDED", "CESSATION_OF_OPERATION", "PRIVILEGE_WITHDRAWN", "AA_COMPROMISE", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The revocation_reason of this RevocationStatus.
:rtype: str
"""
return self._revocation_reason
@revocation_reason.setter
def revocation_reason(self, revocation_reason):
"""
Sets the revocation_reason of this RevocationStatus.
The reason that the certificate or CA was revoked.
:param revocation_reason: The revocation_reason of this RevocationStatus.
:type: str
"""
allowed_values = ["UNSPECIFIED", "KEY_COMPROMISE", "CA_COMPROMISE", "AFFILIATION_CHANGED", "SUPERSEDED", "CESSATION_OF_OPERATION", "PRIVILEGE_WITHDRAWN", "AA_COMPROMISE"]
if not value_allowed_none_or_none_sentinel(revocation_reason, allowed_values):
revocation_reason = 'UNKNOWN_ENUM_VALUE'
self._revocation_reason = revocation_reason
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
|
py
|
1a5dc95f6633dbfd3a570729eb98dd5f03e9b463
|
import django_tables2 as tables
from django_tables2.utils import A, Accessor
from mmg.jobtrak.links.models import *
from django.utils.translation import ugettext_lazy as _
import external_urls
class JobBoardTable(tables.Table):
url = tables.TemplateColumn(
'<a href="{% load external_urls %}{% external_url record.url %}" target="_blank">{{record.name}}</a>',
verbose_name="Web Site", order_by=A('name')
)
last_click = tables.DateTimeColumn(
format="D, j N",
verbose_name="Last Visit",
attrs={'td': {'nowrap': 'nowrap'}} )
note = tables.Column(verbose_name="Description")
class Meta:
model = JobBoard
attrs = { "class": "table" }
fields = ('url','last_click','note',)
|
py
|
1a5dc9d0eb33f2a0670662d49d28512c1e17bc86
|
import collections
from scipy.special import comb
import numpy as np
def _iter_key_sorted_dct(dct):
for k in sorted(dct.keys()):
yield k, dct[k]
def make_sum(dct_values, base=None):
"""base is some previous result"""
sum_cnt = collections.defaultdict(int)
if base is not None:
sum_cnt.update(base)
for v, n in _iter_key_sorted_dct(dct_values):
# to include from 1 to n elements of value v
dct = dict(sum_cnt)
for i in range(1, n + 1):
n_ways = comb(n, i)
increment = i * v # increment for sum by including n times v
sum_cnt[increment] += n_ways
for k, v_orig in _iter_key_sorted_dct(dct):
sum_cnt[k + increment] += n_ways * v_orig
return sum_cnt
class Jewelry(object):
def __init__(self):
self.values_ = None
self.ways_below_ = collections.defaultdict(int)
self.ways_below_[0] = 1
def __repr__(self):
return repr(self.values_)
def set_values(self, v):
self.values_ = collections.Counter(v)
def how_many(self, values):
self.set_values(values)
count = 0
values_for_above = dict(self.values_)
for v, cnt in _iter_key_sorted_dct(self.values_):
# Remove value v iteratively to get all the possible sums from
# the values above v
values_for_above.pop(v)
ways_above_exclude_v = make_sum(values_for_above)
ways_below_exclude_v = dict(self.ways_below_)
for i in range(1, cnt + 1):
n_ways = comb(cnt, i)
ways_below = collections.defaultdict(int)
for k, cnt_orig in _iter_key_sorted_dct(ways_below_exclude_v):
sum_with_iv = k + v * i
cnt_increment = n_ways * cnt_orig
ways_below[sum_with_iv] += cnt_increment
self.ways_below_[sum_with_iv] += cnt_increment
# The ways above can include cnt - i elements in maximum
ways_above = make_sum({v: cnt - i}, ways_above_exclude_v)
intersection = set(ways_below).intersection(ways_above)
count += np.sum([ways_below[k] * ways_above[k]
for k in intersection])
return count
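# --- Usage sketch (illustrative addition, not part of the original module) ---
# The list below is arbitrary example data; it simply exercises how_many(),
# which combines the per-side sum counts produced by make_sum() above.
if __name__ == '__main__':
    print(Jewelry().how_many([1, 2, 5, 3, 4, 5]))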
|
py
|
1a5dca1e1dfb790534d1e0fc55ea7c72f0efa0d2
|
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .update_model_deployment_configuration_details import UpdateModelDeploymentConfigurationDetails
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class UpdateSingleModelDeploymentConfigurationDetails(UpdateModelDeploymentConfigurationDetails):
"""
The single model type deployment for update.
"""
def __init__(self, **kwargs):
"""
Initializes a new UpdateSingleModelDeploymentConfigurationDetails object with values from keyword arguments. The default value of the :py:attr:`~oci.data_science.models.UpdateSingleModelDeploymentConfigurationDetails.deployment_type` attribute
of this class is ``SINGLE_MODEL`` and it should not be changed.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param deployment_type:
The value to assign to the deployment_type property of this UpdateSingleModelDeploymentConfigurationDetails.
Allowed values for this property are: "SINGLE_MODEL"
:type deployment_type: str
:param model_configuration_details:
The value to assign to the model_configuration_details property of this UpdateSingleModelDeploymentConfigurationDetails.
:type model_configuration_details: oci.data_science.models.UpdateModelConfigurationDetails
"""
self.swagger_types = {
'deployment_type': 'str',
'model_configuration_details': 'UpdateModelConfigurationDetails'
}
self.attribute_map = {
'deployment_type': 'deploymentType',
'model_configuration_details': 'modelConfigurationDetails'
}
self._deployment_type = None
self._model_configuration_details = None
self._deployment_type = 'SINGLE_MODEL'
@property
def model_configuration_details(self):
"""
Gets the model_configuration_details of this UpdateSingleModelDeploymentConfigurationDetails.
:return: The model_configuration_details of this UpdateSingleModelDeploymentConfigurationDetails.
:rtype: oci.data_science.models.UpdateModelConfigurationDetails
"""
return self._model_configuration_details
@model_configuration_details.setter
def model_configuration_details(self, model_configuration_details):
"""
Sets the model_configuration_details of this UpdateSingleModelDeploymentConfigurationDetails.
:param model_configuration_details: The model_configuration_details of this UpdateSingleModelDeploymentConfigurationDetails.
:type: oci.data_science.models.UpdateModelConfigurationDetails
"""
self._model_configuration_details = model_configuration_details
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
|
py
|
1a5dca2554d5ace7741a8fda125511a45152d70a
|
import csv
import json
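# Overview (descriptive addition): merge the before/after image links from
# bridgeData3.csv into the full record set from final.csv, keyed by project
# code, and dump the combined records to results.json.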
df = open("bridgeData3.csv",'r').readlines()
fin = open('final.csv','r').readlines()
# Skips the header of the csv
finCsv = fin[1:]
finalCsv = df[1:]
obj = {}
# loop through the csv with images
for i in finalCsv:
x = i.split(',')
obj[x[1]] = {'bridge_name':x[0],'proj_code':x[1],'before_img':x[2],'after_img':x[3][0:-1]}
# create a final object
finalObj = {}
# check full csv
for i in finCsv:
x = i.split(',')
id = x[6]
# create an object with the key of the id regardless
finalObj[id]= {}
row = fin[0].split(',')
# if the id has an image add it to the final object
if id in obj:
finalObj[id]['before_img'] = obj[id]['before_img']
finalObj[id]['after_img'] = obj[id]['after_img'][0:-1]
for i in range(len(row)):
key = row[i].replace(' ',"_")
key = key.strip()
val = x[i].strip()
# 8 is the position of the latitude
if i == 8:
key = 'latitude'
# val = float(val)
if i == 9:
key = 'longitude'
if i == 11:
continue
try:
val = int(val)
except ValueError:
val = val
finalObj[id][key.lower()] = val
print(finalObj['1013351'])
with open('results.json','w') as fp:
json.dump(finalObj,fp,indent=4)
|
py
|
1a5dca69bfd62bd97216fa2bce999805eb121e87
|
import socketserver
import threading
import mimetypes
import os
import os.path
import collections
import json
import logging
import io
import h2.connection
import h2.events
log = logging.getLogger(__name__)
AUTHORITY = u'localhost:6001'
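# Protocol note (descriptive addition): a client is expected to send
#   /login  with a JSON body such as {"client": "<id>"} to register itself,
#   /send   with {"to": "<receiver id>", "message": "<text>"} to push a message,
#   /end    with no body to close the chat,
# matching the fields read in initiate_client() and send_message() below.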
# header-body pair for each stream
request_data = collections.namedtuple('request_data', ['headers', 'data'])
# info needed to send message to a client
stream_conn_sock = collections.namedtuple('stream_conn_sock',
['stream_id', 'connection', 'socket'])
clients = { }
class ThreadingTCPServer(socketserver.ThreadingMixIn,
socketserver.TCPServer):
allow_reuse_address = True
class MyH2Handler(socketserver.StreamRequestHandler):
connection = None
# store headers-body pair of each stream
stream_data = { }
# every socket represents a client, which has a special id
client_id = None
# store functions that handle the body
body_handlers = { }
def initiate_client(self, stream_id):
# get the current client's id from request body
body = self.stream_data[stream_id].data.getvalue().decode('utf-8')
log.debug(body)
bodyjson = json.loads(body)
self.client_id = bodyjson['client']
log.debug('client id %s', self.client_id)
# save the information needed to send message to this client
socket = self.request
s_c_s = stream_conn_sock(stream_id, self.connection, socket)
log.info('reg client %s %s', self.client_id, s_c_s)
clients.update({self.client_id: s_c_s})
# inform client that it's okay to start the chat now
ok = b'ready to continue'
headers = collections.OrderedDict([(':status', '200'),
('server','http2'),
('content-length', len(ok))])
self.connection.send_headers(stream_id, headers)
self.connection.send_data(stream_id, ok)
self.request.sendall(self.connection.data_to_send())
def send_message(self, stream_id):
# get message and receiver
body = self.stream_data[stream_id].data.getvalue().decode('utf-8')
bodyjson = json.loads(body)
receiver = bodyjson['to']
message = bodyjson['message'].encode('utf-8')
# get receiver "address"
r_stream, r_conn, r_socket = clients[receiver]
# initiate push request to receiver
request_headers = collections.OrderedDict([(':status', '200'),
('server', 'http2')])
new_stream_id = r_conn.get_next_available_stream_id()
log.info('push req %s %s %s %s', request_headers, r_stream, r_conn, r_socket)
r_conn.push_stream(r_stream, new_stream_id, request_headers)
r_socket.sendall(r_conn.data_to_send())
# push message to receiver
r_response_headers = collections.OrderedDict([(':status', '200'),
(':authority', AUTHORITY),
('server', 'http2'),
('content-length', len(message))])
r_conn.send_headers(new_stream_id, r_response_headers)
log.info('push resp %s %s %s', message, r_stream, r_conn)
r_conn.send_data(new_stream_id, message, end_stream = True)
r_socket.sendall(r_conn.data_to_send())
# inform sender that message is sent
'''
sent = b'sent'
response_headers = collections.OrderedDict([(':status', '200'),
('server', 'http2'),
('content_length', len(sent))])
self.connection.send_headers(stream_id, response_headers)
self.connection.send_data(stream_id, sent)
self.request.sendall(self.connection.data_to_send())
'''
    def end_chat(self, stream_id):
# close receiving channel
r_stream_id, r_conn, socket = clients[self.client_id]
        r_response_headers = collections.OrderedDict([(':status', '200'),
('server', 'http2')])
r_conn.send_headers(r_stream_id, r_response_headers, end_stream = True)
socket.sendall(r_conn.data_to_send())
# inform client and close connection
ended = b'chat ended'
        response_headers = collections.OrderedDict([(':status', '200'),
('server', 'http2'),
('content-length', len(ended))])
self.connection.send_headers(stream_id, response_headers)
self.connection.send_data(stream_id, ended, end_stream = True)
self.request.sendall(self.connection.data_to_send())
self.connection.close_connection()
self.request.close()
def request_received(self, headers, stream_id):
headers = collections.OrderedDict(headers)
# store headers (to match with request body)
r_d = request_data(headers, io.BytesIO())
self.stream_data[stream_id] = r_d
# find out what the client intends to do
path = headers[':path']
route = os.path.basename(os.path.normpath(path))
log.info('request path %s at %s', path, stream_id)
if route == 'login':
self.body_handlers[stream_id] = self.initiate_client
elif route == 'send':
self.body_handlers[stream_id] = self.send_message
elif route == 'end':
self.end_chat(stream_id)
else:
return
def data_received(self, data, stream_id):
s_d = self.stream_data[stream_id]
s_d.data.write(data)
fn = self.body_handlers[stream_id]
if fn :
log.info('dispatch %s with %s', stream_id, fn)
fn(stream_id)
def handle(self):
self.connection = h2.connection.H2Connection(client_side = False)
self.connection.initiate_connection()
self.request.sendall(self.connection.data_to_send())
log.debug('init pass')
while True:
data = self.request.recv(65535)
events = self.connection.receive_data(data)
for event in events:
if isinstance(event, h2.events.RequestReceived):
self.request_received(event.headers, event.stream_id)
if isinstance(event, h2.events.DataReceived):
self.data_received(event.data, event.stream_id)
if isinstance(event, h2.events.StreamEnded):
self.server.shutdown()
#logging.basicConfig(level=logging.DEBUG)
logging.basicConfig(level=logging.INFO)
host, port = '', 6001
httpd = ThreadingTCPServer((host, port), MyH2Handler)
httpd.serve_forever()
|
py
|
1a5dca8fa05459f392aa73350c98184a38643f2d
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tempfile
from unittest import mock
from django.conf import settings
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.test.utils import override_settings
from django.urls import reverse
from horizon import tables as horizon_tables
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard.dashboards.project.images.images import forms
from openstack_dashboard.dashboards.project.images.images import tables
IMAGES_INDEX_URL = reverse('horizon:project:images:index')
class CreateImageFormTests(test.ResetImageAPIVersionMixin, test.TestCase):
@mock.patch.object(api.glance, 'get_image_formats')
@mock.patch.object(api.glance, 'image_list_detailed')
def test_no_location_or_file(self, mock_image_list, mock_schemas_list):
mock_image_list.side_effect = [
[self.images.list(), False, False],
[self.images.list(), False, False]
]
image_calls = [
mock.call(test.IsA(dict), filters={'disk_format': 'aki'}),
mock.call(test.IsA(dict), filters={'disk_format': 'ari'})
]
post = {
'name': 'Ubuntu 11.10',
'source_type': 'file',
'description': 'Login with admin/admin',
'disk_format': 'qcow2',
'architecture': 'x86-64',
'min_disk': 15,
'min_ram': 512,
'is_public': 1}
files = {}
form = forms.CreateImageForm(post, files)
self.assertFalse(form.is_valid())
mock_image_list.assert_has_calls(image_calls)
class UpdateImageFormTests(test.ResetImageAPIVersionMixin, test.TestCase):
def test_is_format_field_editable(self):
form = forms.UpdateImageForm({})
disk_format = form.fields['disk_format']
self.assertFalse(disk_format.widget.attrs.get('readonly', False))
@mock.patch.object(api.glance, 'image_get')
def test_image_update(self, mock_image_get):
image = self.images.first()
mock_image_get.return_value = image
url = reverse('horizon:project:images:images:update',
args=[image.id])
res = self.client.get(url)
self.assertNoFormErrors(res)
self.assertEqual(res.context['image'].disk_format,
image.disk_format)
mock_image_get.assert_called_once_with(test.IsHttpRequest(),
image.id)
@mock.patch.object(api.glance, 'image_get')
@mock.patch.object(api.glance, 'image_update')
def test_image_update_post_v2(self, mock_image_update, mock_image_get):
image = self.images.first()
data = {
'name': 'Ubuntu 11.10',
'image_id': str(image.id),
'description': 'Login with admin/admin',
'source_type': 'url',
'image_url': 'http://cloud-images.ubuntu.com/releases/'
'oneiric/release/ubuntu-11.10-server-cloudimg'
'-amd64-disk1.img',
'disk_format': 'qcow2',
'architecture': 'x86-64',
'min_disk': 15,
'min_ram': 512,
'is_public': False,
'protected': False,
'method': 'UpdateImageForm'}
mock_image_get.return_value = image
mock_image_update.return_value = image
url = reverse('horizon:project:images:images:update',
args=[image.id])
res = self.client.post(url, data)
self.assertNoFormErrors(res)
self.assertEqual(res.status_code, 302)
mock_image_get.assert_called_once_with(test.IsHttpRequest(),
str(image.id))
mock_image_update.assert_called_once_with(
test.IsHttpRequest(),
image.id,
visibility='private',
protected=data['protected'],
disk_format=data['disk_format'],
container_format="bare",
name=data['name'],
min_ram=data['min_ram'],
min_disk=data['min_disk'],
description=data['description'],
architecture=data['architecture'])
class ImageViewTests(test.ResetImageAPIVersionMixin, test.TestCase):
@mock.patch.object(api.glance, 'get_image_schemas')
@mock.patch.object(api.glance, 'image_list_detailed')
def test_image_create_get(self, mock_image_list, mock_schemas_list):
mock_image_list.side_effect = [
[self.images.list(), False, False],
[self.images.list(), False, False]
]
image_calls = [
mock.call(test.IsHttpRequest(), filters={'disk_format': 'aki'}),
mock.call(test.IsHttpRequest(), filters={'disk_format': 'ari'})
]
url = reverse('horizon:project:images:images:create')
res = self.client.get(url)
self.assertTemplateUsed(res, 'project/images/images/create.html')
mock_image_list.assert_has_calls(image_calls)
@override_settings(IMAGES_ALLOW_LOCATION=True)
@mock.patch.object(api.glance, 'get_image_schemas')
def test_image_create_post_location_v2(self, mock_schemas_list):
mock_schemas_list.return_value = self.image_schemas.first()
data = {
'source_type': 'url',
'image_url': 'http://cloud-images.ubuntu.com/releases/'
'oneiric/release/ubuntu-11.10-server-cloudimg'
'-amd64-disk1.img'}
api_data = {'location': data['image_url']}
self._test_image_create(data, api_data)
@mock.patch.object(api.glance, 'get_image_schemas')
def test_image_create_post_upload_v2(self, mock_schemas_list):
mock_schemas_list.return_value = self.image_schemas.first()
temp_file = tempfile.NamedTemporaryFile()
temp_file.write(b'123')
temp_file.flush()
temp_file.seek(0)
data = {'source_type': 'file',
'image_file': temp_file}
api_data = {'data': test.IsA(InMemoryUploadedFile)}
self._test_image_create(data, api_data)
@mock.patch.object(api.glance, 'get_image_schemas')
def test_image_create_post_with_kernel_ramdisk_v2(self, mock_schemas_list):
mock_schemas_list.return_value = self.image_schemas.first()
temp_file = tempfile.NamedTemporaryFile()
temp_file.write(b'123')
temp_file.flush()
temp_file.seek(0)
data = {
'source_type': 'file',
'image_file': temp_file,
'kernel_id': '007e7d55-fe1e-4c5c-bf08-44b4a496482e',
'ramdisk_id': '007e7d55-fe1e-4c5c-bf08-44b4a496482a'
}
api_data = {'data': test.IsA(InMemoryUploadedFile)}
self._test_image_create(data, api_data)
@mock.patch.object(api.glance, 'image_create')
@mock.patch.object(api.glance, 'image_list_detailed')
def _test_image_create(self, extra_form_data, extra_api_data,
mock_image_list, mock_image_create):
data = {
'name': 'Ubuntu 11.10',
'description': 'Login with admin/admin',
'disk_format': 'qcow2',
'architecture': 'x86-64',
'min_disk': 15,
'min_ram': 512,
'is_public': True,
'protected': False,
'method': 'CreateImageForm'}
data.update(extra_form_data)
api_data = {'container_format': 'bare',
'disk_format': data['disk_format'],
'protected': False,
'min_disk': data['min_disk'],
'min_ram': data['min_ram'],
'name': data['name']}
if api.glance.VERSIONS.active < 2:
api_data.update({'is_public': True,
'properties': {
'description': data['description'],
'architecture': data['architecture']}
})
else:
api_data.update({'visibility': 'public',
'description': data['description'],
'architecture': data['architecture']
})
api_data.update(extra_api_data)
mock_image_list.side_effect = [
[self.images.list(), False, False],
[self.images.list(), False, False]
]
image_list_calls = [
mock.call(test.IsHttpRequest(), filters={'disk_format': 'aki'}),
mock.call(test.IsHttpRequest(), filters={'disk_format': 'ari'})
]
mock_image_create.return_value = self.images.first()
url = reverse('horizon:project:images:images:create')
res = self.client.post(url, data)
self.assertNoFormErrors(res)
self.assertEqual(res.status_code, 302)
mock_image_list.assert_has_calls(image_list_calls)
mock_image_create.assert_called_once_with(test.IsHttpRequest(),
**api_data)
@mock.patch.object(api.glance, 'image_get')
def _test_image_detail_get(self, image, mock_image_get):
mock_image_get.return_value = image
res = self.client.get(reverse('horizon:project:images:images:detail',
args=[image.id]))
self.assertTemplateUsed(res,
'horizon/common/_detail.html')
self.assertEqual(res.context['image'].name, image.name)
self.assertEqual(res.context['image'].protected, image.protected)
mock_image_get.assert_called_once_with(test.IsHttpRequest(), image.id)
def test_image_detail_get_v2(self):
image = self.imagesV2.first()
self._test_image_detail_get(image)
@mock.patch.object(api.glance, 'image_get')
def _test_image_detail_custom_props_get(self, image, mock_image_get):
mock_image_get.return_value = image
res = self.client.get(reverse('horizon:project:images:images:detail',
args=[image.id]))
image_props = res.context['image_props']
# Test description property not displayed
image_keys = [prop[0] for prop in image_props]
self.assertNotIn(('description'), image_keys)
# Test custom properties are sorted
self.assertLess(image_props.index(('bar', 'bar', 'bar val')),
image_props.index(('foo', 'foo', 'foo val')))
# Test all custom properties appear in template
self.assertContains(res, '<dt title="bar">bar</dt>')
self.assertContains(res, '<dd>bar val</dd>')
self.assertContains(res, '<dt title="foo">foo</dt>')
self.assertContains(res, '<dd>foo val</dd>')
mock_image_get.assert_called_once_with(test.IsHttpRequest(), image.id)
def test_image_detail_custom_props_get_v2(self):
image = self.imagesV2.list()[2]
self._test_image_detail_custom_props_get(image)
@mock.patch.object(api.glance, 'image_get')
def _test_protected_image_detail_get(self, image, mock_image_get):
mock_image_get.return_value = image
res = self.client.get(
reverse('horizon:project:images:images:detail',
args=[image.id]))
self.assertTemplateUsed(res,
'horizon/common/_detail.html')
self.assertEqual(res.context['image'].protected, image.protected)
mock_image_get.assert_called_once_with(test.IsHttpRequest(),
image.id)
def test_protected_image_detail_get_v2(self):
image = self.imagesV2.list()[1]
self._test_protected_image_detail_get(image)
@mock.patch.object(api.glance, 'image_get')
def test_image_detail_get_with_exception(self, mock_image_get):
image = self.images.first()
mock_image_get.side_effect = self.exceptions.glance
url = reverse('horizon:project:images:images:detail',
args=[image.id])
res = self.client.get(url)
self.assertRedirectsNoFollow(res, IMAGES_INDEX_URL)
mock_image_get.assert_called_once_with(test.IsHttpRequest(), image.id)
@mock.patch.object(api.glance, 'image_get')
def test_image_update_get(self, mock_image_get):
image = self.images.filter(is_public=True)[0]
mock_image_get.return_value = image
res = self.client.get(
reverse('horizon:project:images:images:update',
args=[image.id]))
self.assertTemplateUsed(res,
'project/images/images/_update.html')
self.assertEqual(res.context['image'].name, image.name)
# Bug 1076216 - is_public checkbox not being set correctly
self.assertContains(res, "<input type='checkbox' id='id_is_public'"
" name='is_public' checked='checked'>",
html=True,
msg_prefix="The is_public checkbox is not checked")
mock_image_get.assert_called_once_with(test.IsHttpRequest(), image.id)
class OwnerFilterTests(test.TestCase):
def setUp(self):
super().setUp()
self.table = mock.Mock(spec=horizon_tables.DataTable)
self.table.request = self.request
@override_settings(IMAGES_LIST_FILTER_TENANTS=[{'name': 'Official',
'tenant': 'officialtenant',
'icon': 'fa-check'}])
def test_filter(self):
all_images = self.images.list()
table = self.table
self.filter_tenants = settings.IMAGES_LIST_FILTER_TENANTS
filter_ = tables.OwnerFilter()
images = filter_.filter(table, all_images, 'project')
self.assertEqual(images, self._expected('project'))
images = filter_.filter(table, all_images, 'public')
self.assertEqual(images, self._expected('public'))
images = filter_.filter(table, all_images, 'shared')
self.assertEqual(images, self._expected('shared'))
images = filter_.filter(table, all_images, 'officialtenant')
self.assertEqual(images, self._expected('officialtenant'))
def _expected(self, filter_string):
my_tenant_id = self.request.user.tenant_id
images = self.images.list()
        special = [t['tenant'] for t in self.filter_tenants]
if filter_string == 'public':
return [im for im in images if im.is_public]
if filter_string == 'shared':
return [im for im in images
if (not im.is_public and
im.owner != my_tenant_id and
im.owner not in special)]
if filter_string == 'project':
filter_string = my_tenant_id
return [im for im in images if im.owner == filter_string]
|
py
|
1a5dcc91054c45580854afa853b07d8cdaacef6b
|
# contains bunch of buggy examples
# taken from https://hackernoon.com/10-common-security-gotchas-in-python-and-how-to-avoid-them-e19fbe265e03
import pickle as cPickle  # stdlib pickle; the original article used Python 2's cPickle
import subprocess
import base64
import flask
# Input injection
def transcode_file(request, filename):
command = 'ffmpeg -i "{source}" output_file.mpg'.format(source=filename)
subprocess.call(command, shell=True) # a bad idea!
# Assert statements
def assertAdmin(request, user):
assert user.is_admin, 'user does not have access'
# secure code...
# Pickles
class RunBinSh(object):
def __reduce__(self):
return (subprocess.Popen, (('/bin/sh',),))
app = flask.Flask(__name__)
@app.route('/')
def index():
module = flask.request.args.get("module")
exec("import urllib%s as urllib" % module) # Noncompliant
print(base64.b64encode(cPickle.dumps(RunBinSh())))
|
py
|
1a5dcdeafa198000dd065f190e917f0d4a2d1eec
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import uuid
# import the RestClient class
from epages_client.client import RestClient
# import base class for unit testing
from .base_unit_test import BaseUnitTest
class TestNewsletterMethods(BaseUnitTest):
'''A class for testing newsletter related methods on RestClient class'''
def setUp(self):
self.client = RestClient(
os.environ["EPAGES_API_URL"], os.environ["EPAGES_API_TOKEN"])
self.params = {
"query": {},
"param1": "",
"param2": ""
}
def test_001_get_newsletter_campaigns(self):
newsletter_campaigns = self.client.get_newsletter_campaigns(
self.params)
self.assertEqual(isinstance(newsletter_campaigns, dict), True)
def test_002_get_newsletter_campaign_subscribers_no_id(self):
with self.assertRaises(ValueError) as e:
newsletter_subscribers = self.client.get_newsletter_campaign_subscribers(
self.params)
def test_003_get_newsletter_campaign_subscribers_false_id(self):
self.params["param1"] = str(uuid.uuid4())
with self.assertRaises(RuntimeError) as e:
newsletter_subscribers = self.client.get_newsletter_campaign_subscribers(
self.params)
def test_004_get_newsletter_campaign_subscribers(self):
newsletter_campaigns = self.client.get_newsletter_campaigns(
self.params)
# If there are some newsletters, check if the first one has subscribers
if newsletter_campaigns["results"] > 0:
campaign_id = newsletter_campaigns["items"][0]["campaignId"]
self.params["param1"] = campaign_id
newsletter_subscribers = self.client.get_newsletter_campaign_subscribers(
self.params)
self.assertEqual(isinstance(newsletter_subscribers, dict), True)
|
py
|
1a5dd090b9b390155727fa6455c00183f598afae
|
import os
from flask import Flask, request, make_response, redirect, url_for, send_from_directory
from werkzeug.utils import secure_filename
from convert import convert_file
from cleanup import cleanup
with open('template.html', 'r') as inp:
template = inp.read()
app = Flask(__name__)
app.config['upload_folder'] = '../uploads'
@app.route('/upload', methods=['POST'])
def upload_file():
# check if the post request has the file part
if 'file' not in request.files:
resp = make_response('No file provided', 400)
return resp
file = request.files['file']
# if user does not select file, browser also
# submit an empty part without filename
if file.filename == '':
resp = make_response('No file provided', 400)
return resp
if file and file.filename.lower().endswith('.docx'):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['upload_folder'], filename))
# Try to convert the file; redirect to success/fail page
try:
filename = convert_file(filename)
filename = cleanup(filename)
return redirect(url_for('converted_file', filename=filename))
except Exception as e:
return redirect(url_for('conversion_failure', error=e))
else:
resp = make_response(
            f'Invalid file type (.docx required): {file.filename}', 400)
return resp
@app.route('/result/<filename>', methods=['GET'])
def converted_file(filename):
download_url = url_for('download_file', filename=filename)
home_url = url_for('landing')
return template.format(
        body=f'''<p>The file was converted successfully: <a href="{download_url}">{filename}</a></p>
<p><a href="{home_url}">Convert another file</a>.</p>''')
@app.route('/download/<filename>', methods=['GET'])
def download_file(filename):
path = os.path.join('..', 'converted')
if not os.path.exists(os.path.join(path, filename)):
return make_response('File not found', 404)
return send_from_directory(path, filename)
@app.route('/failure/<error>', methods=['GET'])
def conversion_failure(error):
    return template.format(body=f'Conversion error ({error})')
@app.route('/', methods=['GET'])
def landing():
    return template.format(body="""<h1>Upload a .docx file</h1>
<form method="post" enctype="multipart/form-data" action="/upload">
<input type="file" name="file">
<input type="submit" value="Upload">
</form>""")
|
py
|
1a5dd15307a04124c6796114dfe5912ba71b33e4
|
#!/usr/bin/env python
"""
Usage example employing Lasagne for digit recognition using the MNIST dataset.
This example is deliberately structured as a long flat file, focusing on how
to use Lasagne, instead of focusing on writing maximally modular and reusable
code. It is used as the foundation for the introductory Lasagne tutorial:
http://lasagne.readthedocs.org/en/latest/user/tutorial.html
More in-depth examples and reproductions of paper results are maintained in
a separate repository: https://github.com/Lasagne/Recipes
"""
from __future__ import print_function
import sys
import os
import time
import numpy as np
import theano
import theano.tensor as T
import lasagne
# ################## Download and prepare the MNIST dataset ##################
# This is just some way of getting the MNIST dataset from an online location
# and loading it into numpy arrays. It doesn't involve Lasagne at all.
def load_dataset():
# We first define a download function, supporting both Python 2 and 3.
if sys.version_info[0] == 2:
from urllib import urlretrieve
else:
from urllib.request import urlretrieve
def download(filename, source='http://yann.lecun.com/exdb/mnist/'):
print("Downloading %s" % filename)
urlretrieve(source + filename, filename)
# We then define functions for loading MNIST images and labels.
# For convenience, they also download the requested files if needed.
import gzip
def load_mnist_images(filename):
if not os.path.exists(filename):
download(filename)
# Read the inputs in Yann LeCun's binary format.
with gzip.open(filename, 'rb') as f:
data = np.frombuffer(f.read(), np.uint8, offset=16)
# The inputs are vectors now, we reshape them to monochrome 2D images,
# following the shape convention: (examples, channels, rows, columns)
data = data.reshape(-1, 1, 28, 28)
# The inputs come as bytes, we convert them to float32 in range [0,1].
# (Actually to range [0, 255/256], for compatibility to the version
# provided at http://deeplearning.net/data/mnist/mnist.pkl.gz.)
return data / np.float32(256)
def load_mnist_labels(filename):
if not os.path.exists(filename):
download(filename)
# Read the labels in Yann LeCun's binary format.
with gzip.open(filename, 'rb') as f:
data = np.frombuffer(f.read(), np.uint8, offset=8)
# The labels are vectors of integers now, that's exactly what we want.
return data
# We can now download and read the training and test set images and labels.
X_train = load_mnist_images('train-images-idx3-ubyte.gz')
y_train = load_mnist_labels('train-labels-idx1-ubyte.gz')
X_test = load_mnist_images('t10k-images-idx3-ubyte.gz')
y_test = load_mnist_labels('t10k-labels-idx1-ubyte.gz')
# We reserve the last 10000 training examples for validation.
X_train, X_val = X_train[:-10000], X_train[-10000:]
y_train, y_val = y_train[:-10000], y_train[-10000:]
# We just return all the arrays in order, as expected in main().
# (It doesn't matter how we do this as long as we can read them again.)
return X_train, y_train, X_val, y_val, X_test, y_test
# ##################### Build the neural network model #######################
# This script supports three types of models. For each one, we define a
# function that takes a Theano variable representing the input and returns
# the output layer of a neural network model built in Lasagne.
def build_cnn(input_var=None):
# As a third model, we'll create a CNN of two convolution + pooling stages
# and a fully-connected hidden layer in front of the output layer.
# Input layer, as usual:
network = lasagne.layers.InputLayer(shape=(None, 1, 28, 28),
input_var=input_var)
# This time we do not apply input dropout, as it tends to work less well
# for convolutional layers.
# Convolutional layer with 32 kernels of size 5x5. Strided and padded
# convolutions are supported as well; see the docstring.
network = lasagne.layers.Conv2DLayer(
network, num_filters=32, filter_size=(5, 5),
nonlinearity=lasagne.nonlinearities.rectify,
W=lasagne.init.GlorotUniform())
# Expert note: Lasagne provides alternative convolutional layers that
# override Theano's choice of which implementation to use; for details
# please see http://lasagne.readthedocs.org/en/latest/user/tutorial.html.
# Max-pooling layer of factor 2 in both dimensions:
network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2, 2))
# Another convolution with 32 5x5 kernels, and another 2x2 pooling:
network = lasagne.layers.Conv2DLayer(
network, num_filters=32, filter_size=(5, 5),
nonlinearity=lasagne.nonlinearities.rectify)
network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2, 2))
# A fully-connected layer of 256 units with 50% dropout on its inputs:
network = lasagne.layers.DenseLayer(
lasagne.layers.dropout(network, p=.5),
num_units=256,
nonlinearity=lasagne.nonlinearities.rectify)
# And, finally, the 10-unit output layer with 50% dropout on its inputs:
network = lasagne.layers.DenseLayer(
lasagne.layers.dropout(network, p=.5),
num_units=10,
nonlinearity=lasagne.nonlinearities.softmax)
return network
# ############################# Batch iterator ###############################
# This is just a simple helper function iterating over training data in
# mini-batches of a particular size, optionally in random order. It assumes
# data is available as numpy arrays. For big datasets, you could load numpy
# arrays as memory-mapped files (np.load(..., mmap_mode='r')), or write your
# own custom data iteration function. For small datasets, you can also copy
# them to GPU at once for slightly improved performance. This would involve
# several changes in the main program, though, and is not demonstrated here.
# Notice that this function returns only mini-batches of size `batchsize`.
# If the size of the data is not a multiple of `batchsize`, it will not
# return the last (remaining) mini-batch.
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
assert len(inputs) == len(targets)
if shuffle:
indices = np.arange(len(inputs))
np.random.shuffle(indices)
for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):
if shuffle:
excerpt = indices[start_idx:start_idx + batchsize]
else:
excerpt = slice(start_idx, start_idx + batchsize)
yield inputs[excerpt], targets[excerpt]
############################# Freeze Layers ###############################
# so they aren't updated during training
def freeze (layer):
for param in layer.params:
layer.params[param].discard('trainable')
return layer # optional, if you want to use it in-line
# usage:
# for layer in lasagne.layers.get_all_layers(output_layer):
# if layer is not output_layer:
# freeze(layer)
def unfreeze (layer):
    for param in layer.params:
        layer.params[param].add('trainable')  # re-add the tag that freeze() removed
    return layer # optional, if you want to use it in-line
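# Note (illustrative): freeze() works by discarding the 'trainable' tag, so the
# order of operations matters -- collect the parameter list only *after* freezing:
#
#   for layer in lasagne.layers.get_all_layers(network):
#       if layer is not network:
#           freeze(layer)
#   params = lasagne.layers.get_all_params(network, trainable=True)  # frozen layers excluded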
# ############################## Main program ################################
# Everything else will be handled in our main program now. We could pull out
# more functions to better separate the code, but it wouldn't make it any
# easier to read.
def main(model='cnn', num_epochs=100):
# Load the dataset
print("Loading data...")
X_train, y_train, X_val, y_val, X_test, y_test = load_dataset()
# Prepare Theano variables for inputs and targets
input_var = T.tensor4('inputs')
target_var = T.ivector('targets')
# Create neural network model (depending on first command line parameter)
print("Building model and compiling functions...")
if model == 'cnn':
network = build_cnn(input_var)
else:
print("Unrecognized model type %r." % model)
return
# Create a loss expression for training, i.e., a scalar objective we want
# to minimize (for our multi-class problem, it is the cross-entropy loss):
prediction = lasagne.layers.get_output(network)
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
loss = loss.mean()
# We could add some weight decay as well here, see lasagne.regularization.
# Create update expressions for training, i.e., how to modify the
# parameters at each training step. Here, we'll use Stochastic Gradient
# Descent (SGD) with Nesterov momentum, but Lasagne offers plenty more.
    # Only train the final layers: freeze everything below the output layer first,
    # then collect the remaining trainable parameters.
    # get_all_layers() gathers all layers below one or more given Layer instances,
    # including the given layer(s).
    for layer in lasagne.layers.get_all_layers(network):
        if layer is not network:
            freeze(layer)
    params = lasagne.layers.get_all_params(network, trainable=True)
updates = lasagne.updates.nesterov_momentum(
loss, params, learning_rate=0.01, momentum=0.9)
# Create a loss expression for validation/testing. The crucial difference
# here is that we do a deterministic forward pass through the network,
# disabling dropout layers.
test_prediction = lasagne.layers.get_output(network, deterministic=True)
test_loss = lasagne.objectives.categorical_crossentropy(test_prediction,
target_var)
test_loss = test_loss.mean()
# As a bonus, also create an expression for the classification accuracy:
test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),
dtype=theano.config.floatX)
# Compile a function performing a training step on a mini-batch (by giving
# the updates dictionary) and returning the corresponding training loss:
train_fn = theano.function([input_var, target_var], loss, updates=updates)
# Compile a second function computing the validation loss and accuracy:
val_fn = theano.function([input_var, target_var], [test_loss, test_acc])
# Finally, launch the training loop.
print("Starting training...")
# We iterate over epochs:
for epoch in range(num_epochs):
# In each epoch, we do a full pass over the training data:
train_err = 0
train_batches = 0
start_time = time.time()
for batch in iterate_minibatches(X_train, y_train, 500, shuffle=True):
inputs, targets = batch
train_err += train_fn(inputs, targets)
train_batches += 1
# And a full pass over the validation data:
val_err = 0
val_acc = 0
val_batches = 0
for batch in iterate_minibatches(X_val, y_val, 500, shuffle=False):
inputs, targets = batch
err, acc = val_fn(inputs, targets)
val_err += err
val_acc += acc
val_batches += 1
# Then we print the results for this epoch:
print("Epoch {} of {} took {:.3f}s".format(
epoch + 1, num_epochs, time.time() - start_time))
print(" training loss:\t\t{:.6f}".format(train_err / train_batches))
print(" validation loss:\t\t{:.6f}".format(val_err / val_batches))
print(" validation accuracy:\t\t{:.2f} %".format(
val_acc / val_batches * 100))
# After training, we compute and print the test error:
test_err = 0
test_acc = 0
test_batches = 0
for batch in iterate_minibatches(X_test, y_test, 500, shuffle=False):
inputs, targets = batch
err, acc = val_fn(inputs, targets)
test_err += err
test_acc += acc
test_batches += 1
print("Final results:")
print(" test loss:\t\t\t{:.6f}".format(test_err / test_batches))
print(" test accuracy:\t\t{:.2f} %".format(
test_acc / test_batches * 100))
# Optionally, you could now dump the network weights to a file like this:
# np.savez('model.npz', *lasagne.layers.get_all_param_values(network))
#
# And load them again later on like this:
# with np.load('model.npz') as f:
# param_values = [f['arr_%d' % i] for i in range(len(f.files))]
# lasagne.layers.set_all_param_values(network, param_values)
if __name__ == '__main__':
if ('--help' in sys.argv) or ('-h' in sys.argv):
print("Trains a neural network on MNIST using Lasagne.")
print("Usage: %s [MODEL [EPOCHS]]" % sys.argv[0])
print()
print("MODEL: 'mlp' for a simple Multi-Layer Perceptron (MLP),")
print(" 'custom_mlp:DEPTH,WIDTH,DROP_IN,DROP_HID' for an MLP")
print(" with DEPTH hidden layers of WIDTH units, DROP_IN")
print(" input dropout and DROP_HID hidden dropout,")
print(" 'cnn' for a simple Convolutional Neural Network (CNN).")
print("EPOCHS: number of training epochs to perform (default: 500)")
else:
kwargs = {}
if len(sys.argv) > 1:
kwargs['model'] = sys.argv[1]
if len(sys.argv) > 2:
kwargs['num_epochs'] = int(sys.argv[2])
main(**kwargs)
|
py
|
1a5dd1eab2e6d36f959d96f07830aeabc23b841d
|
# Relationships!!
# Up until this point, you've only looked at one variable at a time. In this chapter, you'll explore relationships between variables two at a time, using scatter plots and other visualizations to extract insights from a new dataset obtained from the Behavioral Risk Factor Surveillance Survey (BRFSS). You'll also learn how to quantify those relationships using correlation and simple regression.
# PMF of age
# Do people tend to gain weight as they get older? We can answer this question by visualizing the relationship between weight and age. But before we make a scatter plot, it is a good idea to visualize distributions one variable at a time. Here, you'll visualize age using a bar chart first. Recall that all PMF objects have a .bar() method to make a bar chart.
# The BRFSS dataset includes a variable, 'AGE' (note the capitalization!), which represents each respondent's age. To protect respondents' privacy, ages are rounded off into 5-year bins. 'AGE' contains the midpoint of the bins
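# Note: these snippets assume the course environment, where `brfss` is a preloaded
# DataFrame and `Pmf`, `np`, `plt` (matplotlib.pyplot) and `sns` (seaborn) are
# already imported; run them in that context or add those imports yourself.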
# Extract age
age = brfss['AGE']
# Plot the PMF
Pmf(age).bar()
# Label the axes
plt.xlabel('Age in years')
plt.ylabel('PMF')
plt.show()
# Scatter plot
# Now let's make a scatterplot of weight versus age. To make the code run faster, I've selected only the first 1000 rows from the brfss DataFrame.
# weight and age have already been extracted for you. Your job is to use plt.plot() to make a scatter plot.
# Select the first 1000 respondents
brfss = brfss[:1000]
# Extract age and weight
age = brfss['AGE']
weight = brfss['WTKG3']
# Make a scatter plot
plt.plot(age,weight, 'o', alpha = 0.1 )
plt.xlabel('Age in years')
plt.ylabel('Weight in kg')
plt.show()
# Jittering
# In the previous exercise, the ages fall in columns because they've been rounded into 5-year bins. If we jitter them, the scatter plot will show the relationship more clearly. Recall how Allen jittered height and weight in the video:
# height_jitter = height + np.random.normal(0, 2, size=len(brfss))
# weight_jitter = weight + np.random.normal(0, 2, size=len(brfss))
# Select the first 1000 respondents
brfss = brfss[:1000]
# Add jittering to age
age = brfss['AGE'] + np.random.normal(0,2.5, size = len(brfss))
# Extract weight
weight = brfss['WTKG3']
# Make a scatter plot
plt.plot(age, weight, 'o',markersize = 5, alpha =0.2 )
plt.xlabel('Age in years')
plt.ylabel('Weight in kg')
plt.show()
# Height and weight
# Previously we looked at a scatter plot of height and weight, and saw that taller people tend to be heavier. Now let's take a closer look using a box plot. The brfss DataFrame contains a variable '_HTMG10' that represents height in centimeters, binned into 10 cm groups.
# Recall how Allen created the box plot of 'AGE' and 'WTKG3' in the video, with the y-axis on a logarithmic scale:
# sns.boxplot(x='AGE', y='WTKG3', data=data, whis=10)
# plt.yscale('log')
# Drop rows with missing data
data = brfss.dropna(subset=['_HTMG10', 'WTKG3'])
# Make a box plot
sns.boxplot(x = '_HTMG10', y = 'WTKG3', data = data, whis = 10)
# Plot the y-axis on a log scale
plt.yscale('log')
# Remove unneeded lines and label axes
sns.despine(left=True, bottom=True)
plt.xlabel('Height in cm')
plt.ylabel('Weight in kg')
plt.show()
# Distribution of income
# In the next two exercises we'll look at relationships between income and other variables. In the BRFSS, income is represented as a categorical variable; that is, respondents are assigned to one of 8 income categories. The variable name is 'INCOME2'. Before we connect income with anything else, let's look at the distribution by computing the PMF. Recall that all Pmf objects have a .bar() method.
# Extract income
income = brfss['INCOME2']
# Plot the PMF
Pmf(income).bar()
# Label the axes
plt.xlabel('Income level')
plt.ylabel('PMF')
plt.show()
# Income and height
# Let's now use a violin plot to visualize the relationship between income and height.
# Drop rows with missing data
data = brfss.dropna(subset=['INCOME2', 'HTM4'])
# Make a violin plot
sns.violinplot(x = 'INCOME2', y ='HTM4', data=data, inner = None)
# Remove unneeded lines and label axes
sns.despine(left=True, bottom=True)
plt.xlabel('Income level')
plt.ylabel('Height in cm')
plt.show()
# Computing correlations
# The purpose of the BRFSS is to explore health risk factors, so it includes questions about diet. The variable '_VEGESU1' represents the number of servings of vegetables respondents reported eating per day.
# Let's see how this variable relates to age and income.
# Select columns
columns = ['AGE', 'INCOME2', '_VEGESU1']
subset = brfss[columns]
# Compute the correlation matrix
print(subset.corr())
# Income and vegetables
# As we saw in a previous exercise, the variable '_VEGESU1' represents the number of vegetable servings respondents reported eating per day.
# Let's estimate the slope of the relationship between vegetable consumption and income.
from scipy.stats import linregress
# Extract the variables
subset = brfss.dropna(subset=['INCOME2', '_VEGESU1'])
xs = subset['INCOME2']
ys = subset['_VEGESU1']
# Compute the linear regression
res = linregress(xs,ys)
print(res)
# Fit a line
# Continuing from the previous exercise:
# Assume that xs and ys contain income codes and daily vegetable consumption, respectively, and
# res contains the results of a simple linear regression of ys onto xs.
# Now, you're going to compute the line of best fit. NumPy has been imported for you as np.
# Plot the scatter plot
plt.clf()
x_jitter = xs + np.random.normal(0, 0.15, len(xs))
plt.plot(x_jitter, ys, 'o', alpha=0.2)
# Plot the line of best fit
fx = np.array([xs.min(),xs.max()])
fy = res.intercept+res.slope*fx
plt.plot(fx, fy, '-', alpha=0.7)
plt.xlabel('Income code')
plt.ylabel('Vegetable servings per day')
plt.ylim([0, 6])
plt.show()
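# Illustrative follow-up (sketch): the fitted line gives a point prediction, e.g.
# the predicted vegetable servings for income code 5 would be
#   res.intercept + res.slope * 5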
|
py
|
1a5dd308b8942d2a35f7f1a4cedde608b55bc563
|
# -*- coding: utf-8 -*-
__all__ = ["BlockedQuadPotential", "WindowedDiagAdapt", "WindowedFullAdapt"]
import numpy as np
from pymc3.step_methods.hmc.quadpotential import (
QuadPotential,
_WeightedVariance,
)
from scipy.linalg import LinAlgError, cholesky, solve_triangular
from .estimator import _WeightedCovariance
class BlockedQuadPotential(QuadPotential):
def __init__(self, n, groups, dtype="float64"):
self.dtype = dtype
self.n = int(n)
self.groups = groups
self.ordering = None
self.vmap = None
def set_ordering(self, ordering):
self.ordering = ordering
self.vmap = []
inds = np.arange(self.n)
for group in self.groups:
self.vmap.append(
np.concatenate(
[inds[self.ordering[v.name].slc] for v in group.variables]
)
)
def reset(self):
for group in self.groups:
group.potential.reset()
def velocity(self, x, out=None):
if out is None:
out = np.zeros_like(x)
for inds, group in zip(self.vmap, self.groups):
out[inds] = group.potential.velocity(x[inds])
return out
def energy(self, x, velocity=None):
if velocity is None:
velocity = self.velocity(x)
return 0.5 * np.dot(x, velocity)
def velocity_energy(self, x, v_out):
self.velocity(x, out=v_out)
return self.energy(x, v_out)
def random(self):
out = np.empty(self.n)
for inds, group in zip(self.vmap, self.groups):
out[inds] = group.potential.random()
return out
def update(self, sample, grad, tune):
if not tune:
return
for inds, group in zip(self.vmap, self.groups):
group.potential.update(sample[inds], grad[inds], tune)
def raise_ok(self, vmap):
for group in self.groups:
group.potential.raise_ok(vmap)
class WindowedDiagAdapt(QuadPotential):
def __init__(
self,
ndim,
update_steps=None,
recompute_interval=1,
regularization_steps=0,
regularization_variance=1e-8,
dtype="float64",
):
self.dtype = dtype
self._ndim = int(ndim)
if update_steps is not None:
self._update_steps = np.atleast_1d(update_steps).astype(int)
else:
self._update_steps = np.array([], dtype=int)
self._recompute_interval = int(recompute_interval)
self._regularization_steps = int(regularization_steps)
self._regularization_variance = float(regularization_variance)
self.reset()
def reset(self):
self._n_samples = 0
self.new_variance()
self.update_factors()
self._foreground = self.new_estimator()
self._background = self.new_estimator()
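    # Illustrative schedule (hypothetical values): with update_steps = [100, 200,
    # 400, 800], update() below ignores the first 100 draws (and anything past 800),
    # accumulates samples afterwards, and swaps the foreground estimator for the
    # background one at each later listed step, recomputing the variance as it goes.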
def update(self, sample, grad, tune):
if not tune:
return
self._n_samples += 1
# If we're in warmup or cooldown, we shouldn't update the variance
if (
self._n_samples <= self._update_steps[0]
or self._n_samples > self._update_steps[-1]
):
return
# Add the sample to the estimators
self._foreground.add_sample(sample, weight=1)
self._background.add_sample(sample, weight=1)
# During the first slow window, never update the variance estimate
if self._n_samples < self._update_steps[1]:
return
# If this is one of the update steps, update the estimators
if self._n_samples in self._update_steps:
self._foreground = self._background
self._background = self.new_estimator()
self.update_var()
# Update the variance every `recompute_interval` steps
elif (
self._recompute_interval
and self._n_samples % self._recompute_interval == 0
):
self.update_var()
def set_var(self, var):
self._var = var
self.update_factors()
def update_var(self):
self._foreground.current_variance(out=self._var)
if self._regularization_steps > 0:
N = self._foreground.n_samples
n = self._regularization_steps
self._var *= N / (N + n)
self._var[self._diag_inds] += (
self._regularization_variance * n / (N + n)
)
self.update_factors()
def energy(self, x, velocity=None):
if velocity is None:
velocity = self.velocity(x)
return 0.5 * x.dot(velocity)
def velocity_energy(self, x, v_out):
self.velocity(x, out=v_out)
return 0.5 * np.dot(x, v_out)
#
# The following methods should be overloaded by subclasses
#
def new_estimator(self):
return _WeightedVariance(self._ndim, dtype=self.dtype)
def new_variance(self):
self._var = np.ones(self._ndim, dtype=self.dtype)
self._diag_inds = np.arange(self._ndim)
def update_factors(self):
self._inv_sd = 1.0 / np.sqrt(self._var)
def velocity(self, x, out=None):
return np.multiply(self._var, x, out=out)
def random(self):
vals = np.random.normal(size=self._ndim).astype(self.dtype)
return self._inv_sd * vals
def raise_ok(self, vmap):
if np.any(~np.isfinite(self._inv_sd)):
raise ValueError("non-finite inverse variances found")
class WindowedFullAdapt(WindowedDiagAdapt):
def new_estimator(self):
return _WeightedCovariance(self._ndim, dtype=self.dtype)
def new_variance(self):
self._var = np.eye(self._ndim, dtype=self.dtype)
self._diag_inds = np.diag_indices(self._ndim)
def update_factors(self):
try:
self._chol = cholesky(self._var, lower=True)
except (LinAlgError, ValueError) as error:
self._chol_error = error
else:
self._chol_error = None
def velocity(self, x, out=None):
return np.dot(self._var, x, out=out)
def random(self):
vals = np.random.normal(size=self._ndim).astype(self.dtype)
return solve_triangular(self._chol.T, vals, overwrite_b=True)
def raise_ok(self, vmap):
if self._chol_error is not None:
raise ValueError("{0}".format(self._chol_error))
|
py
|
1a5dd383a6250ac1ea04d3777088e5170892e1d0
|
from django.contrib.auth import login
from rest_framework import serializers, status
from rest_framework.views import APIView
from rest_framework.exceptions import ValidationError
from drf_yasg.utils import swagger_auto_schema
from drf_yasg import openapi
from common.response import create_response, create_response_dict
from your_project.users.selectors import user_get_login_data, user_get_by_email
from your_project.users.services import user_create
from ..services import user_create_access_token
example_responses = {
"201": openapi.Response(
description="Successful creation of User",
examples={
"application/json": create_response_dict(
data={
"id": 1,
"email": "[email protected]",
"access": "exxddnjsjkdkdcdkdkcdkcdncdkndkk...",
},
status=status.HTTP_201_CREATED,
)
},
),
"400": openapi.Response(
description="User already exising!",
examples={
"application/json": {
"errors": [{"message": "User already existing!", "code": "invalid"}],
"data": None,
"statusCode": 400,
}
},
),
}
class InputSerializer(serializers.Serializer):
email = serializers.EmailField()
password = serializers.CharField()
class Meta:
ref_name = "UserSignUpInputSerializer"
class UserSignUp(APIView):
@swagger_auto_schema(
security=[],
request_body=InputSerializer,
responses=example_responses,
)
def post(self, request):
serializer = InputSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
request_data = serializer.validated_data
user = user_get_by_email(email=request_data["email"])
if user is None:
user = user_create(
email=request_data["email"], password=request_data["password"]
)
login(request=request, user=user)
return create_response(
data={
**user_get_login_data(user=user),
**user_create_access_token(user=user),
},
status=status.HTTP_201_CREATED,
)
else:
raise ValidationError(detail="User already existing!")
|
py
|
1a5dd3bd30bdb65c2240bcb264804f03df9e8c03
|
import hashlib
import random
import json
import os
import os.path
import redleader.util as util
class Resource(object):
def __init__(self, context, cf_params):
super(Resource, self).__init__()
self._context = context
self._dependencies = []
self._cf_params = cf_params
self._user_id = None
self._multiplicity_uid = None
self._generated_id = None
self._generated_resources = None
def get_dependencies(self):
return self._dependencies
def is_static(self):
"""
Static resources only generate cloud formation templates
when they don't already exist. Example uses: S3 buckets, SQS queues
"""
return False
def add_dependency(self, dep):
"""
Resource dependencies will be included in the cloud formation
`DependsOn` attribute to ensure correct creation order.
Dependencies must be added to the cluster or generated
as subresources of included resources.
"""
self._dependencies.append(dep)
def get_id(self):
"""
Each resource needs a reproducible UID that represents its state and multiplicty
State: If a key parameter to a resource changes, it's a different resource
Multiplicity: We need to be able to differentiate identical resources. e.g) t2.micro Instance #2 vs #3
Solution:
* Utilize _get_multiplicity to track # of identical resources produced
* Utilize _idempotent_params() to get a subset of a resource's
cloud formation template output, and hash it.
Implications:
* get_id() cannot be used inside of _cloud_formation_template()
** Instead, we'll output the placeholder {resource_id}
"""
if self._user_id is not None:
return self._user_id
if self._generated_id is None:
class_name = str(self.__class__.__name__).replace("Resource", "")
param_hash = self._param_hash()
if self._multiplicity_uid is None:
self._multiplicity_uid = Resource._get_multiplicity(class_name + param_hash)
self._generated_id = "RL%sN%sP%s" % (class_name,
self._multiplicity_uid,
param_hash)
if self._context.pretty_names():
h = hashlib.md5()
h.update(self._generated_id.encode('utf-8'))
ints = []
for x in range(2):
ints.append(int(h.hexdigest()[x * 8:(x+ 1) * 8], 16))
d = self._context.get_dict()
pretty_words = ""
for i in ints:
word = d[i % len(d)].lower().replace("'", "")
pretty_words += word[0].upper() + word[1:].lower()
self._generated_id = "%s%s%s" % (class_name, self._multiplicity_uid, pretty_words)
return self._generated_id
def _id_placeholder(self):
"""
Placeholder for use in _cloud_formation_template()
"""
return "{resource_id}"
def _param_hash(self):
key_params = self._idempotent_params()
template = self._cloud_formation_template()
extracted = {}
for k in key_params:
extracted[k] = template['Properties'][k]
h = hashlib.md5()
extracted_json = json.dumps(extracted, sort_keys=True)
h.update(str(extracted_json).encode('utf-8'))
return str(h.hexdigest()[0:10])
@classmethod
def _get_multiplicity(cls, uid):
if(not hasattr(cls, "_multiplicity_count")):
cls._multiplicity_count = {}
if uid in cls._multiplicity_count:
cls._multiplicity_count[uid] += 1
else:
cls._multiplicity_count[uid] = 1
return cls._multiplicity_count[uid]
@classmethod
def reset_multiplicity(cls):
cls._multiplicity_count = {}
def _idempotent_params(self):
"""
Returns the list of cloud formation parameters that must be the same
in order for two RedLeader resources to refer to the same deployed resource.
By default we assume that all parameters must be the same.
Example: we might change an EC2 instance's security group, but want the
RedLeader resource to refer to the same deployed server.
"""
template = self._cloud_formation_template()
return sorted(template['Properties'].keys())
def iam_service_policies(self):
"""
Return a list of objects usable by IAMRoleResource to generate
an IAM role representing access to this resource and its sub resources
"""
policies = []
for resource in self.generate_sub_resources():
policies += resource.iam_service_policies()
policies.append(self._iam_service_policy())
return policies
def _iam_service_policy(self):
raise NotImplementedError
def generate_sub_resources(self):
if self._generated_resources is None:
self._generated_resources = self._generate_sub_resources()
return self._generated_resources
def _generate_sub_resources(self):
"""
Generate any sub resources, if necessary
"""
return []
@staticmethod
def cf_ref(resource):
if resource is None:
return {}
return {"Ref": resource.get_id()}
@staticmethod
def cf_attr(resource, attr):
return {"Fn::GetAtt": [ resource.get_id(), attr ]}
@staticmethod
def replaceValues(obj, replaceMap):
if isinstance(obj, dict):
for key in obj:
if isinstance(obj[key], str):
obj[key] = util.multireplace(obj[key], replaceMap)
else:
obj[key] = Resource.replaceValues(obj[key], replaceMap)
if isinstance(obj, list):
new = []
for elem in obj:
if isinstance(elem, str) and elem in replaceMap:
new.append(replaceMap[elem])
else:
new.append(Resource.replaceValues(elem, replaceMap))
return new
return obj
def cloud_formation_template(self):
"""
Get the cloud formation template for this resource
"""
if(self.is_static() and self.resource_exists()):
# Don't create templates for static resources that exist
return None
cf_template = self._cloud_formation_template()
if cf_template is None:
return None
for param in self._cf_params:
cf_template['Properties'][param] = self._cf_params[param]
replaceMap = {"{resource_id}": self.get_id()}
for param in cf_template['Properties']:
cf_template['Properties'] = Resource.replaceValues(cf_template['Properties'], replaceMap)
cf_template["DependsOn"] = []
for dependency in self.get_dependencies():
if(not dependency.is_static() or not dependency.resource_exists()):
cf_template["DependsOn"].append(dependency.get_id())
cf_template["DependsOn"] = sorted(cf_template["DependsOn"])
if self.is_static():
# Don't delete static resources on cluster deletion
cf_template["DeletionPolicy"] = "Retain"
return cf_template
def find_deployed_resources(self):
"""
Finds already deployed resources that match this resource's configuration
"""
raise NotImplementedError
class CustomUserResource(Resource):
"""
CustomUserResource allows a cluster to provision and depend upon
resources that aren't yet implemented programatically
"""
def __init__(self, context, template):
        super(CustomUserResource, self).__init__(context, {})  # no extra CF params for a raw template
self._template = template
def cloud_formation_template(self):
"""
Get the cloud formation template for this resource
"""
return self._template
|
py
|
1a5dd4003538f0aed2987a16437ee90a32d8a82d
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: Ampel-HU-astro/ampel/contrib/hu/util/ned.py
# License: BSD-3-Clause
# Author: valery brinnel <[email protected]>
# Date: 14.09.2021
# Last Modified Date: 14.09.2021
# Last Modified By: valery brinnel <[email protected]>
from ampel.protocol.LoggerProtocol import LoggerProtocol
def check_ned_res(
cat_res: dict,
logger: LoggerProtocol,
spectroscopic: bool = False,
    z_range: None | tuple[float, float] = None
) -> bool:
if not cat_res.get('z'):
logger.info("No redshift found in NED result")
return True
if spectroscopic and cat_res.get('n_spectra', 0) == 0 and cat_res["zflag"] != "SPEC":
logger.info("Not a spectroscopic redshift")
return True
if z_range and (cat_res['z'] < z_range[0] or cat_res['z'] > z_range[1]):
logger.info("Redshift exceeds allowed values")
return True
return False
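# Minimal usage sketch (hypothetical catalog result and logger): the helper returns
# True when the NED match should be *rejected*, e.g.
#
#   reject = check_ned_res({"z": 0.12, "zflag": "SPEC", "n_spectra": 1}, logger,
#                          spectroscopic=True, z_range=(0.0, 0.3))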
|
py
|
1a5dd5d1de3392e043a16599454fee18e7ca9098
|
import logging
import os
import pytest
import sys
log_level = os.getenv('TEST_LOG_LEVEL', 'INFO').upper()
log_levels = ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL', 'EXCEPTION')
assert log_level in log_levels, \
'{} is not a valid log level. Use one of: {}'.format(log_level,
', '.join(log_levels))
# write everything to stdout due to the following circumstances:
# - shakedown uses print() aka stdout
# - teamcity splits out stdout vs stderr into separate outputs, we'd want them combined
logging.basicConfig(
format='[%(asctime)s|%(name)s|%(levelname)s]: %(message)s',
level=log_level,
stream=sys.stdout)
def pytest_addoption(parser):
parser.addoption('--masters', action='store', default=1, type=int,
help='Number of Jenkins masters to launch.')
parser.addoption('--jobs', action='store', default=1, type=int,
help='Number of test jobs to launch.')
parser.addoption('--single-use', action='store_true',
help='Use Mesos Single-Use agents')
parser.addoption('--run-delay', action='store', default=1,
type=int, help='Run job every X minutes.')
parser.addoption('--cpu-quota', action='store', default=0.0,
type=float, help='CPU quota to set. 0.0 to set no'
' quota.')
parser.addoption('--work-duration', action='store', default=600,
type=int, help='Duration, in seconds, for the '
'workload to last (sleep).')
parser.addoption('--mom', action='store', default='',
help='Marathon on Marathon instance name.')
parser.addoption('--external-volume', action='store_true',
help='Use rexray external volumes.')
parser.addoption('--scenario', action='store', default='sleep',
help='Test scenario to run (sleep, buildmarathon) '
'(default: sleep).')
parser.addoption('--min', action='store', default=-1,
help='min jenkins index to start from'
'(default: -1).')
parser.addoption('--max', action='store', default=-1,
help='max jenkins index to end at'
'(default: -1).')
parser.addoption('--batch-size', action='store', default=1,
help='batch size to deploy jenkins masters in'
'(default: 1).')
@pytest.fixture
def master_count(request) -> int:
return int(request.config.getoption('--masters'))
@pytest.fixture
def job_count(request) -> int:
return int(request.config.getoption('--jobs'))
@pytest.fixture
def single_use(request) -> bool:
return bool(request.config.getoption('--single-use'))
@pytest.fixture
def run_delay(request) -> int:
return int(request.config.getoption('--run-delay'))
@pytest.fixture
def cpu_quota(request) -> float:
return float(request.config.getoption('--cpu-quota'))
@pytest.fixture
def work_duration(request) -> int:
return int(request.config.getoption('--work-duration'))
@pytest.fixture
def mom(request) -> str:
return request.config.getoption('--mom')
@pytest.fixture
def scenario(request) -> str:
return request.config.getoption('--scenario')
@pytest.fixture
def external_volume(request) -> bool:
return bool(request.config.getoption('--external-volume'))
@pytest.fixture
def min_index(request) -> int:
return int(request.config.getoption('--min'))
@pytest.fixture
def max_index(request) -> int:
return int(request.config.getoption('--max'))
@pytest.fixture
def batch_size(request) -> int:
return int(request.config.getoption('--batch-size'))
|
py
|
1a5dd74ea2900ac2fae10b7448272fe4a7d37c7e
|
#! /usr/bin/python
#
# File: SMJobBlessUtil.py
#
# Contains: Tool for checking and correcting apps that use SMJobBless.
#
# Written by: DTS
#
# Copyright: Copyright (c) 2012 Apple Inc. All Rights Reserved.
#
# Disclaimer: IMPORTANT: This Apple software is supplied to you by Apple Inc.
# ("Apple") in consideration of your agreement to the following
# terms, and your use, installation, modification or
# redistribution of this Apple software constitutes acceptance of
# these terms. If you do not agree with these terms, please do
# not use, install, modify or redistribute this Apple software.
#
# In consideration of your agreement to abide by the following
# terms, and subject to these terms, Apple grants you a personal,
# non-exclusive license, under Apple's copyrights in this
# original Apple software (the "Apple Software"), to use,
# reproduce, modify and redistribute the Apple Software, with or
# without modifications, in source and/or binary forms; provided
# that if you redistribute the Apple Software in its entirety and
# without modifications, you must retain this notice and the
# following text and disclaimers in all such redistributions of
# the Apple Software. Neither the name, trademarks, service marks
# or logos of Apple Inc. may be used to endorse or promote
# products derived from the Apple Software without specific prior
# written permission from Apple. Except as expressly stated in
# this notice, no other rights or licenses, express or implied,
# are granted by Apple herein, including but not limited to any
# patent rights that may be infringed by your derivative works or
# by other works in which the Apple Software may be incorporated.
#
# The Apple Software is provided by Apple on an "AS IS" basis.
# APPLE MAKES NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING
# WITHOUT LIMITATION THE IMPLIED WARRANTIES OF NON-INFRINGEMENT,
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, REGARDING
# THE APPLE SOFTWARE OR ITS USE AND OPERATION ALONE OR IN
# COMBINATION WITH YOUR PRODUCTS.
#
# IN NO EVENT SHALL APPLE BE LIABLE FOR ANY SPECIAL, INDIRECT,
# INCIDENTAL OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) ARISING IN ANY WAY
# OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR DISTRIBUTION
# OF THE APPLE SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY
# OF CONTRACT, TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR
# OTHERWISE, EVEN IF APPLE HAS BEEN ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
import sys
import os
import getopt
import subprocess
import plistlib
import operator
import pdb
class UsageException (Exception):
"""
Raised when the progam detects a usage issue; the top-level code catches this
and prints a usage message.
"""
pass
class CheckException (Exception):
"""
Raised when the "check" subcommand detects a problem; the top-level code catches
this and prints a nice error message.
"""
def __init__(self, message, path=None):
self.message = message
self.path = path
def checkCodeSignature(programPath, programType):
"""Checks the code signature of the referenced program."""
# Use the codesign tool to check the signature. The second "-v" is required to enable
# verbose mode, which causes codesign to do more checking. By default it does the minimum
# amount of checking ("Is the program properly signed?"). If you enabled verbose mode it
# does other sanity checks, which we definitely want. The specific thing I'd like to
# detect is "Does the code satisfy its own designated requirement?" and I need to enable
# verbose mode to get that.
args = [
# "false",
"codesign",
"-v",
"-v",
programPath
]
try:
subprocess.check_call(args, stderr=open("/dev/null"))
except subprocess.CalledProcessError, e:
raise CheckException("%s code signature invalid" % programType, programPath)
def readDesignatedRequirement(programPath, programType):
"""Returns the designated requirement of the program as a string."""
args = [
# "false",
"codesign",
"-d",
"-r",
"-",
programPath
]
try:
req = subprocess.check_output(args, stderr=open("/dev/null"))
except subprocess.CalledProcessError, e:
raise CheckException("%s designated requirement unreadable" % programType, programPath)
reqLines = req.splitlines()
if len(reqLines) != 1 or not req.startswith("designated => "):
raise CheckException("%s designated requirement malformed" % programType, programPath)
return reqLines[0][len("designated => "):]
def readInfoPlistFromPath(infoPath):
"""Reads an "Info.plist" file from the specified path."""
try:
info = plistlib.readPlist(infoPath)
except:
raise CheckException("'Info.plist' not readable", infoPath)
if not isinstance(info, dict):
raise CheckException("'Info.plist' root must be a dictionary", infoPath)
return info
def readPlistFromToolSection(toolPath, segmentName, sectionName):
"""Reads a dictionary property list from the specified section within the specified executable."""
# Run otool -s to get a hex dump of the section.
args = [
# "false",
"otool",
"-s",
segmentName,
sectionName,
toolPath
]
try:
plistDump = subprocess.check_output(args)
except subprocess.CalledProcessError, e:
raise CheckException("tool %s / %s section unreadable" % (segmentName, sectionName), toolPath)
# Convert that hex dump to an property list.
plistLines = plistDump.splitlines()
if len(plistLines) < 3 or plistLines[1] != ("Contents of (%s,%s) section" % (segmentName, sectionName)):
raise CheckException("tool %s / %s section dump malformed (1)" % (segmentName, sectionName), toolPath)
del plistLines[0:2]
try:
bytes = []
for line in plistLines:
# line looks like this:
#
# '0000000100000b80\t3c 3f 78 6d 6c 20 76 65 72 73 69 6f 6e 3d 22 31 '
columns = line.split("\t")
assert len(columns) == 2
for hexStr in columns[1].split():
bytes.append(int(hexStr, 16))
plist = plistlib.readPlistFromString(bytearray(bytes))
except:
raise CheckException("tool %s / %s section dump malformed (2)" % (segmentName, sectionName), toolPath)
# Check the root of the property list.
if not isinstance(plist, dict):
raise CheckException("tool %s / %s property list root must be a dictionary" % (segmentName, sectionName), toolPath)
return plist
def checkStep1(appPath):
"""Checks that the app and the tool are both correctly code signed."""
if not os.path.isdir(appPath):
raise CheckException("app not found", appPath)
# Check the app's code signature.
checkCodeSignature(appPath, "app")
# Check the tool directory.
toolDirPath = os.path.join(appPath, "Contents", "Library", "LaunchServices")
if not os.path.isdir(toolDirPath):
raise CheckException("tool directory not found", toolDirPath)
# Check each tool's code signature.
toolPathList = []
for toolName in os.listdir(toolDirPath):
if toolName != ".DS_Store":
toolPath = os.path.join(toolDirPath, toolName)
if not os.path.isfile(toolPath):
raise CheckException("tool directory contains a directory", toolPath)
checkCodeSignature(toolPath, "tool")
toolPathList.append(toolPath)
# Check that we have at least one tool.
if len(toolPathList) == 0:
raise CheckException("no tools found", toolDirPath)
return toolPathList
def checkStep2(appPath, toolPathList):
"""Checks the SMPrivilegedExecutables entry in the app's "Info.plist"."""
# Create a map from the tool name (not path) to its designated requirement.
toolNameToReqMap = dict()
for toolPath in toolPathList:
req = readDesignatedRequirement(toolPath, "tool")
toolNameToReqMap[os.path.basename(toolPath)] = req
# Read the Info.plist for the app and extract the SMPrivilegedExecutables value.
infoPath = os.path.join(appPath, "Contents", "Info.plist")
info = readInfoPlistFromPath(infoPath)
if not info.has_key("SMPrivilegedExecutables"):
raise CheckException("'SMPrivilegedExecutables' not found", infoPath)
infoToolDict = info["SMPrivilegedExecutables"]
if not isinstance(infoToolDict, dict):
raise CheckException("'SMPrivilegedExecutables' must be a dictionary", infoPath)
# Check that the list of tools matches the list of SMPrivilegedExecutables entries.
if sorted(infoToolDict.keys()) != sorted(toolNameToReqMap.keys()):
raise CheckException("'SMPrivilegedExecutables' and tools in 'Contents/Library/LaunchServices' don't match")
# Check that all the requirements match.
# This is an interesting policy choice. Technically the tool just needs to match
# the requirement listed in SMPrivilegedExecutables, and we can check that by
# putting the requirement into tmp.req and then running
#
# $ codesign -v -R tmp.req /path/to/tool
#
# However, for a Developer ID signed tool we really want to have the SMPrivilegedExecutables
# entry contain the tool's designated requirement because Xcode has built a
# more complex DR that does lots of useful and important checks. So, as a matter
# of policy we require that the value in SMPrivilegedExecutables match the tool's DR.
for toolName in infoToolDict:
if infoToolDict[toolName] != toolNameToReqMap[toolName]:
raise CheckException("tool designated requirement (%s) doesn't match entry in 'SMPrivilegedExecutables' (%s)" % (toolNameToReqMap[toolName], infoToolDict[toolName]))
def checkStep3(appPath, toolPathList):
"""Checks the "Info.plist" embedded in each helper tool."""
# First get the app's designated requirement.
appReq = readDesignatedRequirement(appPath, "app")
# Then check that the tool's SMAuthorizedClients value matches it.
for toolPath in toolPathList:
info = readPlistFromToolSection(toolPath, "__TEXT", "__info_plist")
if not info.has_key("CFBundleInfoDictionaryVersion") or info["CFBundleInfoDictionaryVersion"] != "6.0":
raise CheckException("'CFBundleInfoDictionaryVersion' in tool __TEXT / __info_plist section must be '6.0'", toolPath)
if not info.has_key("CFBundleIdentifier") or info["CFBundleIdentifier"] != os.path.basename(toolPath):
raise CheckException("'CFBundleIdentifier' in tool __TEXT / __info_plist section must match tool name", toolPath)
if not info.has_key("SMAuthorizedClients"):
raise CheckException("'SMAuthorizedClients' in tool __TEXT / __info_plist section not found", toolPath)
infoClientList = info["SMAuthorizedClients"]
if not isinstance(infoClientList, list):
raise CheckException("'SMAuthorizedClients' in tool __TEXT / __info_plist section must be an array", toolPath)
if len(infoClientList) != 1:
raise CheckException("'SMAuthorizedClients' in tool __TEXT / __info_plist section must have one entry", toolPath)
# Again, as a matter of policy we require that the SMAuthorizedClients entry must
# match exactly the designated requirement of the app.
if infoClientList[0] != appReq:
raise CheckException("app designated requirement (%s) doesn't match entry in 'SMAuthorizedClients' (%s)" % (appReq, infoClientList[0]), toolPath)
def checkStep4(appPath, toolPathList):
"""Checks the "launchd.plist" embedded in each helper tool."""
for toolPath in toolPathList:
launchd = readPlistFromToolSection(toolPath, "__TEXT", "__launchd_plist")
if not launchd.has_key("Label") or launchd["Label"] != os.path.basename(toolPath):
raise CheckException("'Label' in tool __TEXT / __launchd_plist section must match tool name", toolPath)
# We don't need to check that the label matches the bundle identifier because
# we know it matches the tool name and step 4 checks that the tool name matches
# the bundle identifier.
def checkStep5(appPath):
"""There's nothing to do here; we effectively checked for this is steps 1 and 2."""
pass
def check(appPath):
"""Checks the SMJobBless setup of the specified app."""
# Each of the following steps matches a bullet point in the SMJobBless header doc.
toolPathList = checkStep1(appPath)
checkStep2(appPath, toolPathList)
checkStep3(appPath, toolPathList)
checkStep4(appPath, toolPathList)
checkStep5(appPath)
def setreq(appPath, appInfoPlistPath, toolInfoPlistPaths):
"""
Reads information from the built app and uses it to set the SMJobBless setup
in the specified app and tool Info.plist source files.
"""
#pdb.set_trace()
if not os.path.isdir(appPath):
raise CheckException("app not found", appPath)
if not os.path.isfile(appInfoPlistPath):
raise CheckException("app 'Info.plist' not found", appInfoPlistPath)
for toolInfoPlistPath in toolInfoPlistPaths:
if not os.path.isfile(toolInfoPlistPath):
raise CheckException("app 'Info.plist' not found", toolInfoPlistPath)
# Get the designated requirement for the app and each of the tools.
appReq = readDesignatedRequirement(appPath, "app")
toolDirPath = os.path.join(appPath, "Contents", "Library", "LaunchServices")
if not os.path.isdir(toolDirPath):
raise CheckException("tool directory not found", toolDirPath)
toolNameToReqMap = {}
for toolName in os.listdir(toolDirPath):
req = readDesignatedRequirement(os.path.join(toolDirPath, toolName), "tool")
toolNameToReqMap[toolName] = req
if len(toolNameToReqMap) > len(toolInfoPlistPaths):
raise CheckException("tool directory has more tools (%d) than you've supplied tool 'Info.plist' paths (%d)" % (len(toolNameToReqMap), len(toolInfoPlistPaths)), toolDirPath)
if len(toolNameToReqMap) < len(toolInfoPlistPaths):
raise CheckException("tool directory has fewer tools (%d) than you've supplied tool 'Info.plist' paths (%d)" % (len(toolNameToReqMap), len(toolInfoPlistPaths)), toolDirPath)
# Build the new value for SMPrivilegedExecutables.
appToolDict = {}
toolInfoPlistPathToToolInfoMap = {}
for toolInfoPlistPath in toolInfoPlistPaths:
toolInfo = readInfoPlistFromPath(toolInfoPlistPath)
toolInfoPlistPathToToolInfoMap[toolInfoPlistPath] = toolInfo
if not toolInfo.has_key("CFBundleIdentifier"):
raise CheckException("'CFBundleIdentifier' not found", toolInfoPlistPath)
bundleID = toolInfo["CFBundleIdentifier"]
if not isinstance(bundleID, basestring):
raise CheckException("'CFBundleIdentifier' must be a string", toolInfoPlistPath)
appToolDict[bundleID] = toolNameToReqMap[bundleID]
# Set the SMPrivilegedExecutables value in the app "Info.plist".
appInfo = readInfoPlistFromPath(appInfoPlistPath)
needsUpdate = not appInfo.has_key("SMPrivilegedExecutables")
if not needsUpdate:
oldAppToolDict = appInfo["SMPrivilegedExecutables"]
if not isinstance(oldAppToolDict, dict):
raise CheckException("'SMPrivilegedExecutables' must be a dictionary", appInfoPlistPath)
appToolDictSorted = sorted(appToolDict.iteritems(), key=operator.itemgetter(0))
oldAppToolDictSorted = sorted(oldAppToolDict.iteritems(), key=operator.itemgetter(0))
needsUpdate = (appToolDictSorted != oldAppToolDictSorted)
if needsUpdate:
appInfo["SMPrivilegedExecutables"] = appToolDict
plistlib.writePlist(appInfo, appInfoPlistPath)
print >> sys.stdout, "%s: updated" % appInfoPlistPath
# Set the SMAuthorizedClients value in each tool's "Info.plist".
toolAppListSorted = [ appReq ] # only one element, so obviously sorted (-:
for toolInfoPlistPath in toolInfoPlistPaths:
toolInfo = toolInfoPlistPathToToolInfoMap[toolInfoPlistPath]
needsUpdate = not toolInfo.has_key("SMAuthorizedClients")
if not needsUpdate:
oldToolAppList = toolInfo["SMAuthorizedClients"]
if not isinstance(oldToolAppList, list):
raise CheckException("'SMAuthorizedClients' must be an array", toolInfoPlistPath)
oldToolAppListSorted = sorted(oldToolAppList)
needsUpdate = (toolAppListSorted != oldToolAppListSorted)
if needsUpdate:
toolInfo["SMAuthorizedClients"] = toolAppListSorted
plistlib.writePlist(toolInfo, toolInfoPlistPath)
print >> sys.stdout, "%s: updated" % toolInfoPlistPath
def main():
#pdb.set_trace()
options, appArgs = getopt.getopt(sys.argv[1:], "d")
debug = False
for opt, val in options:
if opt == "-d":
debug = True
else:
raise UsageException()
if len(appArgs) == 0:
raise UsageException()
command = appArgs[0]
if command == "check":
if len(appArgs) != 2:
raise UsageException()
check(appArgs[1])
elif command == "setreq":
if len(appArgs) < 4:
raise UsageException()
setreq(appArgs[1], appArgs[2], appArgs[3:])
else:
raise UsageException()
if __name__ == "__main__":
try:
main()
except CheckException, e:
if e.path is None:
print >> sys.stderr, "%s: %s" % (os.path.basename(sys.argv[0]), e.message)
else:
path = e.path
if path.endswith("/"):
path = path[:-1]
print >> sys.stderr, "%s: %s" % (path, e.message)
sys.exit(1)
except UsageException, e:
print >> sys.stderr, "usage: %s check /path/to/app" % os.path.basename(sys.argv[0])
print >> sys.stderr, " %s setreq /path/to/app /path/to/app/Info.plist /path/to/tool/Info.plist..." % os.path.basename(sys.argv[0])
sys.exit(1)
|
py
|
1a5dd77af680707d37650fbaa6250f0830f1941f
|
# a = 42
# print(type(a))
# a = str(a)
# print(type(a))
a = 42.3
print(type(a))
a = str(a)
print(type(a))
|
py
|
1a5dd79ae8c5f8ad134401e5d00801806d81e6a6
|
import numpy as np
from math import sqrt, exp
from planet_finder.grid import Grid
class Kriging:
def __init__(self, heat_map):
self.nugget = 0
self.range = 8 #1/3 of range
self.sill = 12
self.sv_matrix = None
self.lag_matrix = None
self.heat_map = heat_map
self.pp = None
self.ppsv = None
self.weights = None
self.points = []
self.pp_z = 0
self.z_matrix = None
self.pp_error = 0
self.pX = 0
self.pY = 0
def update_heat_map(self, heat_map):
self.heat_map = heat_map
def get_points(self):
for y0 in range(self.heat_map.height):
for x0 in range(self.heat_map.width):
if self.heat_map.cells[x0][y0] >= 1:
self.points.append([x0, y0])
def calculate_lag_matrix(self):
self.lag_matrix = np.zeros((len(self.points), len(self.points)), dtype=float)
row = 0
column = 0
for p0 in self.points:
for p1 in self.points:
lag = sqrt(pow(p0[0] - p1[0], 2) + pow(p0[1] - p1[1], 2))
self.lag_matrix[row][column] = lag
column += 1
row += 1
column = 0
def calculate_sv_matrix(self):
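        # Semivariogram model used below (a reading of the code, not taken
        # from project documentation):
        #   gamma(h) = nugget + sill * (1 - exp(-h / range))  for h > 0
        #   gamma(0) = 0
        # With an exponential model the practical range is roughly three times
        # the 'range' parameter, which matches the "1/3 of range" note above.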
sv = lambda t: self.nugget + self.sill*(1 - exp(-t/self.range)) if t != 0 else 0
self.sv_matrix = np.array([[sv(h) for h in row] for row in self.lag_matrix])
self.sv_matrix = np.c_[self.sv_matrix, np.zeros(len(self.points))]
self.sv_matrix = np.c_[self.sv_matrix, np.zeros(len(self.points))]
self.sv_matrix = np.c_[self.sv_matrix, np.zeros(len(self.points))]
self.sv_matrix = np.r_[self.sv_matrix, [np.zeros(len(self.points)+3)]]
self.sv_matrix = np.r_[self.sv_matrix, [np.zeros(len(self.points)+3)]]
self.sv_matrix = np.r_[self.sv_matrix, [np.zeros(len(self.points)+3)]]
        num_rows = len(self.points) + 3
        num_columns = len(self.points) + 3
        count = 0
        for point in self.points:
            self.sv_matrix[num_rows-1][count] = point[1]
            self.sv_matrix[num_rows-2][count] = point[0]
            self.sv_matrix[num_rows-3][count] = 1
            self.sv_matrix[count][num_columns-1] = point[1]
            self.sv_matrix[count][num_columns-2] = point[0]
            self.sv_matrix[count][num_columns-3] = 1
count += 1
def calculate_prediction_point(self, pX, pY):
pp_lag = lambda t: sqrt(pow(t[0] - pX, 2) + pow(t[1] - pY, 2))
self.pp = np.array([pp_lag(row) for row in self.points])
self.pX = pX
self.pY = pY
def calculate_sv_pp(self):
# ppsv = lambda t: self.sill*(1 - exp(-t/self.range)) if t < self.range and t != 0 else 0
ppsv = lambda t: self.nugget + self.sill*(1 - exp(-t/self.range)) if t != 0 else 0
self.ppsv = np.array([ppsv(h) for h in self.pp])
self.ppsv = np.r_[self.ppsv, np.ones(3)]
rows = len(self.ppsv)
self.ppsv[rows - 2] = self.pX
self.ppsv[rows - 1] = self.pY
def calculate_weights(self):
try:
temp = np.linalg.inv(self.sv_matrix)
self.weights = np.dot(temp, self.ppsv)
self.pp_error = np.dot(self.ppsv, self.weights)
self.weights = np.delete(self.weights, -1, 0)
self.weights = np.delete(self.weights, -1, 0)
self.weights = np.delete(self.weights, -1, 0)
return True
except Exception as err:
print("Error")
print(err)
return False
def calculate_z(self):
z = lambda t: self.heat_map.cells[t[0]][t[1]]
self.z_matrix = np.array([z(p) for p in self.points])
self.pp_z = np.inner(self.z_matrix, self.weights)
def setup(self):
self.get_points()
if len(self.points) < 3:
return False
else:
self.calculate_lag_matrix()
self.calculate_sv_matrix()
            if np.linalg.det(self.sv_matrix) == 0:
                # A singular semivariogram matrix cannot be inverted in
                # calculate_weights(), so report setup failure.
                return False
            else:
                return True
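    # get_estimate() below returns [predicted_value, error_estimate] on
    # success, or an empty list when solving for the weights fails
    # (a description of the code, not taken from project documentation).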
def get_estimate(self, x, y):
self.calculate_prediction_point(x, y)
self.calculate_sv_pp()
if self.calculate_weights():
self.calculate_z()
return [self.pp_z, self.pp_error]
else:
return []
if __name__ == "__main__":
np.set_printoptions(linewidth=300, precision=1)
heat_map = Grid(16, 16)
heat_map.init_bomb(3, 3, 10)
heat_map.cells[3][3] = 0
# heat_map.cells[0][0] = 1
# heat_map.cells[1][0] = 2
# heat_map.cells[2][0] = 4
# heat_map.cells[0][1] = 5
# heat_map.cells[0][2] = 6
# heat_map.cells[2][2] = 27
for x in range(16, 32):
for y in range(16, 32):
            heat_map = Grid(x, y)
bombX = int(heat_map.width/2)
bombY = int(heat_map.height/2)
heat_map.init_bomb(bombX, bombY)
heat_map.cells[bombX][bombY] = 0
k = Kriging(heat_map)
k.setup()
            result = k.get_estimate(bombX, bombY)
print("Estimate for (%2d,%2d)" % (x, y), str("%4.1f" % result[0]), str("%4.1f" % result[1]), heat_map.cells[bombX][bombY], ' Error ' + str("%.1f" % (result[0] - 10)))
|
py
|
1a5dd7abdef46c629e42c46a00ba1484d1d43f00
|
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.rst')).read()
CHANGES = open(os.path.join(here, 'CHANGES.rst')).read()
reqs = [line.strip() for line in open('requirements/deploy.txt')]
classifiers = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: Atmospheric Science',
]
setup(name='esmvalwps',
version='1.0.1',
description='WPS processes for ESMValTool',
long_description=README + '\n\n' + CHANGES,
classifiers=classifiers,
author='Birdhouse',
author_email='',
url='http://www.esmvaltool.org/',
license="Apache License v2.0",
keywords='wps pywps conda birdhouse esmvaltool',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
test_suite='esmvalwps',
install_requires=reqs,
entry_points={
'console_scripts': []
},
)
|
py
|
1a5dd816413629a53a6746ce11b8215ede9e6c6c
|
"""Fixture used in type-related test cases.
It contains class TypeInfos and Type objects.
"""
from typing import List, Optional, Tuple
from mypy.semanal_shared import set_callable_name
from mypy.types import (
Type, AnyType, NoneType, Instance, CallableType, TypeVarType, TypeType,
UninhabitedType, TypeOfAny, TypeAliasType, UnionType, LiteralType,
TypeVarLikeType
)
from mypy.nodes import (
TypeInfo, ClassDef, FuncDef, Block, ARG_POS, ARG_OPT, ARG_STAR, SymbolTable,
COVARIANT, TypeAlias, SymbolTableNode, MDEF,
)
class TypeFixture:
"""Helper class that is used as a fixture in type-related unit tests.
The members are initialized to contain various type-related values.
"""
def __init__(self, variance: int = COVARIANT) -> None:
# The 'object' class
self.oi = self.make_type_info('builtins.object') # class object
self.o = Instance(self.oi, []) # object
# Type variables (these are effectively global)
def make_type_var(name: str, id: int, values: List[Type], upper_bound: Type,
variance: int) -> TypeVarType:
return TypeVarType(name, name, id, values, upper_bound, variance)
self.t = make_type_var('T', 1, [], self.o, variance) # T`1 (type variable)
self.tf = make_type_var('T', -1, [], self.o, variance) # T`-1 (type variable)
self.tf2 = make_type_var('T', -2, [], self.o, variance) # T`-2 (type variable)
self.s = make_type_var('S', 2, [], self.o, variance) # S`2 (type variable)
self.s1 = make_type_var('S', 1, [], self.o, variance) # S`1 (type variable)
self.sf = make_type_var('S', -2, [], self.o, variance) # S`-2 (type variable)
self.sf1 = make_type_var('S', -1, [], self.o, variance) # S`-1 (type variable)
# Simple types
self.anyt = AnyType(TypeOfAny.special_form)
self.nonet = NoneType()
self.uninhabited = UninhabitedType()
# Abstract class TypeInfos
# class F
self.fi = self.make_type_info('F', is_abstract=True)
# class F2
self.f2i = self.make_type_info('F2', is_abstract=True)
# class F3(F)
self.f3i = self.make_type_info('F3', is_abstract=True, mro=[self.fi])
# Class TypeInfos
self.std_tuplei = self.make_type_info('builtins.tuple',
mro=[self.oi],
typevars=['T'],
variances=[COVARIANT]) # class tuple
self.type_typei = self.make_type_info('builtins.type') # class type
self.bool_type_info = self.make_type_info('builtins.bool')
self.functioni = self.make_type_info('builtins.function') # function TODO
self.ai = self.make_type_info('A', mro=[self.oi]) # class A
self.bi = self.make_type_info('B', mro=[self.ai, self.oi]) # class B(A)
self.ci = self.make_type_info('C', mro=[self.ai, self.oi]) # class C(A)
self.di = self.make_type_info('D', mro=[self.oi]) # class D
# class E(F)
self.ei = self.make_type_info('E', mro=[self.fi, self.oi])
# class E2(F2, F)
self.e2i = self.make_type_info('E2', mro=[self.f2i, self.fi, self.oi])
# class E3(F, F2)
self.e3i = self.make_type_info('E3', mro=[self.fi, self.f2i, self.oi])
# Generic class TypeInfos
# G[T]
self.gi = self.make_type_info('G', mro=[self.oi],
typevars=['T'],
variances=[variance])
# G2[T]
self.g2i = self.make_type_info('G2', mro=[self.oi],
typevars=['T'],
variances=[variance])
# H[S, T]
self.hi = self.make_type_info('H', mro=[self.oi],
typevars=['S', 'T'],
variances=[variance, variance])
# GS[T, S] <: G[S]
self.gsi = self.make_type_info('GS', mro=[self.gi, self.oi],
typevars=['T', 'S'],
variances=[variance, variance],
bases=[Instance(self.gi, [self.s])])
# GS2[S] <: G[S]
self.gs2i = self.make_type_info('GS2', mro=[self.gi, self.oi],
typevars=['S'],
variances=[variance],
bases=[Instance(self.gi, [self.s1])])
# list[T]
self.std_listi = self.make_type_info('builtins.list', mro=[self.oi],
typevars=['T'],
variances=[variance])
# Instance types
self.std_tuple = Instance(self.std_tuplei, [self.anyt]) # tuple
self.type_type = Instance(self.type_typei, []) # type
self.function = Instance(self.functioni, []) # function TODO
self.a = Instance(self.ai, []) # A
self.b = Instance(self.bi, []) # B
self.c = Instance(self.ci, []) # C
self.d = Instance(self.di, []) # D
self.e = Instance(self.ei, []) # E
self.e2 = Instance(self.e2i, []) # E2
self.e3 = Instance(self.e3i, []) # E3
self.f = Instance(self.fi, []) # F
self.f2 = Instance(self.f2i, []) # F2
self.f3 = Instance(self.f3i, []) # F3
# Generic instance types
self.ga = Instance(self.gi, [self.a]) # G[A]
self.gb = Instance(self.gi, [self.b]) # G[B]
self.gd = Instance(self.gi, [self.d]) # G[D]
self.go = Instance(self.gi, [self.o]) # G[object]
self.gt = Instance(self.gi, [self.t]) # G[T`1]
self.gtf = Instance(self.gi, [self.tf]) # G[T`-1]
self.gtf2 = Instance(self.gi, [self.tf2]) # G[T`-2]
self.gs = Instance(self.gi, [self.s]) # G[S]
self.gdyn = Instance(self.gi, [self.anyt]) # G[Any]
self.gn = Instance(self.gi, [NoneType()]) # G[None]
self.g2a = Instance(self.g2i, [self.a]) # G2[A]
self.gsaa = Instance(self.gsi, [self.a, self.a]) # GS[A, A]
self.gsab = Instance(self.gsi, [self.a, self.b]) # GS[A, B]
self.gsba = Instance(self.gsi, [self.b, self.a]) # GS[B, A]
self.gs2a = Instance(self.gs2i, [self.a]) # GS2[A]
self.gs2b = Instance(self.gs2i, [self.b]) # GS2[B]
self.gs2d = Instance(self.gs2i, [self.d]) # GS2[D]
self.hab = Instance(self.hi, [self.a, self.b]) # H[A, B]
self.haa = Instance(self.hi, [self.a, self.a]) # H[A, A]
self.hbb = Instance(self.hi, [self.b, self.b]) # H[B, B]
self.hts = Instance(self.hi, [self.t, self.s]) # H[T, S]
self.had = Instance(self.hi, [self.a, self.d]) # H[A, D]
self.hao = Instance(self.hi, [self.a, self.o]) # H[A, object]
self.lsta = Instance(self.std_listi, [self.a]) # List[A]
self.lstb = Instance(self.std_listi, [self.b]) # List[B]
self.lit1 = LiteralType(1, self.a)
self.lit2 = LiteralType(2, self.a)
self.lit3 = LiteralType("foo", self.d)
self.lit1_inst = Instance(self.ai, [], last_known_value=self.lit1)
self.lit2_inst = Instance(self.ai, [], last_known_value=self.lit2)
self.lit3_inst = Instance(self.di, [], last_known_value=self.lit3)
self.type_a = TypeType.make_normalized(self.a)
self.type_b = TypeType.make_normalized(self.b)
self.type_c = TypeType.make_normalized(self.c)
self.type_d = TypeType.make_normalized(self.d)
self.type_t = TypeType.make_normalized(self.t)
self.type_any = TypeType.make_normalized(self.anyt)
self._add_bool_dunder(self.bool_type_info)
self._add_bool_dunder(self.ai)
def _add_bool_dunder(self, type_info: TypeInfo) -> None:
signature = CallableType([], [], [], Instance(self.bool_type_info, []), self.function)
bool_func = FuncDef('__bool__', [], Block([]))
bool_func.type = set_callable_name(signature, bool_func)
type_info.names[bool_func.name] = SymbolTableNode(MDEF, bool_func)
# Helper methods
def callable(self, *a: Type) -> CallableType:
"""callable(a1, ..., an, r) constructs a callable with argument types
a1, ... an and return type r.
"""
return CallableType(list(a[:-1]), [ARG_POS] * (len(a) - 1),
[None] * (len(a) - 1), a[-1], self.function)
def callable_type(self, *a: Type) -> CallableType:
"""callable_type(a1, ..., an, r) constructs a callable with
argument types a1, ... an and return type r, and which
represents a type.
"""
return CallableType(list(a[:-1]), [ARG_POS] * (len(a) - 1),
[None] * (len(a) - 1), a[-1], self.type_type)
def callable_default(self, min_args: int, *a: Type) -> CallableType:
"""callable_default(min_args, a1, ..., an, r) constructs a
callable with argument types a1, ... an and return type r,
with min_args mandatory fixed arguments.
"""
n = len(a) - 1
return CallableType(list(a[:-1]),
[ARG_POS] * min_args + [ARG_OPT] * (n - min_args),
[None] * n,
a[-1], self.function)
def callable_var_arg(self, min_args: int, *a: Type) -> CallableType:
"""callable_var_arg(min_args, a1, ..., an, r) constructs a callable
with argument types a1, ... *an and return type r.
"""
n = len(a) - 1
return CallableType(list(a[:-1]),
[ARG_POS] * min_args +
[ARG_OPT] * (n - 1 - min_args) +
[ARG_STAR], [None] * n,
a[-1], self.function)
def make_type_info(self, name: str,
module_name: Optional[str] = None,
is_abstract: bool = False,
mro: Optional[List[TypeInfo]] = None,
bases: Optional[List[Instance]] = None,
typevars: Optional[List[str]] = None,
variances: Optional[List[int]] = None) -> TypeInfo:
"""Make a TypeInfo suitable for use in unit tests."""
class_def = ClassDef(name, Block([]), None, [])
class_def.fullname = name
if module_name is None:
if '.' in name:
module_name = name.rsplit('.', 1)[0]
else:
module_name = '__main__'
if typevars:
v: List[TypeVarLikeType] = []
for id, n in enumerate(typevars, 1):
if variances:
variance = variances[id - 1]
else:
variance = COVARIANT
v.append(TypeVarType(n, n, id, [], self.o, variance=variance))
class_def.type_vars = v
info = TypeInfo(SymbolTable(), class_def, module_name)
if mro is None:
mro = []
if name != 'builtins.object':
mro.append(self.oi)
info.mro = [info] + mro
if bases is None:
if mro:
# By default, assume that there is a single non-generic base.
bases = [Instance(mro[0], [])]
else:
bases = []
info.bases = bases
return info
def def_alias_1(self, base: Instance) -> Tuple[TypeAliasType, Type]:
A = TypeAliasType(None, [])
target = Instance(self.std_tuplei,
[UnionType([base, A])]) # A = Tuple[Union[base, A], ...]
AN = TypeAlias(target, '__main__.A', -1, -1)
A.alias = AN
return A, target
def def_alias_2(self, base: Instance) -> Tuple[TypeAliasType, Type]:
A = TypeAliasType(None, [])
target = UnionType([base,
Instance(self.std_tuplei, [A])]) # A = Union[base, Tuple[A, ...]]
AN = TypeAlias(target, '__main__.A', -1, -1)
A.alias = AN
return A, target
def non_rec_alias(self, target: Type) -> TypeAliasType:
AN = TypeAlias(target, '__main__.A', -1, -1)
return TypeAliasType(AN, [])
class InterfaceTypeFixture(TypeFixture):
"""Extension of TypeFixture that contains additional generic
interface types."""
def __init__(self) -> None:
super().__init__()
# GF[T]
self.gfi = self.make_type_info('GF', typevars=['T'], is_abstract=True)
# M1 <: GF[A]
self.m1i = self.make_type_info('M1',
is_abstract=True,
mro=[self.gfi, self.oi],
bases=[Instance(self.gfi, [self.a])])
self.gfa = Instance(self.gfi, [self.a]) # GF[A]
self.gfb = Instance(self.gfi, [self.b]) # GF[B]
self.m1 = Instance(self.m1i, []) # M1
|
py
|
1a5dd87be03a62a5a0b9d58ae732826cfbd7476a
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class MacroeconomicConfig(AppConfig):
name = 'macroeconomic'
|
py
|
1a5dd8ad5f040d8f29b94897359c0c9cb65d27e7
|
import json
import logging
from django.conf import settings
from azure.storage.file import FileService
from azure.common import AzureMissingResourceHttpError
from azure.storage.file.models import File as AzureFile, Directory as AzureDirectory
from azure.storage.blob import BlockBlobService
from django.db import models
from django.db.models.signals import pre_save
from django.dispatch import receiver
from django.db.models.signals import post_save
logger = logging.getLogger('django')
class TaxonomyTerms(models.Model):
taxonomy_id = models.CharField(max_length=255, unique=True)
terms_json = models.TextField()
@receiver(post_save, sender=TaxonomyTerms)
def update_taxonomy_terms_on_blobstore(sender, instance, **kwargs):
try:
data = json.loads(instance.terms_json)
terms_with_vocab = get_terms_from_terms_json(data)
vocabs = get_vocabs_from_terms_json(data)
content = dict()
content['vocabs'] = vocabs
content['terms'] = terms_with_vocab
blobPath = f'taxonomy/{instance.taxonomy_id}.json'
blob_service = BlockBlobService(account_name=settings.AZURE_ACCOUNT_NAME, account_key=settings.AZURE_ACCOUNT_KEY)
blob_service.create_blob_from_text(settings.AZURE_CONTAINER, blobPath, to_json(content))
logger.info('Successfully wrote taxonomy json to BlobStore %s', blobPath)
except Exception as e:
logger.info('Could not build taxonomy json and send to BlobStore %s', e)
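# The helpers below flatten the nested vocabulary/term JSON into lookup dicts.
# Illustrative input/output (shape inferred from the code, not from docs):
#   [{'type': 'vocabulary', 'code': 'topics', 'label': 'Topics', 'children': [
#       {'type': 'term', 'code': 'health', 'label': 'Health'}]}]
#   -> terms:  {'health': {'label': 'Health', 'vocabCode': 'topics',
#                          'indexPath': 'health', 'level': 1}}
#   -> vocabs: {'topics': 'Topics'}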
def get_terms_from_terms_json(data):
terms = dict()
level = 1
for obj in data:
if obj.get('type') == 'vocabulary':
vocab_code = obj.get('code')
children = obj.get('children', None)
if children:
child_terms = get_terms_from_children(children, vocab_code, '', level)
terms.update(child_terms)
return terms
def get_terms_from_children(children, vocab_code, index_path, level):
terms = dict()
for obj in children:
if obj.get('type') == 'term':
index_path_for_level = obj.get('code') if index_path == '' else "%s|%s" % (index_path, obj.get('code'))
terms[obj.get('code')] = { "label": obj.get('label'), "vocabCode": vocab_code, "indexPath": index_path_for_level, "level": level }
children = obj.get('children', None)
if children:
next_level = level + 1
child_terms = get_terms_from_children(children, vocab_code, index_path_for_level, next_level)
terms.update(child_terms)
return terms
def get_vocabs_from_terms_json(data):
vocabs = dict()
for obj in data:
if obj.get('type') == 'vocabulary':
vocab_code = obj.get('code')
vocab_label = obj.get('label')
vocabs[vocab_code] = vocab_label
return vocabs
def to_json(data):
return json.dumps(data)
class TaxonomyMixin(models.Model):
taxonomy_json = models.TextField(null=True, blank=True)
class Meta:
abstract = True
class PageTaxonomyPermissionsMixin(models.Model):
global_permission = models.CharField(max_length=100, null=True, blank=True, default='public')
inherit_permission = models.CharField(max_length=100, null=True, blank=True)
permissions_json = models.TextField(null=True, blank=True)
permissions_json_formatted = models.TextField(null=True, blank=True)
class Meta:
abstract = True
class ModelTaxonomyPermissionsMixin(models.Model):
permissions_json = models.TextField(null=True, blank=True)
permissions_json_formatted = models.TextField(null=True, blank=True)
class Meta:
abstract = True
# @receiver(pre_save, sender=PageTaxonomyPermissionsMixin)
def format_permissions_json(sender, instance, **kwargs):
    permissions_json_formatted = {}
for group_key, groups in (json.loads(instance.permissions_json)).items():
permissions_json_formatted[group_key] = []
for action_key, vocs in groups.items():
permissions_json_formatted[group_key].extend(['{0}.{1}'.format(action_key, voc) for voc in vocs])
instance.permissions_json_formatted = permissions_json_formatted
print(permissions_json_formatted)
return instance
|
py
|
1a5dd92f9e1e3fa50ec43098008accb26fd49f0a
|
# -*- coding: utf-8
# Core
import pytest
# Exception
from ...exceptions import InvalidInput
# Messaging context
from ... import messaging_context
@pytest.fixture
def remove_user_to_channel_lib():
return messaging_context.remove_user_to_channel()
@pytest.fixture
def add_user_to_channel_lib():
return messaging_context.add_user_to_channel()
async def test_remove_user_to_channel_user_id_required(
channel_data,
remove_user_to_channel_lib
):
errors = None
try:
await remove_user_to_channel_lib.run(
channel_instance=channel_data[3], user_id=None)
except InvalidInput:
errors = await remove_user_to_channel_lib.get_errors()
assert errors['user_id'] == ['user_id is required'], \
'Should fail if no user id'
async def test_remove_user_to_channel_user_id_invalid(
channel_data,
remove_user_to_channel_lib
):
errors = None
try:
await remove_user_to_channel_lib.run(
channel_instance=channel_data[3], user_id='asdf')
except InvalidInput:
errors = await remove_user_to_channel_lib.get_errors()
assert errors['user_id'] == ['user_id is invalid'], \
'Should fail if user_id is invalid or not integer'
async def test_remove_user_to_channel_user_id_not_found(
channel_data,
remove_user_to_channel_lib
):
errors = None
try:
await remove_user_to_channel_lib.run(
channel_instance=channel_data[3], user_id=1239123891)
except InvalidInput:
errors = await remove_user_to_channel_lib.get_errors()
assert errors['user_id'] == ['user does not exist'], \
'Should fail if user_id is not added to channel'
async def test_remove_user_to_channel(
channel_data,
remove_user_to_channel_lib,
add_user_to_channel_lib,
):
await remove_user_to_channel_lib.run(
channel_instance=channel_data[0], user_id=1)
# After removal testing is successful add it again for other testing
await add_user_to_channel_lib.run(
channel_instance=channel_data[0], user_id=1)
|
py
|
1a5dd9ac0b6fe5aa80a39c7718417f99c022b330
|
#
# Copyright (c) 2020 Seagate Technology LLC and/or its Affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For any questions about this software or licensing,
# please email [email protected] or [email protected].
#
import os
from framework import Config
from ldap_setup import LdapInfo
from framework import S3PyCliTest
from auth import AuthTest
from s3client_config import S3ClientConfig
from ldap import LdapOps
# Helps debugging
# Config.log_enabled = True
# Config.dummy_run = True
# Set time_readable_format to False if you want to display the time in milli seconds.
# Config.time_readable_format = False
# Store the access keys created during the test.
# These keys should be deleted after
access_key_id = []
# Extract the response elements from response which has the following format
# <Key 1> = <Value 1>, <Key 2> = <Value 2> ... <Key n> = <Value n>
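# For example (illustrative values only):
#   "AccountId = 123, CanonicalId = abc" -> {'AccountId': '123', 'CanonicalId': 'abc'}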
def get_response_elements(response):
response_elements = {}
key_pairs = response.split(',')
for key_pair in key_pairs:
tokens = key_pair.split('=')
response_elements[tokens[0].strip()] = tokens[1].strip()
return response_elements
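# Most tests below follow the same fault-injection pattern (summarised here
# from the calls used in this file):
#   AuthTest("Set <FAULT> fault point").inject_fault("<FAULT>", "<MODE>", n).execute_test()
#   <run the IAM command and assert the expected failure message>
#   AuthTest("Reset <FAULT> fault point").reset_fault("<FAULT>").execute_test()
# where <MODE> is one of FAIL_ONCE, FAIL_ALWAYS or SKIP_FIRST_N_TIMES.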
# Run before all to setup the test environment.
def before_all():
print("Configuring LDAP")
S3PyCliTest('Before_all').before_all()
# Test create account and list accounts APIs
def account_tests():
account_args = {}
account_args['AccountName'] = 's3test'
account_args['Email'] = '[email protected]'
account_args['ldapuser'] = 'sgiamadmin'
account_args['ldappasswd'] = LdapInfo.get_ldap_admin_pwd()
# CREATE
AuthTest("Set LDAP_ADD_ENTRY_FAIL fault point").inject_fault("LDAP_ADD_ENTRY_FAIL", "FAIL_ONCE", 0).execute_test()
test_msg = "Create account s3test should fail if save account details fails"
auth_test = AuthTest(test_msg)
auth_test.create_account(**account_args).execute_test(negative_case=True)
AuthTest("Reset LDAP_ADD_ENTRY_FAIL fault point").reset_fault("LDAP_ADD_ENTRY_FAIL").execute_test()
auth_test.command_response_should_have("Account wasn't created.")
LdapOps().delete_account("s3test")
AuthTest("Set LDAP_ADD_ENTRY_FAIL fault point").inject_fault("LDAP_ADD_ENTRY_FAIL", "SKIP_FIRST_N_TIMES", 1).execute_test()
test_msg = "Create account s3test should fail when create user ou fails"
auth_test = AuthTest(test_msg)
auth_test.create_account(**account_args).execute_test(negative_case=True)
AuthTest("Reset LDAP_ADD_ENTRY_FAIL fault point").reset_fault("LDAP_ADD_ENTRY_FAIL").execute_test()
auth_test.command_response_should_have("Account wasn't created.")
LdapOps().delete_account("s3test")
AuthTest("Set LDAP_ADD_ENTRY_FAIL fault point").inject_fault("LDAP_ADD_ENTRY_FAIL", "SKIP_FIRST_N_TIMES", 2).execute_test()
test_msg = "Create account s3test should fail when create role ou fails"
auth_test = AuthTest(test_msg)
auth_test.create_account(**account_args).execute_test(negative_case=True)
AuthTest("Reset LDAP_ADD_ENTRY_FAIL fault point").reset_fault("LDAP_ADD_ENTRY_FAIL").execute_test()
auth_test.command_response_should_have("Account wasn't created.")
LdapOps().delete_account("s3test")
AuthTest("Set LDAP_ADD_ENTRY_FAIL fault point").inject_fault("LDAP_ADD_ENTRY_FAIL", "SKIP_FIRST_N_TIMES", 3).execute_test()
test_msg = "Create account s3test should fail when create groups ou fails"
auth_test = AuthTest(test_msg)
auth_test.create_account(**account_args).execute_test(negative_case=True)
AuthTest("Reset LDAP_ADD_ENTRY_FAIL fault point").reset_fault("LDAP_ADD_ENTRY_FAIL").execute_test()
auth_test.command_response_should_have("Account wasn't created.")
LdapOps().delete_account("s3test")
AuthTest("Set LDAP_ADD_ENTRY_FAIL fault point").inject_fault("LDAP_ADD_ENTRY_FAIL", "SKIP_FIRST_N_TIMES", 4).execute_test()
test_msg = "Create account s3test should fail when create policy ou fails"
auth_test = AuthTest(test_msg)
auth_test.create_account(**account_args).execute_test(negative_case=True)
AuthTest("Reset LDAP_ADD_ENTRY_FAIL fault point").reset_fault("LDAP_ADD_ENTRY_FAIL").execute_test()
auth_test.command_response_should_have("Account wasn't created.")
LdapOps().delete_account("s3test")
AuthTest("Set LDAP_ADD_ENTRY_FAIL fault point").inject_fault("LDAP_ADD_ENTRY_FAIL", "SKIP_FIRST_N_TIMES", 5).execute_test()
test_msg = "Create account s3test should fail when create root user account fails"
auth_test = AuthTest(test_msg)
auth_test.create_account(**account_args).execute_test(negative_case=True)
AuthTest("Reset LDAP_ADD_ENTRY_FAIL fault point").reset_fault("LDAP_ADD_ENTRY_FAIL").execute_test()
auth_test.command_response_should_have("Account wasn't created.")
LdapOps().delete_account("s3test")
AuthTest("Set LDAP_ADD_ENTRY_FAIL fault point").inject_fault("LDAP_ADD_ENTRY_FAIL", "SKIP_FIRST_N_TIMES", 6).execute_test()
test_msg = "Create account s3test should fail when create access key for root user account fails"
auth_test = AuthTest(test_msg)
auth_test.create_account(**account_args).execute_test(negative_case=True)
AuthTest("Reset LDAP_ADD_ENTRY_FAIL fault point").reset_fault("LDAP_ADD_ENTRY_FAIL").execute_test()
auth_test.command_response_should_have("Account wasn't created.")
LdapOps().delete_account("s3test")
# Create Account
test_msg = "Create account s3test"
account_response_pattern = "AccountId = [\w-]*, CanonicalId = [\w-]*, RootUserName = [\w+=,.@-]*, AccessKeyId = [\w-]*, SecretKey = [\w/+]*$"
auth_test = AuthTest(test_msg)
result = auth_test.create_account(**account_args).execute_test()
result.command_should_match_pattern(account_response_pattern)
account_response_elements = get_response_elements(result.status.stdout)
# Set S3ClientConfig with root credentials
S3ClientConfig.access_key_id = account_response_elements['AccessKeyId']
S3ClientConfig.secret_key = account_response_elements['SecretKey']
# Add the access key id for clean up
access_key_id.append(account_response_elements['AccessKeyId'])
test_msg = "Create account s3test should fail if the account already exist"
auth_test = AuthTest(test_msg)
auth_test.create_account(**account_args).execute_test(negative_case=True)\
.command_response_should_have("The request was rejected because it attempted to create an account that already exists.")
# LIST
test_msg = "List accounts"
accounts_response_pattern = "AccountName = [\w-]*, AccountId = [\w-]*, CanonicalId = [\w-]*, Email = [\w.@]*"
auth_test = AuthTest(test_msg)
auth_test.list_account(**account_args).execute_test()\
.command_should_match_pattern(accounts_response_pattern)
# Fail to search account while creating account
AuthTest("Set LDAP_SEARCH_FAIL fault point").inject_fault("LDAP_SEARCH_FAIL", "FAIL_ALWAYS", 0).execute_test()
test_msg = "Create account s3test should fail if search account fails"
auth_test = AuthTest(test_msg)
auth_test.create_account(**account_args).execute_test(negative_case=True)\
.command_response_should_have("Account wasn't created.")
test_msg = "List accounts should fail if search accounts fails"
auth_list_test = AuthTest(test_msg)
auth_list_test.list_account(**account_args).execute_test(negative_case=True)
AuthTest("Reset LDAP_SEARCH_FAIL fault point").reset_fault("LDAP_SEARCH_FAIL").execute_test()
auth_list_test.command_response_should_have("Failed to list accounts!")
    # Fail to get attributes of searched account while creating account
AuthTest("Set LDAP_GET_ATTR_FAIL fault point").inject_fault("LDAP_GET_ATTR_FAIL", "FAIL_ALWAYS", 0).execute_test()
test_msg = "Create account s3test should fail if find attributes of searched account fails"
auth_test = AuthTest(test_msg)
auth_test.create_account(**account_args).execute_test(negative_case=True)\
.command_response_should_have("Account wasn't created.")
test_msg = "List accounts should fail if find attributes of searched account fails"
auth_list_test = AuthTest(test_msg)
auth_list_test.list_account(**account_args).execute_test(negative_case=True)
AuthTest("Reset LDAP_GET_ATTR_FAIL fault point").reset_fault("LDAP_GET_ATTR_FAIL").execute_test()
auth_list_test.command_response_should_have("Failed to list accounts!")
def user_tests():
user_args = {}
user_args['UserName'] = "s3user1"
# CREATE
AuthTest("Set LDAP_SEARCH_FAIL fault point").inject_fault("LDAP_SEARCH_FAIL", "SKIP_FIRST_N_TIMES", 3).execute_test()
test_msg = "Create user s3user1 should fail if search user fails"
auth_test = AuthTest(test_msg)
auth_test.create_user(**user_args).execute_test(negative_case=True)
AuthTest("Reset LDAP_SEARCH_FAIL fault point").reset_fault("LDAP_SEARCH_FAIL").execute_test()
auth_test.command_response_should_have("Failed to create user.")
AuthTest("Set LDAP_ADD_ENTRY_FAIL fault point").inject_fault("LDAP_ADD_ENTRY_FAIL", "FAIL_ALWAYS", 0).execute_test()
test_msg = "Create user should fail if save user fails"
auth_test = AuthTest(test_msg)
auth_test.create_user(**user_args).execute_test(negative_case=True)
AuthTest("Reset LDAP_ADD_ENTRY_FAIL fault point").reset_fault("LDAP_ADD_ENTRY_FAIL").execute_test()
auth_test.command_response_should_have("Failed to create user.")
test_msg = "Create User s3user1 (default path)"
user1_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /$"
auth_test = AuthTest(test_msg)
result = auth_test.create_user(**user_args).execute_test()
result.command_should_match_pattern(user1_response_pattern)
test_msg = "Create user should fail if user already exist"
auth_test = AuthTest(test_msg)
auth_test.create_user(**user_args).execute_test(negative_case=True)\
.command_response_should_have("Failed to create user.")
# LIST
AuthTest("Set LDAP_SEARCH_FAIL fault point").inject_fault("LDAP_SEARCH_FAIL", "SKIP_FIRST_N_TIMES", 3).execute_test()
test_msg = "List users should fail if find users fails"
user_args = {}
auth_test = AuthTest(test_msg)
auth_test.list_users(**user_args).execute_test(negative_case=True)
AuthTest("Reset LDAP_SEARCH_FAIL fault point").reset_fault("LDAP_SEARCH_FAIL").execute_test()
auth_test.command_response_should_have("Failed to list users.")
# UPDATE
AuthTest("Set LDAP_UPDATE_ENTRY_FAIL fault point").inject_fault("LDAP_UPDATE_ENTRY_FAIL", "FAIL_ALWAYS", 0).execute_test()
test_msg = "Update user should fail if update fails"
user_args = {}
user_args['UserName'] = "s3user1"
user_args['NewUserName'] = "s3user11"
auth_test = AuthTest(test_msg)
auth_test.update_user(**user_args).execute_test(negative_case=True)
AuthTest("Reset LDAP_UPDATE_ENTRY_FAIL fault point").reset_fault("LDAP_UPDATE_ENTRY_FAIL").execute_test()
auth_test.command_response_should_have("Failed to update user info.")
test_msg = "Update user should fail if no parameters passed"
user_args = {}
user_args['UserName'] = "s3user1"
auth_test = AuthTest(test_msg)
auth_test.update_user(**user_args).execute_test(negative_case=True)\
.command_response_should_have("Failed to update user info.")
test_msg = "Update user fails if new username and old username are same"
user_args = {}
user_args['UserName'] = "s3user1"
user_args['NewUserName'] = "s3user1"
auth_test = AuthTest(test_msg)
auth_test.update_user(**user_args).execute_test(negative_case=True)\
.command_response_should_have("Failed to update user info.")
AuthTest("Set LDAP_SEARCH_FAIL fault point").inject_fault("LDAP_SEARCH_FAIL", "SKIP_FIRST_N_TIMES", 3).execute_test()
    test_msg = 'Update User s3user1 (new name = s3userA, new path = /test/success/) should fail'
user_args = {}
user_args['UserName'] = "s3user1"
user_args['NewUserName'] = "s3userA"
user_args['NewPath'] = "/test/success/"
auth_test = AuthTest(test_msg)
auth_test.update_user(**user_args).execute_test(negative_case=True)
AuthTest("Reset LDAP_SEARCH_FAIL fault point").reset_fault("LDAP_SEARCH_FAIL").execute_test()
auth_test.command_response_should_have("Failed to update user info.")
AuthTest("Set LDAP_SEARCH_FAIL fault point").inject_fault("LDAP_SEARCH_FAIL", "SKIP_FIRST_N_TIMES", 4).execute_test()
    test_msg = 'Update User s3user1 (new name = s3userA, new path = /test/success/) should fail'
user_args = {}
user_args['UserName'] = "s3user1"
user_args['NewUserName'] = "s3userA"
user_args['NewPath'] = "/test/success/"
auth_test = AuthTest(test_msg)
auth_test.update_user(**user_args).execute_test(negative_case=True)
AuthTest("Reset LDAP_SEARCH_FAIL fault point").reset_fault("LDAP_SEARCH_FAIL").execute_test()
auth_test.command_response_should_have("Failed to update user info.")
test_msg = "Update user should fail if user doesn't exist"
user_args = {}
user_args['UserName'] = "noSuchUser"
user_args['NewUserName'] = "s3userA"
user_args['NewPath'] = "/test/success/"
auth_test = AuthTest(test_msg)
auth_test.update_user(**user_args).execute_test(negative_case=True)\
.command_response_should_have("Failed to update user info.")
# DELETE
AuthTest("Set LDAP_SEARCH_FAIL fault point").inject_fault("LDAP_SEARCH_FAIL", "SKIP_FIRST_N_TIMES", 3).execute_test()
test_msg = "Delete user s3user1 should fail if find user fails"
user_args['UserName'] = "s3user1"
auth_test = AuthTest(test_msg)
auth_test.delete_user(**user_args).execute_test(negative_case=True)
AuthTest("Reset LDAP_SEARCH_FAIL fault point").reset_fault("LDAP_SEARCH_FAIL").execute_test()
auth_test.command_response_should_have("Failed to delete user.")
# Fail to delete user
AuthTest("Set LDAP_DELETE_ENTRY_FAIL fault point").inject_fault("LDAP_DELETE_ENTRY_FAIL", "FAIL_ALWAYS", 0).execute_test()
test_msg = "Delete user should fail if delete operation fails"
user_args['UserName'] = "s3user1"
auth_test = AuthTest(test_msg)
auth_test.delete_user(**user_args).execute_test(negative_case=True)
AuthTest("Reset LDAP_DELETE_ENTRY_FAIL fault point").reset_fault("LDAP_DELETE_ENTRY_FAIL").execute_test()
auth_test.command_response_should_have("Failed to delete user.")
test_msg = "Create access key for user s3user1"
access_key_args = {}
access_key_args['UserName'] = 's3user1'
accesskey_response_pattern = "AccessKeyId = [\w-]*, SecretAccessKey = [\w/+]*, Status = [\w]*$"
auth_test = AuthTest(test_msg)
result = auth_test.create_access_key(**access_key_args).execute_test()
result.command_should_match_pattern(accesskey_response_pattern)
accesskey_response_elements = get_response_elements(result.status.stdout)
access_key_args['AccessKeyId'] = accesskey_response_elements['AccessKeyId']
test_msg = "Delete user s3user1 should fail when it has access key"
auth_test = AuthTest(test_msg)
auth_test.delete_user(**user_args).execute_test(negative_case=True)\
.command_response_should_have("Failed to delete user.")
test_msg = "Delete access key of s3user1"
auth_test = AuthTest(test_msg)
auth_test.delete_access_key(**access_key_args).execute_test()\
.command_response_should_have("Access key deleted")
test_msg = "Delete user s3user1 should succeed"
user_args['UserName'] = "s3user1"
auth_test = AuthTest(test_msg)
auth_test.delete_user(**user_args).execute_test()\
.command_response_should_have("User deleted")
test_msg = "Delete user fails when user does not exist"
user_args['UserName'] = "s3user1"
auth_test = AuthTest(test_msg)
auth_test.delete_user(**user_args).execute_test(negative_case=True)\
.command_response_should_have("Failed to delete user.")
def create_test_user():
test_msg = "Create User s3user2 (path = /test/)"
user_args = {}
user_args['UserName'] = "s3user2"
user_args['Path'] = "/test/"
user2_response_pattern = "UserId = [\w-]*, ARN = [\S]*, Path = /test/$"
auth_test = AuthTest(test_msg)
result = auth_test.create_user(**user_args).execute_test()
result.command_should_match_pattern(user2_response_pattern)
def delete_test_user():
test_msg = 'Delete User s3user2'
user_args = {}
user_args['UserName'] = "s3user2"
auth_test = AuthTest(test_msg)
result = auth_test.delete_user(**user_args).execute_test()
result.command_response_should_have("User deleted.")
# Test create user API
# Each user can have only 2 access keys. Hence test all the APIs in the same function.
def accesskey_tests():
create_test_user()
access_key_args = {}
# CREATE
AuthTest("Set LDAP_SEARCH_FAIL fault point").inject_fault("LDAP_SEARCH_FAIL", "SKIP_FIRST_N_TIMES", 3).execute_test()
test_msg = 'Create access key (user name is root) should fail'
access_key_args['UserName'] = 'root'
auth_test = AuthTest(test_msg)
auth_test.create_access_key(**access_key_args).execute_test(negative_case=True)
AuthTest("Reset LDAP_SEARCH_FAIL fault point").reset_fault("LDAP_SEARCH_FAIL").execute_test()
auth_test.command_should_match_pattern("Failed to create access key.")
AuthTest("Set LDAP_SEARCH_FAIL fault point").inject_fault("LDAP_SEARCH_FAIL", "SKIP_FIRST_N_TIMES", 4).execute_test()
test_msg = 'Create access key (user name is root) should fail'
access_key_args['UserName'] = 'root'
auth_test = AuthTest(test_msg)
auth_test.create_access_key(**access_key_args).execute_test(negative_case=True)
AuthTest("Reset LDAP_SEARCH_FAIL fault point").reset_fault("LDAP_SEARCH_FAIL").execute_test()
auth_test.command_should_match_pattern("Failed to create access key.")
AuthTest("Set LDAP_ADD_ENTRY_FAIL fault point").inject_fault("LDAP_ADD_ENTRY_FAIL", "FAIL_ALWAYS", 0).execute_test()
test_msg = 'Create access key (user name is root) should fail'
access_key_args['UserName'] = 'root'
auth_test = AuthTest(test_msg)
auth_test.create_access_key(**access_key_args).execute_test(negative_case=True)
AuthTest("Reset LDAP_ADD_ENTRY_FAIL fault point").reset_fault("LDAP_ADD_ENTRY_FAIL").execute_test()
auth_test.command_should_match_pattern("Failed to create access key.")
test_msg = 'Create access key (user name is root)'
access_key_args['UserName'] = 'root'
accesskey_response_pattern = "AccessKeyId = [\w-]*, SecretAccessKey = [\w/+]*, Status = [\w]*$"
auth_test = AuthTest(test_msg)
result = auth_test.create_access_key(**access_key_args).execute_test()
result.command_should_match_pattern(accesskey_response_pattern)
accesskey_response_elements = get_response_elements(result.status.stdout)
access_key_args['AccessKeyId'] = accesskey_response_elements['AccessKeyId']
# LIST
    test_msg = 'List access keys should succeed without passing username'
access_key_args.pop('UserName', None)
accesskey_response_pattern = "UserName = root, AccessKeyId = [\w-]*, Status = Active"
auth_test = AuthTest(test_msg)
auth_test.list_access_keys(**access_key_args).execute_test(negative_case=True)\
.command_should_match_pattern(accesskey_response_pattern)
AuthTest("Set LDAP_SEARCH_FAIL fault point").inject_fault("LDAP_SEARCH_FAIL", "SKIP_FIRST_N_TIMES", 3).execute_test()
test_msg = 'List access keys should fail'
access_key_args['UserName'] = 'root'
auth_test = AuthTest(test_msg)
auth_test.list_access_keys(**access_key_args).execute_test(negative_case=True)
AuthTest("Reset LDAP_SEARCH_FAIL fault point").reset_fault("LDAP_SEARCH_FAIL").execute_test()
auth_test.command_response_should_have("Failed to list access keys.")
test_msg = "List access keys should fail if user doesn't exist"
access_key_args['UserName'] = 'noSuchUser'
auth_test = AuthTest(test_msg)
auth_test.list_access_keys(**access_key_args).execute_test(negative_case=True)\
.command_response_should_have("Failed to list access keys.")
AuthTest("Set LDAP_SEARCH_FAIL fault point").inject_fault("LDAP_SEARCH_FAIL", "SKIP_FIRST_N_TIMES", 4).execute_test()
test_msg = 'List access keys should fail'
access_key_args['UserName'] = 'root'
auth_test = AuthTest(test_msg)
auth_test.list_access_keys(**access_key_args).execute_test(negative_case=True)
AuthTest("Reset LDAP_SEARCH_FAIL fault point").reset_fault("LDAP_SEARCH_FAIL").execute_test()
auth_test.command_response_should_have("Failed to list access keys.")
# UPDATE
AuthTest("Set LDAP_SEARCH_FAIL fault point").inject_fault("LDAP_SEARCH_FAIL", "SKIP_FIRST_N_TIMES", 3).execute_test()
test_msg = 'Update access key should fail'
access_key_args['Status'] = "Inactive"
access_key_args['UserName'] = 'root'
auth_test = AuthTest(test_msg)
auth_test.update_access_key(**access_key_args).execute_test(negative_case=True)
AuthTest("Reset LDAP_SEARCH_FAIL fault point").reset_fault("LDAP_SEARCH_FAIL").execute_test()
auth_test.command_response_should_have("Failed to update access key.")
AuthTest("Set LDAP_SEARCH_FAIL fault point").inject_fault("LDAP_SEARCH_FAIL", "SKIP_FIRST_N_TIMES", 4).execute_test()
test_msg = 'Update access key should fail'
access_key_args['Status'] = "Inactive"
access_key_args['UserName'] = 'root'
auth_test = AuthTest(test_msg)
auth_test.update_access_key(**access_key_args).execute_test(negative_case=True)
AuthTest("Reset LDAP_SEARCH_FAIL fault point").reset_fault("LDAP_SEARCH_FAIL").execute_test()
auth_test.command_response_should_have("Failed to update access key.")
AuthTest("Set LDAP_UPDATE_ENTRY_FAIL fault point").inject_fault("LDAP_UPDATE_ENTRY_FAIL", "FAIL_ALWAYS", 0).execute_test()
test_msg = 'Update access key should fail'
access_key_args['Status'] = "Inactive"
access_key_args['UserName'] = 'root'
auth_test = AuthTest(test_msg)
auth_test.update_access_key(**access_key_args).execute_test(negative_case=True)
AuthTest("Reset LDAP_UPDATE_ENTRY_FAIL fault point").reset_fault("LDAP_UPDATE_ENTRY_FAIL").execute_test()
auth_test.command_response_should_have("Failed to update access key.")
test_msg = "Update access key should fail if user doesn't exist"
access_key_args['Status'] = "Inactive"
access_key_args['UserName'] = 'NoSuchUser'
auth_test = AuthTest(test_msg)
auth_test.update_access_key(**access_key_args).execute_test(negative_case=True)\
.command_response_should_have("Failed to update access key.")
test_msg = 'Update access key should fail if user is invalid'
access_key_args['Status'] = "Inactive"
access_key_args['UserName'] = 's3user2'
auth_test = AuthTest(test_msg)
auth_test.update_access_key(**access_key_args).execute_test(negative_case=True)\
.command_response_should_have("Failed to update access key.")
test_msg = 'Update access key should fail if access key is invalid'
access_key_args['Status'] = "Inactive"
access_key_args['UserName'] = 'root'
ak_holder = access_key_args['AccessKeyId']
access_key_args['AccessKeyId'] = "NO-SUCH-ACCESS-KEY"
auth_test = AuthTest(test_msg)
auth_test.update_access_key(**access_key_args).execute_test(negative_case=True)\
.command_response_should_have("Failed to update access key.")
access_key_args['AccessKeyId'] = ak_holder
# DELETE
AuthTest("Set LDAP_SEARCH_FAIL fault point").inject_fault("LDAP_SEARCH_FAIL", "SKIP_FIRST_N_TIMES", 3).execute_test()
test_msg = 'Delete access key should fail'
access_key_args['UserName'] = 'root'
auth_test = AuthTest(test_msg)
auth_test.delete_access_key(**access_key_args).execute_test(negative_case=True)
AuthTest("Reset LDAP_SEARCH_FAIL fault point").reset_fault("LDAP_SEARCH_FAIL").execute_test()
auth_test.command_response_should_have("Failed to delete access key")
AuthTest("Set LDAP_SEARCH_FAIL fault point").inject_fault("LDAP_SEARCH_FAIL", "SKIP_FIRST_N_TIMES", 4).execute_test()
test_msg = 'Delete access key should fail'
access_key_args['UserName'] = 'root'
auth_test = AuthTest(test_msg)
auth_test.delete_access_key(**access_key_args).execute_test(negative_case=True)
AuthTest("Reset LDAP_SEARCH_FAIL fault point").reset_fault("LDAP_SEARCH_FAIL").execute_test()
auth_test.command_response_should_have("Failed to delete access key")
test_msg = 'Delete access key should fail if username is wrong'
access_key_args['UserName'] = 's3user2'
auth_test = AuthTest(test_msg)
auth_test.delete_access_key(**access_key_args).execute_test(negative_case=True)\
.command_response_should_have("Failed to delete access key")
AuthTest("Set LDAP_DELETE_ENTRY_FAIL fault point").inject_fault("LDAP_DELETE_ENTRY_FAIL", "FAIL_ALWAYS", 0).execute_test()
test_msg = 'Delete access key should fail if delete entry fails'
access_key_args['UserName'] = 'root'
auth_test = AuthTest(test_msg)
auth_test.delete_access_key(**access_key_args).execute_test(negative_case=True)
AuthTest("Reset LDAP_DELETE_ENTRY_FAIL fault point").reset_fault("LDAP_DELETE_ENTRY_FAIL").execute_test()
    auth_test.command_response_should_have("Failed to delete access key")
test_msg = 'Delete access key'
access_key_args['UserName'] = 'root'
auth_test = AuthTest(test_msg)
auth_test.delete_access_key(**access_key_args).execute_test(negative_case=True)\
.command_response_should_have("Access key deleted.")
test_msg = 'Delete access key should fail if access key is invalid'
access_key_args['UserName'] = 'root'
access_key_args['AccessKeyId'] = "INVALID-ACCESS-KEY"
auth_test = AuthTest(test_msg)
auth_test.delete_access_key(**access_key_args).execute_test(negative_case=True)\
.command_response_should_have("Failed to delete access key")
delete_test_user()
def role_tests():
policy_doc = os.path.join(os.path.dirname(__file__), 'resources', 'policy')
policy_doc_full_path = os.path.abspath(policy_doc)
# CREATE
AuthTest("Set LDAP_SEARCH_FAIL fault point").inject_fault("LDAP_SEARCH_FAIL", "SKIP_FIRST_N_TIMES", 3).execute_test()
test_msg = 'Create role (Path not specified) should fail'
role_args = {}
role_args['RoleName'] = 'S3Test'
role_args['AssumeRolePolicyDocument'] = policy_doc_full_path
auth_test = AuthTest(test_msg)
auth_test.create_role(**role_args).execute_test(negative_case=True)
AuthTest("Reset LDAP_SEARCH_FAIL fault point").reset_fault("LDAP_SEARCH_FAIL").execute_test()
auth_test.command_response_should_have("Exception occured while creating role")
AuthTest("Set LDAP_ADD_ENTRY_FAIL fault point").inject_fault("LDAP_ADD_ENTRY_FAIL", "FAIL_ALWAYS", 0).execute_test()
test_msg = 'Create role (Path not specified) should fail if save role failed'
role_args = {}
role_args['RoleName'] = 'S3Test'
role_args['AssumeRolePolicyDocument'] = policy_doc_full_path
auth_test = AuthTest(test_msg)
auth_test.create_role(**role_args).execute_test(negative_case=True)
AuthTest("Reset LDAP_ADD_ENTRY_FAIL fault point").reset_fault("LDAP_ADD_ENTRY_FAIL").execute_test()
auth_test.command_response_should_have("Exception occured while creating role")
test_msg = 'Create role (Path not specified)'
role_args = {}
role_args['RoleName'] = 'S3Test'
role_args['AssumeRolePolicyDocument'] = policy_doc_full_path
role_response_pattern = "RoleId = [\w-]*, RoleName = S3Test, ARN = [\S]*, Path = /$"
auth_test = AuthTest(test_msg)
auth_test.create_role(**role_args).execute_test()\
.command_should_match_pattern(role_response_pattern)
test_msg = 'Create role (Path not specified) should fail if role already exist'
role_args = {}
role_args['RoleName'] = 'S3Test'
role_args['AssumeRolePolicyDocument'] = policy_doc_full_path
auth_test = AuthTest(test_msg)
auth_test.create_role(**role_args).execute_test(negative_case=True)\
.command_response_should_have("Exception occured while creating role")
# LIST
AuthTest("Set LDAP_SEARCH_FAIL fault point").inject_fault("LDAP_SEARCH_FAIL", "SKIP_FIRST_N_TIMES", 3).execute_test()
test_msg = 'List roles should fail'
role_args = {}
role_args['RoleName'] = 'S3Test'
auth_test = AuthTest(test_msg)
auth_test.list_roles(**role_args).execute_test(negative_case=True)
AuthTest("Reset LDAP_SEARCH_FAIL fault point").reset_fault("LDAP_SEARCH_FAIL").execute_test()
auth_test.command_response_should_have("Exception occured while listing roles")
AuthTest("Set LDAP_SEARCH_FAIL fault point").inject_fault("LDAP_SEARCH_FAIL", "SKIP_FIRST_N_TIMES", 3).execute_test()
test_msg = "Delete role should fail"
auth_test = AuthTest(test_msg)
auth_test.delete_role(**role_args).execute_test(negative_case=True)
AuthTest("Reset LDAP_SEARCH_FAIL fault point").reset_fault("LDAP_SEARCH_FAIL").execute_test()
auth_test.command_response_should_have("Exception occured while deleting role")
# DELETE
AuthTest("Set LDAP_DELETE_ENTRY_FAIL fault point").inject_fault("LDAP_DELETE_ENTRY_FAIL", "FAIL_ALWAYS", 0).execute_test()
test_msg = "Delete role should fail"
auth_test = AuthTest(test_msg)
auth_test.delete_role(**role_args).execute_test(negative_case=True)
AuthTest("Reset LDAP_DELETE_ENTRY_FAIL fault point").reset_fault("LDAP_DELETE_ENTRY_FAIL").execute_test()
auth_test.command_response_should_have("Exception occured while deleting role")
test_msg = 'Delete role'
auth_test = AuthTest(test_msg)
auth_test.delete_role(**role_args).execute_test(negative_case=True)\
.command_response_should_have("Role deleted.")
test_msg = "Delete role should fail if role doesn't exist"
auth_test = AuthTest(test_msg)
auth_test.delete_role(**role_args).execute_test(negative_case=True)\
.command_response_should_have("Exception occured while deleting role")
    # Remove the following test by making appropriate changes in auth_spec
test_msg = 'Create role (Path is /test/)'
role_args['RoleName'] = 'S3Test'
role_args['Path'] = '/test/'
role_args['AssumeRolePolicyDocument'] = policy_doc_full_path
role_response_pattern = "RoleId = [\w-]*, RoleName = S3Test, ARN = [\S]*, Path = /test/$"
auth_test = AuthTest(test_msg)
auth_test.create_role(**role_args).execute_test()\
.command_should_match_pattern(role_response_pattern)
test_msg = 'List role (Path is /test)'
role_args = {}
role_args['Path'] = '/test/'
role_response_pattern = "RoleId = S3Test, RoleName = S3Test, ARN = [\S]*, Path = /test/$"
auth_test = AuthTest(test_msg)
auth_test.list_roles(**role_args).execute_test()\
.command_should_match_pattern(role_response_pattern)
test_msg = 'Delete role'
role_args['RoleName'] = 'S3Test'
auth_test = AuthTest(test_msg)
auth_test.delete_role(**role_args).execute_test()\
.command_response_should_have("Role deleted.")
def saml_provider_tests():
metadata_doc = os.path.join(os.path.dirname(__file__), 'resources', 'saml_metadata')
metadata_doc_full_path = os.path.abspath(metadata_doc)
# CREATE
AuthTest("Set LDAP_SEARCH_FAIL fault point").inject_fault("LDAP_SEARCH_FAIL", "SKIP_FIRST_N_TIMES", 3).execute_test()
test_msg = 'Create SAML provider should fail'
saml_provider_args = {}
saml_provider_args['Name'] = 'S3IDP'
saml_provider_args['SAMLMetadataDocument'] = metadata_doc_full_path
auth_test = AuthTest(test_msg)
auth_test.create_saml_provider(**saml_provider_args).execute_test(negative_case=True)
AuthTest("Reset LDAP_SEARCH_FAIL fault point").reset_fault("LDAP_SEARCH_FAIL").execute_test()
auth_test.command_response_should_have("Exception occured while creating saml provider")
AuthTest("Set LDAP_ADD_ENTRY_FAIL fault point").inject_fault("LDAP_ADD_ENTRY_FAIL", "FAIL_ALWAYS", 0).execute_test()
test_msg = 'Create SAML provider should fail'
saml_provider_args = {}
saml_provider_args['Name'] = 'S3IDP'
saml_provider_args['SAMLMetadataDocument'] = metadata_doc_full_path
auth_test = AuthTest(test_msg)
auth_test.create_saml_provider(**saml_provider_args).execute_test(negative_case=True)
AuthTest("Reset LDAP_ADD_ENTRY_FAIL fault point").reset_fault("LDAP_ADD_ENTRY_FAIL").execute_test()
auth_test.command_response_should_have("Exception occured while creating saml provider")
test_msg = 'Create SAML provider'
saml_provider_args = {}
saml_provider_args['Name'] = 'S3IDP'
saml_provider_args['SAMLMetadataDocument'] = metadata_doc_full_path
saml_provider_response_pattern = "SAMLProviderArn = [\S]*$"
auth_test = AuthTest(test_msg)
result = auth_test.create_saml_provider(**saml_provider_args).execute_test()
result.command_should_match_pattern(saml_provider_response_pattern)
response_elements = get_response_elements(result.status.stdout)
saml_provider_args['SAMLProviderArn'] = response_elements['SAMLProviderArn']
test_msg = 'Create SAML provider should fail if it already exists'
saml_provider_args = {}
saml_provider_args['Name'] = 'S3IDP'
saml_provider_args['SAMLMetadataDocument'] = metadata_doc_full_path
auth_test = AuthTest(test_msg)
auth_test.create_saml_provider(**saml_provider_args).execute_test(negative_case=True)\
.command_response_should_have("Exception occured while creating saml provider")
# LIST
AuthTest("Set LDAP_SEARCH_FAIL fault point").inject_fault("LDAP_SEARCH_FAIL", "SKIP_FIRST_N_TIMES", 3).execute_test()
test_msg = 'List SAML providers'
saml_provider_args = {}
auth_test = AuthTest(test_msg)
auth_test.list_saml_providers(**saml_provider_args).execute_test(negative_case=True)
AuthTest("Reset LDAP_SEARCH_FAIL fault point").reset_fault("LDAP_SEARCH_FAIL").execute_test()
auth_test.command_response_should_have("Exception occured while listing SAML providers")
# UPDATE
AuthTest("Set LDAP_SEARCH_FAIL fault point").inject_fault("LDAP_SEARCH_FAIL", "SKIP_FIRST_N_TIMES", 3).execute_test()
test_msg = 'Update SAML provider should fail'
saml_provider_args = {}
saml_provider_args['SAMLProviderArn'] = "arn:seagate:iam:::S3IDP"
saml_provider_args['SAMLMetadataDocument'] = metadata_doc_full_path
auth_test = AuthTest(test_msg)
auth_test.update_saml_provider(**saml_provider_args).execute_test(negative_case=True)
AuthTest("Reset LDAP_SEARCH_FAIL fault point").reset_fault("LDAP_SEARCH_FAIL").execute_test()
auth_test.command_response_should_have("Exception occured while updating SAML provider")
test_msg = 'Update SAML provider should fail if provider is invalid'
saml_provider_args = {}
saml_provider_args['SAMLProviderArn'] = "arn:seagate:iam:::S3INVALID"
saml_provider_args['SAMLMetadataDocument'] = metadata_doc_full_path
auth_test = AuthTest(test_msg)
auth_test.update_saml_provider(**saml_provider_args).execute_test(negative_case=True)\
.command_response_should_have("Exception occured while updating SAML provider")
AuthTest("Set LDAP_UPDATE_ENTRY_FAIL fault point").inject_fault("LDAP_UPDATE_ENTRY_FAIL", "FAIL_ALWAYS", 0).execute_test()
test_msg = 'Update SAML provider should fail'
saml_provider_args = {}
saml_provider_args['SAMLProviderArn'] = "arn:seagate:iam:::S3IDP"
saml_provider_args['SAMLMetadataDocument'] = metadata_doc_full_path
auth_test = AuthTest(test_msg)
auth_test.update_saml_provider(**saml_provider_args).execute_test(negative_case=True)
AuthTest("Reset LDAP_UPDATE_ENTRY_FAIL fault point").reset_fault("LDAP_UPDATE_ENTRY_FAIL").execute_test()
auth_test.command_response_should_have("Exception occured while updating SAML provider")
# DELETE
saml_provider_args = {}
saml_provider_args['Name'] = 'S3IDP'
saml_provider_args['SAMLMetadataDocument'] = metadata_doc_full_path
saml_provider_args['SAMLProviderArn'] = response_elements['SAMLProviderArn']
AuthTest("Set LDAP_SEARCH_FAIL fault point").inject_fault("LDAP_SEARCH_FAIL", "SKIP_FIRST_N_TIMES", 3).execute_test()
test_msg = "Delete SAML provider should fail"
auth_test = AuthTest(test_msg)
auth_test.delete_saml_provider(**saml_provider_args).execute_test(negative_case=True)
AuthTest("Reset LDAP_SEARCH_FAIL fault point").reset_fault("LDAP_SEARCH_FAIL").execute_test()
auth_test.command_response_should_have("Exception occured while deleting SAML provider")
AuthTest("Set LDAP_DELETE_ENTRY_FAIL fault point").inject_fault("LDAP_DELETE_ENTRY_FAIL", "FAIL_ALWAYS", 0).execute_test()
test_msg = "Delete SAML provider should fail"
auth_test = AuthTest(test_msg)
auth_test.delete_saml_provider(**saml_provider_args).execute_test(negative_case=True)
AuthTest("Reset LDAP_DELETE_ENTRY_FAIL fault point").reset_fault("LDAP_DELETE_ENTRY_FAIL").execute_test()
auth_test.command_response_should_have("Exception occured while deleting SAML provider")
test_msg = 'Delete SAML provider'
auth_test = AuthTest(test_msg)
auth_test.delete_saml_provider(**saml_provider_args).execute_test()\
.command_response_should_have("SAML provider deleted.")
test_msg = "Delete SAML provider should fail if it doesn't exist"
auth_test = AuthTest(test_msg)
auth_test.delete_saml_provider(**saml_provider_args).execute_test(negative_case=True)\
.command_response_should_have("Exception occured while deleting SAML provider")
def get_federation_token_test():
federation_token_args = {}
federation_token_args['Name'] = 's3root'
AuthTest("Set LDAP_ADD_ENTRY_FAIL fault point").inject_fault("LDAP_ADD_ENTRY_FAIL", "SKIP_FIRST_N_TIMES", 1).execute_test()
test_msg = 'Get Federation Token should fail'
auth_test = AuthTest(test_msg)
auth_test.get_federation_token(**federation_token_args).execute_test(negative_case=True)
AuthTest("Reset LDAP_ADD_ENTRY_FAIL fault point").reset_fault("LDAP_ADD_ENTRY_FAIL").execute_test()
auth_test.command_response_should_have("Exception occured while creating federation token")
AuthTest("Set LDAP_ADD_ENTRY_FAIL fault point").inject_fault("LDAP_ADD_ENTRY_FAIL", "FAIL_ALWAYS", 0).execute_test()
test_msg = 'Get Federation Token should fail'
auth_test = AuthTest(test_msg)
auth_test.get_federation_token(**federation_token_args).execute_test(negative_case=True)
AuthTest("Reset LDAP_ADD_ENTRY_FAIL fault point").reset_fault("LDAP_ADD_ENTRY_FAIL").execute_test()
auth_test.command_response_should_have("Exception occured while creating federation token")
test_msg = 'Get Federation Token with invalid DurationSeconds value should fail'
federation_token_args = {}
federation_token_args['Name'] = 's3root'
federation_token_args['DurationSeconds'] = '2'
auth_test = AuthTest(test_msg)
auth_test.get_federation_token(**federation_token_args).execute_test(negative_case=True)\
.command_response_should_have("Parameter validation failed")
test_msg = 'Get Federation Token with duration of 905 seconds'
federation_token_args = {}
federation_token_args['Name'] = 's3root'
federation_token_args['DurationSeconds'] = '905'
response_pattern = "FederatedUserId = [\S]*, AccessKeyId = [\w-]*, SecretAccessKey = [\w/+]*, SessionToken = [\w/+]*$"
auth_test = AuthTest(test_msg)
result = auth_test.get_federation_token(**federation_token_args).execute_test()
result.command_should_match_pattern(response_pattern)
LdapOps().delete_account("s3test")
before_all()
account_tests()
user_tests()
accesskey_tests()
role_tests()
saml_provider_tests()
get_federation_token_test()
|
py
|
1a5dda58e3aeea86cfbc60926574dede83cab2b4
|
# -*- coding: UTF-8 -*-
# Authors: Thomas Hartmann <[email protected]>
# Dirk Gütlin <[email protected]>
#
# License: BSD-3-Clause
import numpy as np
from .utils import _create_info, _set_tmin, _create_events, \
_create_event_metadata, _validate_ft_struct
from ...utils import _check_fname, _import_pymatreader_funcs
from ..array.array import RawArray
from ...epochs import EpochsArray
from ...evoked import EvokedArray
def read_raw_fieldtrip(fname, info, data_name='data'):
"""Load continuous (raw) data from a FieldTrip preprocessing structure.
This function expects to find single trial raw data (FT_DATATYPE_RAW) in
the structure data_name is pointing at.
.. warning:: FieldTrip does not normally store the original information
concerning channel location, orientation, type etc. It is
therefore **highly recommended** to provide the info field.
This can be obtained by reading the original raw data file
with MNE functions (without preload). The returned object
contains the necessary info field.
Parameters
----------
fname : str
Path and filename of the .mat file containing the data.
info : dict or None
The info dict of the raw data file corresponding to the data to import.
If this is set to None, limited information is extracted from the
FieldTrip structure.
data_name : str
Name of heading dict/ variable name under which the data was originally
saved in MATLAB.
Returns
-------
raw : instance of RawArray
A Raw Object containing the loaded data.
"""
read_mat = _import_pymatreader_funcs('FieldTrip I/O')
fname = _check_fname(fname, overwrite='read', must_exist=True)
ft_struct = read_mat(fname,
ignore_fields=['previous'],
variable_names=[data_name])
# load data and set ft_struct to the heading dictionary
ft_struct = ft_struct[data_name]
_validate_ft_struct(ft_struct)
info = _create_info(ft_struct, info) # create info structure
data = np.array(ft_struct['trial']) # create the main data array
if data.ndim > 2:
data = np.squeeze(data)
if data.ndim == 1:
data = data[np.newaxis, ...]
if data.ndim != 2:
raise RuntimeError('The data you are trying to load does not seem to '
'be raw data')
raw = RawArray(data, info) # create an MNE RawArray
return raw
def read_epochs_fieldtrip(fname, info, data_name='data',
trialinfo_column=0):
"""Load epoched data from a FieldTrip preprocessing structure.
This function expects to find epoched data in the structure data_name is
pointing at.
    .. warning:: Only epochs with the same number of channels and samples are
supported!
.. warning:: FieldTrip does not normally store the original information
concerning channel location, orientation, type etc. It is
therefore **highly recommended** to provide the info field.
This can be obtained by reading the original raw data file
with MNE functions (without preload). The returned object
contains the necessary info field.
Parameters
----------
fname : str
Path and filename of the .mat file containing the data.
info : dict or None
The info dict of the raw data file corresponding to the data to import.
If this is set to None, limited information is extracted from the
FieldTrip structure.
data_name : str
Name of heading dict/ variable name under which the data was originally
saved in MATLAB.
trialinfo_column : int
Column of the trialinfo matrix to use for the event codes.
Returns
-------
epochs : instance of EpochsArray
An EpochsArray containing the loaded data.
"""
read_mat = _import_pymatreader_funcs('FieldTrip I/O')
ft_struct = read_mat(fname,
ignore_fields=['previous'],
variable_names=[data_name])
# load data and set ft_struct to the heading dictionary
ft_struct = ft_struct[data_name]
_validate_ft_struct(ft_struct)
info = _create_info(ft_struct, info) # create info structure
data = np.array(ft_struct['trial']) # create the epochs data array
events = _create_events(ft_struct, trialinfo_column)
if events is not None:
metadata = _create_event_metadata(ft_struct)
else:
metadata = None
tmin = _set_tmin(ft_struct) # create start time
epochs = EpochsArray(data=data, info=info, tmin=tmin,
events=events, metadata=metadata, proj=False)
return epochs
def read_evoked_fieldtrip(fname, info, comment=None,
data_name='data'):
"""Load evoked data from a FieldTrip timelocked structure.
This function expects to find timelocked data in the structure data_name is
pointing at.
.. warning:: FieldTrip does not normally store the original information
concerning channel location, orientation, type etc. It is
therefore **highly recommended** to provide the info field.
This can be obtained by reading the original raw data file
with MNE functions (without preload). The returned object
contains the necessary info field.
Parameters
----------
fname : str
Path and filename of the .mat file containing the data.
info : dict or None
The info dict of the raw data file corresponding to the data to import.
If this is set to None, limited information is extracted from the
FieldTrip structure.
comment : str
Comment on dataset. Can be the condition.
data_name : str
Name of heading dict/ variable name under which the data was originally
saved in MATLAB.
Returns
-------
evoked : instance of EvokedArray
An EvokedArray containing the loaded data.
"""
read_mat = _import_pymatreader_funcs('FieldTrip I/O')
ft_struct = read_mat(fname,
ignore_fields=['previous'],
variable_names=[data_name])
ft_struct = ft_struct[data_name]
_validate_ft_struct(ft_struct)
info = _create_info(ft_struct, info) # create info structure
data_evoked = ft_struct['avg'] # create evoked data
evoked = EvokedArray(data_evoked, info, comment=comment)
return evoked
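# A minimal usage sketch (file paths below are hypothetical). As the docstrings
# above recommend, `info` is taken from the original raw recording read with MNE
# (without preload) and then passed to the FieldTrip readers:
#
#     import mne
#     info = mne.io.read_raw_fif("original_raw.fif", preload=False).info
#     raw = mne.io.read_raw_fieldtrip("ft_raw.mat", info=info, data_name="data")
#     epochs = mne.io.read_epochs_fieldtrip("ft_epochs.mat", info=info,
#                                           data_name="data")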
|
py
|
1a5ddad64106ee8178ea4c837c664f741ac343f7
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from collections import OrderedDict
from logging import INFO
from typing import Union
import pytest
import torch
import torch.nn.utils.prune as pytorch_prune
from torch import nn
from torch.nn import Sequential
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint, ModelPruning
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.helpers import BoringModel
from tests.helpers.runif import RunIf
class TestModel(BoringModel):
test_step = None
def __init__(self):
super().__init__()
self.layer = Sequential(
OrderedDict([
("mlp_1", nn.Linear(32, 32)),
("mlp_2", nn.Linear(32, 32, bias=False)),
("mlp_3", nn.Linear(32, 2)),
])
)
def training_step(self, batch, batch_idx):
self.log("test", -batch_idx)
return super().training_step(batch, batch_idx)
class TestPruningMethod(pytorch_prune.BasePruningMethod):
PRUNING_TYPE = "unstructured"
def compute_mask(self, _, default_mask):
mask = default_mask.clone()
# Prune every other entry in a tensor
mask.view(-1)[::2] = 0
return mask
@classmethod
def apply(cls, module, name, amount):
return super(TestPruningMethod, cls).apply(module, name, amount=amount)
def train_with_pruning_callback(
tmpdir,
parameters_to_prune=False,
use_global_unstructured=False,
pruning_fn="l1_unstructured",
use_lottery_ticket_hypothesis=False,
accelerator=None,
gpus=None,
num_processes=1,
):
model = TestModel()
    # Weights are randomly initialized, so none of them should be exactly 0 yet
assert torch.all(model.layer.mlp_2.weight != 0)
pruning_kwargs = {
"pruning_fn": pruning_fn,
"amount": 0.3,
"use_global_unstructured": use_global_unstructured,
"use_lottery_ticket_hypothesis": use_lottery_ticket_hypothesis,
"verbose": 1,
}
if parameters_to_prune:
pruning_kwargs["parameters_to_prune"] = [(model.layer.mlp_1, "weight"), (model.layer.mlp_2, "weight")]
else:
if isinstance(pruning_fn, str) and pruning_fn.endswith("_structured"):
pruning_kwargs["parameter_names"] = ["weight"]
else:
pruning_kwargs["parameter_names"] = ["weight", "bias"]
if isinstance(pruning_fn, str) and pruning_fn.endswith("_structured"):
pruning_kwargs["pruning_dim"] = 0
if pruning_fn == "ln_structured":
pruning_kwargs["pruning_norm"] = 1
# Misconfiguration checks
if isinstance(pruning_fn, str) and pruning_fn.endswith("_structured") and use_global_unstructured:
with pytest.raises(MisconfigurationException, match="is supported with `use_global_unstructured=True`"):
ModelPruning(**pruning_kwargs)
return
if ModelPruning._is_pruning_method(pruning_fn) and not use_global_unstructured:
with pytest.raises(MisconfigurationException, match="currently only supported with"):
ModelPruning(**pruning_kwargs)
return
pruning = ModelPruning(**pruning_kwargs)
trainer = Trainer(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
weights_summary=None,
checkpoint_callback=False,
logger=False,
limit_train_batches=10,
limit_val_batches=2,
max_epochs=10,
accelerator=accelerator,
gpus=gpus,
num_processes=num_processes,
callbacks=pruning,
)
trainer.fit(model)
trainer.test(model)
if not accelerator:
# Check some have been pruned
assert torch.any(model.layer.mlp_2.weight == 0)
def test_pruning_misconfiguration():
with pytest.raises(MisconfigurationException, match=r"chocolate isn't in \('weight', 'bias'\)"):
ModelPruning(pruning_fn="l1_unstructured", parameter_names=["chocolate"])
with pytest.raises(MisconfigurationException, match=r"expected to be a str in \["):
ModelPruning(pruning_fn={}) # noqa
with pytest.raises(MisconfigurationException, match="should be provided"):
ModelPruning(pruning_fn="random_structured")
with pytest.raises(MisconfigurationException, match=r"must be any of \(0, 1, 2\)"):
ModelPruning(pruning_fn="l1_unstructured", verbose=3)
with pytest.raises(MisconfigurationException, match="requesting `ln_structured` pruning, the `pruning_norm`"):
ModelPruning(pruning_fn="ln_structured", pruning_dim=0)
@pytest.mark.parametrize("parameters_to_prune", [False, True])
@pytest.mark.parametrize("use_global_unstructured", [False, True])
@pytest.mark.parametrize(
"pruning_fn", ["l1_unstructured", "random_unstructured", "ln_structured", "random_structured", TestPruningMethod]
)
@pytest.mark.parametrize("use_lottery_ticket_hypothesis", [False, True])
def test_pruning_callback(
tmpdir, use_global_unstructured: bool, parameters_to_prune: bool,
pruning_fn: Union[str, pytorch_prune.BasePruningMethod], use_lottery_ticket_hypothesis: bool
):
train_with_pruning_callback(
tmpdir,
parameters_to_prune=parameters_to_prune,
use_global_unstructured=use_global_unstructured,
pruning_fn=pruning_fn,
use_lottery_ticket_hypothesis=use_lottery_ticket_hypothesis,
)
@RunIf(special=True, min_gpus=2)
def test_pruning_callback_ddp_0(tmpdir):
train_with_pruning_callback(
tmpdir,
parameters_to_prune=False,
use_global_unstructured=False,
accelerator="ddp",
gpus=2,
)
@RunIf(special=True, min_gpus=2)
def test_pruning_callback_ddp_1(tmpdir):
train_with_pruning_callback(
tmpdir,
parameters_to_prune=False,
use_global_unstructured=True,
accelerator="ddp",
gpus=2,
)
@RunIf(special=True, min_gpus=2)
def test_pruning_callback_ddp_2(tmpdir):
train_with_pruning_callback(
tmpdir,
parameters_to_prune=True,
use_global_unstructured=False,
accelerator="ddp",
gpus=2,
)
@RunIf(special=True, min_gpus=2)
def test_pruning_callback_ddp_3(tmpdir):
train_with_pruning_callback(
tmpdir,
parameters_to_prune=True,
use_global_unstructured=True,
accelerator="ddp",
gpus=2,
)
@RunIf(min_gpus=2, skip_windows=True)
def test_pruning_callback_ddp_spawn(tmpdir):
train_with_pruning_callback(tmpdir, use_global_unstructured=True, accelerator="ddp_spawn", gpus=2)
@RunIf(skip_windows=True)
def test_pruning_callback_ddp_cpu(tmpdir):
train_with_pruning_callback(tmpdir, parameters_to_prune=True, accelerator="ddp_cpu", num_processes=2)
@pytest.mark.parametrize("resample_parameters", (False, True))
def test_pruning_lth_callable(tmpdir, resample_parameters: bool):
model = TestModel()
class ModelPruningTestCallback(ModelPruning):
lth_calls = 0
def apply_lottery_ticket_hypothesis(self):
super().apply_lottery_ticket_hypothesis()
self.lth_calls += 1
for d in self._original_layers.values():
copy, names = d["data"], d["names"]
for i, name in names:
curr, curr_name = self._parameters_to_prune[i]
assert name == curr_name
actual, expected = getattr(curr, name).data, getattr(copy, name).data
allclose = torch.allclose(actual, expected)
assert not allclose if self._resample_parameters else allclose
pruning = ModelPruningTestCallback(
"l1_unstructured", use_lottery_ticket_hypothesis=lambda e: bool(e % 2), resample_parameters=resample_parameters
)
trainer = Trainer(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
weights_summary=None,
checkpoint_callback=False,
logger=False,
limit_train_batches=10,
limit_val_batches=2,
max_epochs=5,
callbacks=pruning,
)
trainer.fit(model)
assert pruning.lth_calls == trainer.max_epochs // 2
@pytest.mark.parametrize("make_pruning_permanent", (False, True))
def test_multiple_pruning_callbacks(tmpdir, caplog, make_pruning_permanent: bool):
model = TestModel()
pruning_kwargs = {
'parameters_to_prune': [(model.layer.mlp_1, "weight"), (model.layer.mlp_3, "weight")],
'verbose': 2,
"make_pruning_permanent": make_pruning_permanent
}
p1 = ModelPruning("l1_unstructured", amount=0.5, apply_pruning=lambda e: not e % 2, **pruning_kwargs)
p2 = ModelPruning("random_unstructured", amount=0.25, apply_pruning=lambda e: e % 2, **pruning_kwargs)
trainer = Trainer(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
weights_summary=None,
checkpoint_callback=False,
logger=False,
limit_train_batches=10,
limit_val_batches=2,
max_epochs=3,
callbacks=[p1, p2],
)
with caplog.at_level(INFO):
trainer.fit(model)
actual = [m.strip() for m in caplog.messages]
actual = [m for m in actual if m.startswith("Applied")]
percentage = r"\(\d+(?:\.\d+)?%\)"
expected = [
rf"Applied `L1Unstructured`. Pruned: \d+\/1122 {percentage} -> \d+\/1122 {percentage}",
rf"Applied `L1Unstructured` to `Linear\(in_features=32, out_features=32, bias=True\).weight` with amount=0.5. Pruned: 0 \(0.00%\) -> \d+ {percentage}", # noqa: E501
rf"Applied `L1Unstructured` to `Linear\(in_features=32, out_features=2, bias=True\).weight` with amount=0.5. Pruned: 0 \(0.00%\) -> \d+ {percentage}", # noqa: E501
rf"Applied `RandomUnstructured`. Pruned: \d+\/1122 {percentage} -> \d+\/1122 {percentage}",
rf"Applied `RandomUnstructured` to `Linear\(in_features=32, out_features=32, bias=True\).weight` with amount=0.25. Pruned: \d+ {percentage} -> \d+ {percentage}", # noqa: E501
rf"Applied `RandomUnstructured` to `Linear\(in_features=32, out_features=2, bias=True\).weight` with amount=0.25. Pruned: \d+ {percentage} -> \d+ {percentage}", # noqa: E501
rf"Applied `L1Unstructured`. Pruned: \d+\/1122 {percentage} -> \d+\/1122 {percentage}",
rf"Applied `L1Unstructured` to `Linear\(in_features=32, out_features=32, bias=True\).weight` with amount=0.5. Pruned: \d+ {percentage} -> \d+ {percentage}", # noqa: E501
rf"Applied `L1Unstructured` to `Linear\(in_features=32, out_features=2, bias=True\).weight` with amount=0.5. Pruned: \d+ {percentage} -> \d+ {percentage}", # noqa: E501
]
expected = [re.compile(s) for s in expected]
assert all(regex.match(s) for s, regex in zip(actual, expected))
filepath = str(tmpdir / "foo.ckpt")
trainer.save_checkpoint(filepath)
model.load_from_checkpoint(filepath, strict=False)
has_pruning = hasattr(model.layer.mlp_1, "weight_orig")
assert not has_pruning if make_pruning_permanent else has_pruning
@pytest.mark.parametrize("on_train_epoch_end", (False, True))
def test_permanent_when_model_is_saved_multiple_times(tmpdir, caplog, on_train_epoch_end):
"""
    When a model is saved multiple times and make_pruning_permanent=True, pruning
    must be made permanent on a copy of the weights, not on the trained model
    itself, so that training can continue with the same pruning buffers.
"""
class TestPruning(ModelPruning):
def on_save_checkpoint(self, trainer, pl_module, checkpoint):
super().on_save_checkpoint(trainer, pl_module, checkpoint)
if not on_train_epoch_end:
# these checks only work if pruning on `validation_epoch_end`
# because `on_save_checkpoint` is called before `on_train_epoch_end`
assert "layer.mlp_3.weight_orig" not in checkpoint["state_dict"]
assert hasattr(pl_module.layer.mlp_3, "weight_orig")
model = TestModel()
pruning_callback = TestPruning(
"random_unstructured",
parameters_to_prune=[(model.layer.mlp_3, "weight")],
verbose=1,
make_pruning_permanent=True,
prune_on_train_epoch_end=on_train_epoch_end,
)
ckpt_callback = ModelCheckpoint(monitor="test", save_top_k=2, save_last=True)
trainer = Trainer(callbacks=[pruning_callback, ckpt_callback], max_epochs=3, progress_bar_refresh_rate=0)
with caplog.at_level(INFO):
trainer.fit(model)
actual = [m.strip() for m in caplog.messages]
actual = [m for m in actual if m.startswith("Applied")]
percentage = r"\(\d+(?:\.\d+)?%\)"
expected = [
rf"Applied `RandomUnstructured`. Pruned: \d+\/66 {percentage} -> \d+\/66 {percentage}",
rf"Applied `RandomUnstructured`. Pruned: \d+\/66 {percentage} -> \d+\/66 {percentage}",
rf"Applied `RandomUnstructured`. Pruned: \d+\/66 {percentage} -> \d+\/66 {percentage}",
]
expected = [re.compile(s) for s in expected]
assert all(regex.match(s) for s, regex in zip(actual, expected))
    # make_pruning_permanent=True removed the pruning buffers (weight_orig) in on_train_end
assert not hasattr(model.layer.mlp_3, "weight_orig")
model.load_from_checkpoint(trainer.checkpoint_callback.kth_best_model_path)
assert not hasattr(model.layer.mlp_3, "weight_orig")
model.load_from_checkpoint(trainer.checkpoint_callback.last_model_path)
assert not hasattr(model.layer.mlp_3, "weight_orig")
|
py
|
1a5ddb09f7621759c417ab1a115aaf6fc6222ace
|
# `$ python3 simple_ast.py --help` for more information
# MIT License
#
# Copyright (c) 2020 John Scott
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import os
import json
import re
def main():
parser = argparse.ArgumentParser(description='Generate a simple abstract syntax tree from the given files', epilog="""
Parsing rules
This parser uses three values:
bounds A dictionary of start and end tokens. If the program finds a start
token it will push a new array on the stack and continue. When it
finds the corresponding end token the program will pop the array off
the stack and continue.
extra An array of tokens that don't push or pop when found (unless they're
in the bounds).
strip An array of tokens that will be removed from the output.
Example rules:
{
"bounds": { "(": ")" },
"extra": [ "-", "+", "*", "/", "%" ],
"strip": [ "\n", " " ]
}
""", formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('input', nargs='+', help='Files to be parsed')
parser.add_argument('--output', default='-', help='Location to save the AST')
parser.add_argument('--rules', help='A JSON file containing the parsing rules')
args = parser.parse_args()
rules = {}
if args.rules:
with open(args.rules, 'r') as f:
rules = json.load(f)
if 'bounds' not in rules:
rules['bounds'] = {}
if 'extra' not in rules:
rules['extra'] = ['\n']
if 'strip' not in rules:
rules['strip'] = []
if args.rules:
with open(args.rules, "w") as file:
file.write(json.dumps(rules, sort_keys=True, indent=2))
ast = {}
for input_path in args.input:
with open(input_path, 'r') as file:
text = file.read()
ast[input_path] = generate_ast(text, bounds=rules['bounds'], extra=rules['extra']+rules['strip'], strip=rules['strip'])
if len(ast) == 1:
ast = list(ast.values())[0]
outputContent = json.dumps(ast, sort_keys=True, indent=2)
if args.output != '-':
with open(args.output, "w") as file:
file.write(outputContent)
else:
print(outputContent)
def generate_ast(text, bounds={}, extra=['\n'], strip=['\n']):
boundingTokenRegex = '|'.join(map(lambda s: "("+re.escape(s)+")", sorted(list(bounds.keys()) + list(bounds.values()) + extra,reverse=True)))
tokens = re.compile(boundingTokenRegex).split(text)
stack = [[]]
for token in tokens:
if token is None or len(token) == 0:
continue
if token in bounds:
frame = []
stack[-1].append(frame)
stack.append(frame)
if token not in strip:
stack[-1].append(token)
if len(stack) > 1 and isinstance(stack[-1][0], str) and stack[-1][0] in bounds and token == bounds[stack[-1][0]]:
stack.pop()
return stack[0]
if __name__ == "__main__":
main()
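# A minimal sketch of how generate_ast() behaves, using an assumed rule set in the
# spirit of the --help epilog (spaces are split via `extra` and dropped via `strip`,
# mirroring how main() passes extra=rules['extra']+rules['strip']):
#
#     rules = {"bounds": {"(": ")"}, "extra": ["+", "*", " "], "strip": [" "]}
#     generate_ast("(1 + 2) * 3",
#                  bounds=rules["bounds"],
#                  extra=rules["extra"] + rules["strip"],
#                  strip=rules["strip"])
#     # -> [['(', '1', '+', '2', ')'], '*', '3']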
|
py
|
1a5ddb3ffd9ecd56e18458d10aa6ebb42c9f358a
|
# -*- coding: utf-8 -*-
import pytest
import binascii
import time
import sys
sys.path.extend(["../"])
from bbc1.core import bbclib, bbc_app
from bbc1.core.message_key_types import KeyType
from testutils import prepare, get_core_client, start_core_thread, make_client, domain_setup_utility
LOGLEVEL = 'debug'
#LOGLEVEL = 'none'
core_num = 5
client_num = core_num * 2
cores = None
clients = None
domain_id = bbclib.get_new_id("testdomain")
asset_group_id = bbclib.get_new_id("asset_group_1")
transactions = [None for i in range(client_num)]
msg_processor = [None for i in range(client_num)]
class MessageProcessor(bbc_app.Callback):
def __init__(self, index=0):
super(MessageProcessor, self).__init__(self)
self.idx = index
def proc_cmd_sign_request(self, dat):
self.logger.debug("[%i] Recv SIGN_REQUEST from %s" % (self.idx, binascii.b2a_hex(dat[KeyType.source_user_id])))
txobj, fmt_type = bbclib.deserialize(dat[KeyType.transaction_data])
objs = dict()
for txid, txdata in dat[KeyType.transactions].items():
txo, fmt_type = bbclib.deserialize(txdata)
objs[txid] = txo
for i, reference in enumerate(txobj.references):
event = objs[reference.transaction_id].events[reference.event_index_in_ref]
if clients[self.idx]['user_id'] in event.mandatory_approvers:
signature = txobj.sign(keypair=clients[self.idx]['keypair'])
clients[self.idx]['app'].sendback_signature(asset_group_id, dat[KeyType.source_user_id],
txobj.transaction_id, i, signature)
return
class TestBBcAppClient(object):
def test_00_setup(self):
print("\n-----", sys._getframe().f_code.co_name, "-----")
print("domain_id =", binascii.b2a_hex(domain_id))
global msg_processor
prepare(core_num=core_num, client_num=client_num, loglevel=LOGLEVEL)
for i in range(core_num):
start_core_thread(index=i, core_port_increment=i, p2p_port_increment=i)
time.sleep(0.1)
domain_setup_utility(i, domain_id) # system administrator
time.sleep(1)
for i in range(core_num):
msg_processor[i*2] = MessageProcessor(index=i*2)
make_client(index=i*2, core_port_increment=i, callback=msg_processor[i*2])
msg_processor[i * 2 + 1] = MessageProcessor(index=i*2+1)
make_client(index=i * 2 + 1, core_port_increment=i, callback=msg_processor[i * 2 + 1])
time.sleep(1)
global cores, clients
cores, clients = get_core_client()
def test_10_setup_network(self):
print("\n-----", sys._getframe().f_code.co_name, "-----")
ret = clients[0]['app'].get_domain_neighborlist(domain_id=domain_id)
assert ret
dat = msg_processor[0].synchronize()
print("[0] nodeinfo=", dat[0])
node_id, ipv4, ipv6, port, domain0 = dat[0]
for i in range(1, core_num):
clients[i*2]['app'].send_domain_ping(domain_id, ipv4, ipv6, port)
print("*** wait 5 seconds ***")
time.sleep(5)
for i in range(core_num):
print(cores[i].networking.domains[domain_id]['neighbor'].show_list())
assert len(cores[i].networking.domains[domain_id]['neighbor'].nodeinfo_list) == core_num - 1
def test_11_register(self):
print("\n-----", sys._getframe().f_code.co_name, "-----")
global clients
for cl in clients:
ret = cl['app'].register_to_core()
assert ret
time.sleep(1)
for i in range(4):
assert clients[i]['app'].request_insert_completion_notification(asset_group_id)
time.sleep(2)
for i in range(core_num):
fe = cores[i].networking.domains[domain_id]['user'].forwarding_entries
assert asset_group_id in fe
print(fe[asset_group_id]['nodes'])
num = len(fe[asset_group_id]['nodes'])
if i in [0, 1]: # core0 and core1 have forwarding entry for core1 and core0, respectively.
assert num == 1
else:
assert num == 2
def test_12_cancel_notification(self):
print("\n-----", sys._getframe().f_code.co_name, "-----")
clients[0]['app'].cancel_insert_completion_notification(asset_group_id)
clients[2]['app'].cancel_insert_completion_notification(asset_group_id)
time.sleep(1)
for i in range(core_num):
fe = cores[i].networking.domains[domain_id]['user'].forwarding_entries
assert asset_group_id in fe
print(fe[asset_group_id]['nodes'])
num = len(fe[asset_group_id]['nodes'])
if i in [0, 1]: # core0 and core1 have forwarding entry for core1 and core0, respectively.
assert num == 1
else:
assert num == 2
def test_13_cancel_notification(self):
print("\n-----", sys._getframe().f_code.co_name, "-----")
clients[1]['app'].cancel_insert_completion_notification(asset_group_id)
time.sleep(1)
for i in range(core_num):
fe = cores[i].networking.domains[domain_id]['user'].forwarding_entries
if i == 1: # core1 has no forwarding entry because all clients in core0 canceled multicast forwarding
assert asset_group_id not in fe
else:
assert asset_group_id in fe
print(fe[asset_group_id]['nodes'])
num = len(fe[asset_group_id]['nodes'])
assert num == 1
if __name__ == '__main__':
pytest.main()
|
py
|
1a5ddb5b8d170c43ba2b3224f3362663faafc645
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class VirtualMachineExtension(Resource):
"""Describes a Virtual Machine Extension.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id
:vartype id: str
:ivar name: Resource name
:vartype name: str
:ivar type: Resource type
:vartype type: str
:param location: Resource location
:type location: str
:param tags: Resource tags
:type tags: dict
:param force_update_tag: How the extension handler should be forced to
update even if the extension configuration has not changed.
:type force_update_tag: str
:param publisher: The name of the extension handler publisher.
:type publisher: str
:param virtual_machine_extension_type: The type of the extension handler.
:type virtual_machine_extension_type: str
:param type_handler_version: The type version of the extension handler.
:type type_handler_version: str
:param auto_upgrade_minor_version: Whether the extension handler should be
automatically upgraded across minor versions.
:type auto_upgrade_minor_version: bool
:param settings: Json formatted public settings for the extension.
:type settings: object
:param protected_settings: Json formatted protected settings for the
extension.
:type protected_settings: object
:ivar provisioning_state: The provisioning state, which only appears in
the response.
:vartype provisioning_state: str
:param instance_view: The virtual machine extension instance view.
:type instance_view: :class:`VirtualMachineExtensionInstanceView
<azure.mgmt.compute.v2016_04_30_preview.models.VirtualMachineExtensionInstanceView>`
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'force_update_tag': {'key': 'properties.forceUpdateTag', 'type': 'str'},
'publisher': {'key': 'properties.publisher', 'type': 'str'},
'virtual_machine_extension_type': {'key': 'properties.type', 'type': 'str'},
'type_handler_version': {'key': 'properties.typeHandlerVersion', 'type': 'str'},
'auto_upgrade_minor_version': {'key': 'properties.autoUpgradeMinorVersion', 'type': 'bool'},
'settings': {'key': 'properties.settings', 'type': 'object'},
'protected_settings': {'key': 'properties.protectedSettings', 'type': 'object'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'instance_view': {'key': 'properties.instanceView', 'type': 'VirtualMachineExtensionInstanceView'},
}
def __init__(self, location, tags=None, force_update_tag=None, publisher=None, virtual_machine_extension_type=None, type_handler_version=None, auto_upgrade_minor_version=None, settings=None, protected_settings=None, instance_view=None):
super(VirtualMachineExtension, self).__init__(location=location, tags=tags)
self.force_update_tag = force_update_tag
self.publisher = publisher
self.virtual_machine_extension_type = virtual_machine_extension_type
self.type_handler_version = type_handler_version
self.auto_upgrade_minor_version = auto_upgrade_minor_version
self.settings = settings
self.protected_settings = protected_settings
self.provisioning_state = None
self.instance_view = instance_view
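# A minimal construction sketch (the values are illustrative, not SDK defaults);
# read-only fields such as id, name, type and provisioning_state are populated
# by the service and are not constructor arguments:
#
#     ext = VirtualMachineExtension(
#         location="eastus",
#         publisher="Microsoft.Azure.Extensions",
#         virtual_machine_extension_type="CustomScript",
#         type_handler_version="2.0",
#         auto_upgrade_minor_version=True,
#         settings={"commandToExecute": "echo hello"},
#     )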
|
py
|
1a5ddb5f43047e30d19f1bdef816d8abdd7ef8c5
|
import os
import platform
import pytest
from conans.test.assets.genconanfile import GenConanfile
from conans.test.integration.toolchains.apple.test_xcodetoolchain import _get_filename
from conans.test.utils.tools import TestClient
_expected_dep_xconfig = [
"HEADER_SEARCH_PATHS = $(inherited) $(HEADER_SEARCH_PATHS_{name}_{name})",
"GCC_PREPROCESSOR_DEFINITIONS = $(inherited) $(GCC_PREPROCESSOR_DEFINITIONS_{name}_{name})",
"OTHER_CFLAGS = $(inherited) $(OTHER_CFLAGS_{name}_{name})",
"OTHER_CPLUSPLUSFLAGS = $(inherited) $(OTHER_CPLUSPLUSFLAGS_{name}_{name})",
"FRAMEWORK_SEARCH_PATHS = $(inherited) $(FRAMEWORK_SEARCH_PATHS_{name}_{name})",
"LIBRARY_SEARCH_PATHS = $(inherited) $(LIBRARY_SEARCH_PATHS_{name}_{name})",
"OTHER_LDFLAGS = $(inherited) $(OTHER_LDFLAGS_{name}_{name})",
]
_expected_conf_xconfig = [
"HEADER_SEARCH_PATHS_{name}_{name}[config={configuration}][arch={architecture}][sdk={sdk}{sdk_version}] = ",
"GCC_PREPROCESSOR_DEFINITIONS_{name}_{name}[config={configuration}][arch={architecture}][sdk={sdk}{sdk_version}] = ",
"OTHER_CFLAGS_{name}_{name}[config={configuration}][arch={architecture}][sdk={sdk}{sdk_version}] = ",
"OTHER_CPLUSPLUSFLAGS_{name}_{name}[config={configuration}][arch={architecture}][sdk={sdk}{sdk_version}] = ",
"FRAMEWORK_SEARCH_PATHS_{name}_{name}[config={configuration}][arch={architecture}][sdk={sdk}{sdk_version}] = ",
"LIBRARY_SEARCH_PATHS_{name}_{name}[config={configuration}][arch={architecture}][sdk={sdk}{sdk_version}] = ",
"OTHER_LDFLAGS_{name}_{name}[config={configuration}][arch={architecture}][sdk={sdk}{sdk_version}] = "
]
def expected_files(current_folder, configuration, architecture, sdk, sdk_version):
files = []
name = _get_filename(configuration, architecture, sdk, sdk_version)
deps = ["hello", "goodbye"]
files.extend(
[os.path.join(current_folder, "conan_{dep}_{dep}{name}.xcconfig".format(dep=dep, name=name)) for dep in deps])
files.append(os.path.join(current_folder, "conandeps.xcconfig"))
return files
def check_contents(client, deps, configuration, architecture, sdk, sdk_version):
for dep_name in deps:
dep_xconfig = client.load("conan_{dep}_{dep}.xcconfig".format(dep=dep_name))
conf_name = "conan_{}_{}{}.xcconfig".format(dep_name, dep_name,
_get_filename(configuration, architecture, sdk, sdk_version))
assert '#include "{}"'.format(conf_name) in dep_xconfig
for var in _expected_dep_xconfig:
line = var.format(name=dep_name)
assert line in dep_xconfig
conan_conf = client.load(conf_name)
for var in _expected_conf_xconfig:
assert var.format(name=dep_name, configuration=configuration, architecture=architecture,
sdk=sdk, sdk_version=sdk_version) in conan_conf
@pytest.mark.skipif(platform.system() != "Darwin", reason="Only for MacOS")
def test_generator_files():
client = TestClient()
client.save({"hello.py": GenConanfile().with_settings("os", "arch", "compiler", "build_type")
.with_package_info(cpp_info={"libs": ["hello"],
"frameworks": ['framework_hello']},
env_info={})})
client.run("export hello.py hello/0.1@")
client.save({"goodbye.py": GenConanfile().with_settings("os", "arch", "compiler", "build_type")
.with_package_info(cpp_info={"libs": ["goodbye"],
"frameworks": ['framework_goodbye']},
env_info={})})
client.run("export goodbye.py goodbye/0.1@")
client.save({"conanfile.txt": "[requires]\nhello/0.1\ngoodbye/0.1\n"}, clean_first=True)
for build_type in ["Release", "Debug"]:
client.run("install . -g XcodeDeps -s build_type={} -s arch=x86_64 -s os.sdk=macosx -s os.sdk_version=12.1 --build missing".format(build_type))
for config_file in expected_files(client.current_folder, build_type, "x86_64", "macosx", "12.1"):
assert os.path.isfile(config_file)
conandeps = client.load("conandeps.xcconfig")
assert '#include "conan_hello.xcconfig"' in conandeps
assert '#include "conan_goodbye.xcconfig"' in conandeps
conan_config = client.load("conan_config.xcconfig")
assert '#include "conandeps.xcconfig"' in conan_config
check_contents(client, ["hello", "goodbye"], build_type, "x86_64", "macosx", "12.1")
|
py
|
1a5ddc00c59f7605ea995595a1779a7d78abc283
|
import mock
import os
import shutil
from django.contrib.auth.models import User
from django.test import TestCase, override_settings
from core import models
from core.datatools import ansible, tasks
class Ansible(TestCase):
def setUp(self):
var = models.Variable.objects.create(
name='Test_name',
value='Test_var'
)
empty_host_group = models.HostGroup.objects.create(
name="Empty test host_group",
)
host_group = models.HostGroup.objects.create(
name='Test host_group',
)
host_group.vars.add(var)
host = models.Host.objects.create(
name='Test host',
address='192.168.59.44',
)
host.groups.add(host_group)
host.vars.add(var)
other_host = models.Host.objects.create(
name='Test №2 host',
address='192.168.128.20',
)
other_host.vars.add(var)
ansible_user = models.AnsibleUser.objects.create(
name='Serega'
)
self.user = User.objects.create(
username='Serega',
password='passwd'
)
task_template = models.TaskTemplate.objects.create(
name='qwer',
playbook='/home/',
)
task = models.Task.objects.create(
playbook='/home/',
template=task_template,
user=self.user,
ansible_user=ansible_user,
)
task.host_groups.add(host_group)
task.hosts.add(host)
task.hosts.add(other_host)
task.vars.add(var)
task2 = models.Task.objects.create(
playbook="/home2/",
template=task_template,
user=self.user,
ansible_user=ansible_user,
)
# task2.host_groups.add(empty_host_group)
@mock.patch('core.datatools.ansible.create_inventory')
def test_make_command(self, create_inventory_mock):
test_path_inventory = '/tmp/test/inventory'
create_inventory_mock.return_value = test_path_inventory
self.assertEqual(models.Task.objects.get(playbook='/home/').get_ansible_command(),
'/usr/bin/ansible-playbook -i ' + test_path_inventory +
' -u Serega -e "Test_name=Test_var " -v /home/')
@mock.patch('core.datatools.ansible.tempfile.mkdtemp')
def test_create_inventory(self, tempfile_mock):
test_path_tempfile = '/tmp/test'
tempfile_mock.return_value = test_path_tempfile
if os.path.exists(test_path_tempfile):
shutil.rmtree(test_path_tempfile)
os.mkdir(test_path_tempfile)
self.assertRaises(Exception, ansible.create_inventory, models.Task.objects.get(playbook='/home2/'))
shutil.rmtree(test_path_tempfile)
def test_inventory_file_path(self):
self.assertEqual(ansible.get_inventory_file_path('qwerty 12345 test some 55'), 'test')
class Tasks(TestCase):
def setUp(self):
self.user = User.objects.create(
username='Serega',
password='passwd'
)
def test_check_progress_tasks_not_pid(self):
models.Task.objects.create(
playbook='/home/',
status='in_progress',
user=self.user,
pid=99999999,
)
task_manager = tasks.TaskManager()
task_manager.check_in_progress_tasks()
self.assertEqual(len(models.TaskLog.objects.filter(message='Task with pid 99999999 is not running')), 1)
@mock.patch('django.db.connection')
def test_start_waiting_task(self, connection):
connection.return_value = True
an_user = models.AnsibleUser.objects.create(
name='Test',
)
models.Task.objects.create(
playbook='/home/',
status='wait',
user=self.user,
pid=99999999,
ansible_user=an_user,
)
task_manager = tasks.TaskManager()
task_manager.start_waiting_tasks()
self.assertIn('Start task with pid', models.TaskLog.objects.get().message)
self.assertEqual(models.Task.objects.get().status, 'in_progress')
@override_settings(ANSIBLE_WORK_DIR='/tmp/')
@mock.patch('django.db.connection')
def test_run_task_invalid(self, connection):
connection.return_value = True
an_user = models.AnsibleUser.objects.create(
name='Test',
)
task = models.Task.objects.create(
playbook='/home/',
status='wait',
user=self.user,
pid=99999999,
ansible_user=an_user,
)
host = models.Host.objects.create(
name='Test host',
address='192.168.59.44',
)
task.hosts.add(host)
task_manager = tasks.TaskManager()
task_manager.run_task(task.id)
self.assertIn('Command: ', models.TaskLog.objects.get(id=1).message)
self.assertIn('Working directory: ', models.TaskLog.objects.get(id=2).message)
self.assertIn('Failed with status code ', models.TaskLog.objects.all().last().message)
@mock.patch('asyncio.get_event_loop')
@mock.patch('django.db.connection')
def test_run_task_exception(self, connection, p):
connection.return_value = True
p.return_value = 0
an_user = models.AnsibleUser.objects.create(
name='Test',
)
task = models.Task.objects.create(
playbook='/home/',
status='wait',
user=self.user,
pid=99999999,
ansible_user=an_user,
)
task_manager = tasks.TaskManager()
task_manager.run_task(task.id)
self.assertIn('Progress error', models.TaskLog.objects.all().last().message)
def test_stop_task(self):
task = models.Task.objects.create(
playbook='/home/',
status='wait',
user=self.user,
pid=99999999,
)
task_manager = tasks.TaskManager()
task_manager.stop_task(task)
self.assertEqual(models.TaskLog.objects.get().message, 'Task stopped')
self.assertEqual(models.Task.objects.get().status, 'stopped')
|
py
|
1a5ddc967d237813c383152c40351c0ce558bba4
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2016 The Kubeflow Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import copy
import os
import re
import sys
parser = argparse.ArgumentParser()
parser.add_argument(
"filenames",
help="list of files to check, all files if unspecified",
nargs='*')
args = parser.parse_args()
rootdir = os.path.dirname(__file__) + "/../../"
rootdir = os.path.abspath(rootdir)
def get_refs():
ref_file = open(os.path.join(rootdir, "build/boilerplate/boilerplate.txt"))
ref = ref_file.read().splitlines()
ref_file.close()
refs = {}
for extension in ["sh", "go", "py"]:
refs[extension] = copy.copy(ref)
prefix = ""
if extension == "go":
prefix = "//"
else:
prefix = "#"
for i in range(len(refs[extension])):
if len(refs[extension][i]) != 0:
p = prefix + " "
else:
p = prefix
refs[extension][i] = p + refs[extension][i]
return refs
def file_passes(filename, refs, regexs):
try:
f = open(filename, 'r')
except: # noqa: E722
return False
data = f.read()
f.close()
extension = file_extension(filename)
ref = refs[extension]
# remove build tags from the top of Go files
if extension == "go":
p = regexs["go_build_constraints"]
(data, found) = p.subn("", data, 1)
# remove shebang from the top of shell files
if extension == "sh" or extension == "py":
p = regexs["shebang"]
(data, found) = p.subn("", data, 1)
data = data.splitlines()
# if our test file is smaller than the reference it surely fails!
if len(ref) > len(data):
return False
# trim our file to the same number of lines as the reference file
data = data[:len(ref)]
p = regexs["year"]
for d in data:
if p.search(d):
return False
    # Replace the year(s) in the first line that contains one with "YEAR" so the
    # comparison against the reference boilerplate is year-agnostic
p = regexs["date"]
for i, d in enumerate(data):
(data[i], found) = p.subn('YEAR', d)
if found != 0:
break
# if we don't match the reference at this point, fail
if ref != data:
return False
return True
def file_extension(filename):
return os.path.splitext(filename)[1].split(".")[-1].lower()
skipped_dirs = ['Godeps', 'vendor', 'third_party', '_gopath', '_output', '.git']
def normalize_files(files):
newfiles = []
for pathname in files:
if any(x in pathname for x in skipped_dirs):
continue
newfiles.append(pathname)
for i, pathname in enumerate(newfiles):
if not os.path.isabs(pathname):
newfiles[i] = os.path.join(rootdir, pathname)
return newfiles
def get_files(extensions):
files = []
if len(args.filenames) > 0:
files = args.filenames
else:
for root, dirs, walkfiles in os.walk(rootdir):
# don't visit certain dirs. This is just a performance improvement as we
# would prune these later in normalize_files(). But doing it cuts down the
# amount of filesystem walking we do and cuts down the size of the file
# list
for d in skipped_dirs:
if d in dirs:
dirs.remove(d)
for name in walkfiles:
pathname = os.path.join(root, name)
files.append(pathname)
files = normalize_files(files)
outfiles = []
for pathname in files:
extension = file_extension(pathname)
if extension in extensions:
outfiles.append(pathname)
return outfiles
def get_regexs():
regexs = {}
# Search for "YEAR" which exists in the boilerplate, but shouldn't in the
# real thing
regexs["year"] = re.compile('YEAR')
    # dates can be any year from 2014 through 2020, company holder names can be anything
regexs["date"] = re.compile('(2014|2015|2016|2017|2018|2019|2020)')
# strip // +build \n\n build constraints
regexs["go_build_constraints"] = re.compile(r"^(// \+build.*\n)+\n",
re.MULTILINE)
# strip #!.* from shell scripts
regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE)
return regexs
def main():
regexs = get_regexs()
refs = get_refs()
filenames = get_files(refs.keys())
for filename in filenames:
if not file_passes(filename, refs, regexs):
print(filename, file=sys.stdout)
if __name__ == "__main__":
sys.exit(main())
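# Usage sketch (the invocation path is illustrative): with no arguments the script
# walks the repository root and checks every .sh/.go/.py file; with filename
# arguments it checks only those files. Files whose headers do not match
# build/boilerplate/boilerplate.txt are printed to stdout:
#
#     python build/boilerplate/boilerplate.py
#     python build/boilerplate/boilerplate.py some/package/file.go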
|
py
|
1a5ddd1b93d7b8c4abec9b6c3aa3d0edbff33828
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Liberated Pixel Cup [(LPC)][1] Sprites Dataset.
This file provides logic to download and build a version of the sprites
video sequence dataset as used in the Disentangled Sequential
Autoencoder paper [(Li and Mandt, 2018)][2].
#### References:
[1]: Liberated Pixel Cup. http://lpc.opengameart.org. Accessed:
2018-07-20.
[2]: Yingzhen Li and Stephan Mandt. Disentangled Sequential Autoencoder.
In _International Conference on Machine Learning_, 2018.
https://arxiv.org/abs/1803.02991
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import os
import random
import zipfile
from absl import flags
from six.moves import urllib
import tensorflow as tf
__all__ = ["SpritesDataset"]
flags.DEFINE_string(
"data_dir",
default=os.path.join(
os.getenv("TEST_TMPDIR", "/tmp"),
os.path.join("disentangled_vae", "data")),
help="Directory where the dataset is stored.")
DATA_SPRITES_URL = "https://github.com/jrconway3/Universal-LPC-spritesheet/archive/master.zip"
DATA_SPRITES_DIR = "Universal-LPC-spritesheet-master"
WIDTH = 832
HEIGHT = 1344
FRAME_SIZE = 64
CHANNELS = 4
SKIN_COLORS = [
os.path.join("body", "male", "light.png"),
os.path.join("body", "male", "tanned2.png"),
os.path.join("body", "male", "darkelf.png"),
os.path.join("body", "male", "darkelf2.png"),
os.path.join("body", "male", "dark.png"),
os.path.join("body", "male", "dark2.png")
]
HAIRSTYLES = [
os.path.join("hair", "male", "messy2", "green2.png"),
os.path.join("hair", "male", "ponytail", "blue2.png"),
os.path.join("hair", "male", "messy1", "light-blonde.png"),
os.path.join("hair", "male", "parted", "white.png"),
os.path.join("hair", "male", "plain", "ruby-red.png"),
os.path.join("hair", "male", "jewfro", "purple.png")
]
TOPS = [
os.path.join(
"torso", "shirts", "longsleeve", "male", "maroon_longsleeve.png"),
os.path.join(
"torso", "shirts", "longsleeve", "male", "teal_longsleeve.png"),
os.path.join(
"torso", "shirts", "longsleeve", "male", "white_longsleeve.png"),
os.path.join("torso", "plate", "chest_male.png"),
os.path.join("torso", "leather", "chest_male.png"),
os.path.join("formal_male_no_th-sh", "shirt.png")
]
PANTS = [
os.path.join("legs", "pants", "male", "white_pants_male.png"),
os.path.join("legs", "armor", "male", "golden_greaves_male.png"),
os.path.join("legs", "pants", "male", "red_pants_male.png"),
os.path.join("legs", "armor", "male", "metal_pants_male.png"),
os.path.join("legs", "pants", "male", "teal_pants_male.png"),
os.path.join("formal_male_no_th-sh", "pants.png")
]
Action = namedtuple("Action", ["name", "start_row", "frames"])
ACTIONS = [
Action("walk", 8, 9),
Action("spellcast", 0, 7),
Action("slash", 12, 6)
]
Direction = namedtuple("Direction", ["name", "row_offset"])
DIRECTIONS = [
Direction("west", 1),
Direction("south", 2),
Direction("east", 3),
]
FLAGS = flags.FLAGS
def read_image(filepath):
"""Returns an image tensor."""
im_bytes = tf.io.read_file(filepath)
im = tf.image.decode_image(im_bytes, channels=CHANNELS)
im = tf.image.convert_image_dtype(im, tf.float32)
return im
def join_seq(seq):
"""Joins a sequence side-by-side into a single image."""
return tf.concat(tf.unstack(seq), 1)
def download_sprites():
"""Downloads the sprites data and returns the saved filepath."""
filepath = os.path.join(FLAGS.data_dir, DATA_SPRITES_DIR)
if not tf.io.gfile.exists(filepath):
if not tf.io.gfile.exists(FLAGS.data_dir):
tf.io.gfile.makedirs(FLAGS.data_dir)
zip_name = "{}.zip".format(filepath)
urllib.request.urlretrieve(DATA_SPRITES_URL, zip_name)
with zipfile.ZipFile(zip_name, "r") as zip_file:
zip_file.extractall(FLAGS.data_dir)
tf.io.gfile.remove(zip_name)
return filepath
def create_character(skin, hair, top, pants):
"""Creates a character sprite from a set of attribute sprites."""
dtype = skin.dtype
hair_mask = tf.cast(hair[..., -1:] <= 0, dtype)
top_mask = tf.cast(top[..., -1:] <= 0, dtype)
pants_mask = tf.cast(pants[..., -1:] <= 0, dtype)
char = (skin * hair_mask) + hair
char = (char * top_mask) + top
char = (char * pants_mask) + pants
return char
def create_seq(character, action_metadata, direction, length=8, start=0):
"""Creates a sequence.
Args:
character: A character sprite tensor.
action_metadata: An action metadata tuple.
direction: An integer representing the direction, i.e., the row
offset within each action group corresponding to a particular
direction.
length: Desired length of the sequence. If this is longer than
the number of available frames, it will roll over to the
beginning.
start: Index of possible frames at which to start the sequence.
Returns:
A sequence tensor.
"""
sprite_start = (action_metadata[0]+direction) * FRAME_SIZE
sprite_end = (action_metadata[0]+direction+1) * FRAME_SIZE
sprite_line = character[sprite_start:sprite_end, ...]
# Extract 64x64 patches that are side-by-side in the sprite, and limit
# to the actual number of frames for the given action.
  frames = tf.stack(tf.split(sprite_line, 13, axis=1))  # 13 = WIDTH // FRAME_SIZE columns per sprite row
frames = frames[0:action_metadata[1]]
# Extract a slice of the desired length.
# NOTE: Length could be longer than the number of frames, so tile as needed.
frames = tf.roll(frames, shift=-start, axis=0)
  frames = tf.tile(frames, [2, 1, 1, 1])  # tile twice so the slice below works when length exceeds the frame count
frames = frames[:length]
frames = tf.cast(frames, dtype=tf.float32)
frames.set_shape([length, FRAME_SIZE, FRAME_SIZE, CHANNELS])
return frames
def create_random_seq(character, action_metadata, direction, length=8):
"""Creates a random sequence."""
start = tf.random.uniform([], maxval=action_metadata[1], dtype=tf.int32)
return create_seq(character, action_metadata, direction, length, start)
def create_sprites_dataset(characters, actions, directions, channels=3,
length=8, shuffle=False, fake_data=False):
"""Creates a tf.data pipeline for the sprites dataset.
Args:
characters: A list of (skin, hair, top, pants) tuples containing
relative paths to the sprite png image for each attribute.
actions: A list of Actions.
directions: A list of Directions.
channels: Number of image channels to yield.
length: Desired length of the sequences.
shuffle: Whether or not to shuffle the characters and sequences
start frame.
fake_data: Boolean for whether or not to yield synthetic data.
Returns:
A tf.data.Dataset yielding (seq, skin label index, hair label index,
top label index, pants label index, action label index, skin label
name, hair label_name, top label name, pants label name, action
label name) tuples.
"""
if fake_data:
dummy_image = tf.random.normal([HEIGHT, WIDTH, CHANNELS])
else:
basedir = download_sprites()
action_names = [action.name for action in actions]
action_metadata = [(action.start_row, action.frames) for action in actions]
direction_rows = [direction.row_offset for direction in directions]
chars = tf.data.Dataset.from_tensor_slices(characters)
act_names = tf.data.Dataset.from_tensor_slices(action_names).repeat()
acts_metadata = tf.data.Dataset.from_tensor_slices(action_metadata).repeat()
dir_rows = tf.data.Dataset.from_tensor_slices(direction_rows).repeat()
if shuffle:
chars = chars.shuffle(len(characters))
dataset = tf.data.Dataset.zip((chars, act_names, acts_metadata, dir_rows))
skin_table = tf.contrib.lookup.index_table_from_tensor(sorted(SKIN_COLORS))
hair_table = tf.contrib.lookup.index_table_from_tensor(sorted(HAIRSTYLES))
top_table = tf.contrib.lookup.index_table_from_tensor(sorted(TOPS))
pants_table = tf.contrib.lookup.index_table_from_tensor(sorted(PANTS))
action_table = tf.contrib.lookup.index_table_from_tensor(sorted(action_names))
def process_example(attrs, act_name, act_metadata, dir_row_offset):
"""Processes a dataset row."""
skin_name = attrs[0]
hair_name = attrs[1]
top_name = attrs[2]
pants_name = attrs[3]
if fake_data:
char = dummy_image
else:
skin = read_image(basedir + os.sep + skin_name)
hair = read_image(basedir + os.sep + hair_name)
top = read_image(basedir + os.sep + top_name)
pants = read_image(basedir + os.sep + pants_name)
char = create_character(skin, hair, top, pants)
if shuffle:
seq = create_random_seq(char, act_metadata, dir_row_offset, length)
else:
seq = create_seq(char, act_metadata, dir_row_offset, length)
seq = seq[..., :channels] # limit output channels
skin_idx = skin_table.lookup(skin_name)
hair_idx = hair_table.lookup(hair_name)
top_idx = top_table.lookup(top_name)
pants_idx = pants_table.lookup(pants_name)
act_idx = action_table.lookup(act_name)
return (seq, skin_idx, hair_idx, top_idx, pants_idx, act_idx,
skin_name, hair_name, top_name, pants_name, act_name)
dataset = dataset.map(process_example)
return dataset
class SpritesDataset(object):
"""Liberated Pixel Cup [(LPC)][1] Sprites Dataset.
This file provides logic to download and build a version of the
sprites video sequence dataset as used in the Disentangled Sequential
Autoencoder paper [(Li and Mandt, 2018)][2]. The dataset contains
sprites (graphics files used to generate animated sequences) of human
characters wearing a variety of clothing, and performing a variety of
actions. The paper limits the dataset used for training to four
attribute categories (skin color, hairstyles, tops, and pants), each
of which are limited to include six variants. Thus, there are
6^4 = 1296 possible animated characters in this dataset. The
characters are shuffled and deterministically split such that 1000
characters are used for the training set, and 296 are used for the
testing set. The numbers are consistent with the paper, but the exact
split is impossible to match given the currently available paper
details. The actions are limited to three categories (walking,
casting spells, and slashing), each with three viewing angles.
Sequences of length T=8 frames are generated depicting a given
character performing a given action, starting at a random frame in the
sequence.
Attributes:
train: Training dataset with 1000 characters each performing an
action.
test: Testing dataset with 296 characters each performing an action.
#### References:
[1]: Liberated Pixel Cup. http://lpc.opengameart.org. Accessed:
2018-07-20.
[2]: Yingzhen Li and Stephan Mandt. Disentangled Sequential
Autoencoder. In _International Conference on Machine Learning_,
2018. https://arxiv.org/abs/1803.02991
"""
def __init__(self, channels=3, shuffle_train=True, fake_data=False):
"""Creates the SpritesDataset and stores train and test datasets.
The datasets yield (seq, skin label index, hair label index, top
label index, pants label index, action label index, skin label name,
hair label_name, top label name, pants label name, action label
name) tuples.
Args:
channels: Number of image channels to yield.
shuffle_train: Boolean for whether or not to shuffle the training
set.
fake_data: Boolean for whether or not to yield synthetic data.
Raises:
ValueError: If the number of training or testing examples is
        incorrect, or if there is overlap between the two datasets.
"""
super(SpritesDataset, self).__init__()
self.frame_size = FRAME_SIZE
self.channels = channels
self.length = 8
num_train = 1000
num_test = 296
characters = [(skin, hair, top, pants)
for skin in sorted(SKIN_COLORS)
for hair in sorted(HAIRSTYLES)
for top in sorted(TOPS)
for pants in sorted(PANTS)]
random.seed(42)
random.shuffle(characters)
train_chars = characters[:num_train]
test_chars = characters[num_train:]
num_train_actual = len(set(train_chars))
num_test_actual = len(set(test_chars))
num_train_test_overlap = len(set(train_chars) & set(test_chars))
if num_train_actual != num_train:
raise ValueError(
"Unexpected number of training examples: {}.".format(
num_train_actual))
if num_test_actual != num_test:
raise ValueError(
"Unexpected number of testing examples: {}.".format(
num_test_actual))
if num_train_test_overlap > 0: # pylint: disable=g-explicit-length-test
raise ValueError(
"Overlap between train and test datasets detected: {}.".format(
num_train_test_overlap))
self.train = create_sprites_dataset(
train_chars, ACTIONS, DIRECTIONS, self.channels, self.length,
shuffle=shuffle_train, fake_data=fake_data)
self.test = create_sprites_dataset(
test_chars, ACTIONS, DIRECTIONS, self.channels, self.length,
shuffle=False, fake_data=fake_data)
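# Illustrative usage sketch appended for clarity; it is not part of the original
# module. It assumes TF 2.x eager execution and that fake_data=True bypasses the
# real download path (an assumption based on the constructor flag). If the helper
# functions above behave as their names suggest, seq should have shape
# (8, FRAME_SIZE, FRAME_SIZE, 3).
if __name__ == "__main__":
    sprites = SpritesDataset(channels=3, shuffle_train=True, fake_data=True)
    for example in sprites.train.take(1):
        seq = example[0]            # frame sequence tensor
        action_name = example[10]   # last tuple element: action label name
        print(seq.shape, action_name)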
|
py
|
1a5dddbce96b12ec428a32356fade4482932c90d
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import flash
from flash.core.data.utils import download_data
from flash.image import ObjectDetectionData, ObjectDetector
# 1. Create the DataModule
# Dataset Credit: https://www.kaggle.com/ultralytics/coco128
download_data("https://github.com/zhiqwang/yolov5-rt-stack/releases/download/v0.3.0/coco128.zip", "data/")
datamodule = ObjectDetectionData.from_coco(
train_folder="data/coco128/images/train2017/",
train_ann_file="data/coco128/annotations/instances_train2017.json",
val_split=0.1,
image_size=128,
)
# 2. Build the task
model = ObjectDetector(head="efficientdet", backbone="d0", num_classes=datamodule.num_classes, image_size=128)
# 3. Create the trainer and finetune the model
trainer = flash.Trainer(max_epochs=1)
trainer.finetune(model, datamodule=datamodule, strategy="freeze")
# 4. Detect objects in a few images!
predictions = model.predict(
[
"data/coco128/images/train2017/000000000625.jpg",
"data/coco128/images/train2017/000000000626.jpg",
"data/coco128/images/train2017/000000000629.jpg",
]
)
print(predictions)
# 5. Save the model!
trainer.save_checkpoint("object_detection_model.pt")
|
py
|
1a5dddd24549bfe96a6c95a8e340b871e8fbfbe9
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020, suganya and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class IdentityCheck(Document):
pass
|
py
|
1a5dde62cf371b4409a62e088d32f328d4f6ceea
|
"""
_______________________________
Author: Patrick Carlson
Email: [email protected]
-------------------------------
License: MIT License
"""
# Developer defined modules
from src import menu
from utilities import log_helper, menu_strings
def main():
log_helper.start_logger()
print(menu_strings.welcome_string)
menu.main_menu()
if __name__ == '__main__':
main()
|
py
|
1a5ddeabacb144ac5ceedaa339b7dc0599e74478
|
import argparse
def main():
'''
Based on https://docs.python.org/3.5/library/argparse.html#sub-commands
Examples
-----------
Parameter Extraction:
$ photovoltaic_modeling parameter_extraction --short_circuit_current 3.87 --open_circuit_voltage 42.1 --maximum_power_point_current 3.56 --maximum_power_point_voltage 33.7 --number_of_cells_in_series 72
'''
parser = argparse.ArgumentParser(prog='Executes a specified root finding')
# "dest": the name of the variable that holds the name of subparser.
subparsers = parser.add_subparsers(title='name', dest='name')
parser_parameter_extraction = subparsers.add_parser('parameter_extraction')
parser_parameter_extraction.add_argument('--short_circuit_current', nargs='?', type=float, required=True)
parser_parameter_extraction.add_argument('--open_circuit_voltage', nargs='?', type=float, required=True)
parser_parameter_extraction.add_argument('--maximum_power_point_current', nargs='?', type=float, required=True)
parser_parameter_extraction.add_argument('--maximum_power_point_voltage', nargs='?', type=float, required=True)
parser_parameter_extraction.add_argument('--number_of_cells_in_series', nargs='?', type=float, required=True)
parser_parameter_extraction.add_argument('--series_resistance_estimate', nargs='?', type=float, default=1)
parser_parameter_extraction.add_argument('--shunt_resistance_estimate', nargs='?', type=float, default=1000)
parser_parameter_extraction.add_argument('--diode_quality_factor_estimate', nargs='?', type=float, default=1)
# Note: Calls execute_parameter_extraction function with the arguments:
parser_parameter_extraction.set_defaults(func=execute_parameter_extraction)
args = parser.parse_args()
# Calls the function specified in "set_defaults" method:
args.func(args)
def execute_parameter_extraction(args):
from photovoltaic_modeling.parameter_extraction import ParameterExtraction
parameter_extraction = ParameterExtraction(args.short_circuit_current, args.open_circuit_voltage,
args.maximum_power_point_current, args.maximum_power_point_voltage,
number_of_cells_in_series = args.number_of_cells_in_series)
parameter_estimates = [args.series_resistance_estimate, args.shunt_resistance_estimate, args.diode_quality_factor_estimate]
parameter_extraction.calculate(parameter_estimates)
print('series_resistance=', parameter_extraction.series_resistance)
print('shunt_resistance=', parameter_extraction.shunt_resistance)
print('diode_quality_factor=', parameter_extraction.diode_quality_factor)
if __name__ == "__main__":
main()
|
py
|
1a5ddeabc00f7a5d61c9067d70004f6af3759ec3
|
from pathlib import Path
import subprocess
data_dir = Path('out_amif_4_200_19')
vv_tif = sorted(data_dir.rglob('*_VV.mli.filt.tif'))
vh_tif = sorted(data_dir.rglob('*_VH.mli.filt.tif'))
for i in range(len(vv_tif)):
out_tif = data_dir / str(vv_tif[i].name).replace('_VV.mli', '_RGB.mli')
print(i, out_tif)
cmd = (f'gdal_merge.py '
f'-init "0 0 0" '
f'-separate '
f'-co COMPRESS=LZW '
f'-o {out_tif} {vv_tif[i]} {vh_tif[i]}')
subprocess.call(cmd, shell=True)
|
py
|
1a5ddf32410f38307fcaeb738a11b337bddb95e2
|
"""
Copyright (c) 2020, VRAI Labs and/or its affiliates. All rights reserved.
This software is licensed under the Apache License, Version 2.0 (the
"License") as published by the Apache Software Foundation.
You may not use this file except in compliance with the License. You may
obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""
from os import environ
from supertokens_fastapi.exceptions import raise_general_exception
class ProcessState:
__instance = None
def __init__(self):
self.service_called = False
@staticmethod
def __get_instance():
if ProcessState.__instance is None:
ProcessState.__instance = ProcessState()
return ProcessState.__instance
@staticmethod
def update_service_called(b):
instance = ProcessState.__get_instance()
instance.service_called = b
@staticmethod
def get_service_called():
return ProcessState.__get_instance().service_called
@staticmethod
def reset():
if ('SUPERTOKENS_ENV' not in environ) or (
environ['SUPERTOKENS_ENV'] != 'testing'):
raise_general_exception(
'calling testing function in non testing env')
ProcessState.__instance = None
|
py
|
1a5ddfa5abf73eb0ad38b15c322e2778634f5036
|
# Generated by Django 3.1.3 on 2021-03-28 16:33
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('subject', models.CharField(max_length=200)),
('content', models.TextField()),
('create_date', models.DateTimeField()),
],
),
]
|
py
|
1a5de001536f04faf6ab6a550a720964b72bc4d0
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 28 19:53:06 2017
@author: Nadiar
"""
def flatten(aList):
# if empty list
if (aList == []):
return []
# if not list
if (type(aList) is not list):
return [aList]
# if aList is list type
if (type(aList) is list):
return flatten(aList[0]) + flatten(aList[1:])
def how_many(aDict):
'''
aDict: A dictionary, where all the values are lists.
returns: int, how many values are in the dictionary.
'''
return len(flatten(list(aDict.values())))
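# Minimal usage sketch (added for illustration; not part of the original exercise):
if __name__ == '__main__':
    print(flatten([[1, 2], [3, [4, 5]]]))  # -> [1, 2, 3, 4, 5]
    animals = {'a': ['aardvark'], 'b': ['baboon'], 'c': ['coati']}
    print(how_many(animals))               # -> 3 (three values across all the lists)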
|
py
|
1a5de01a27c399df1a5651cebcc1ebf44ebcb794
|
#!/usr/bin/env python
#
# Fermatum - lightweight IoP client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This module uses functions from TLSLite (public domain)
#
# TLSLite Authors:
# Trevor Perrin
# Martin von Loewis - python 3 port
# Yngve Pettersen (ported by Paul Sokolovsky) - TLS 1.2
#
"""Pure-Python RSA implementation."""
from __future__ import print_function
import os
import math
import base64
import binascii
import hashlib
from pem import *
def SHA1(x):
return hashlib.sha1(x).digest()
# **************************************************************************
# PRNG Functions
# **************************************************************************
# Check that os.urandom works
import zlib
length = len(zlib.compress(os.urandom(1000)))
assert(length > 900)
def getRandomBytes(howMany):
b = bytearray(os.urandom(howMany))
assert(len(b) == howMany)
return b
prngName = "os.urandom"
# **************************************************************************
# Converter Functions
# **************************************************************************
def bytesToNumber(b):
total = 0
multiplier = 1
for count in range(len(b)-1, -1, -1):
byte = b[count]
total += multiplier * byte
multiplier *= 256
return total
def numberToByteArray(n, howManyBytes=None):
"""Convert an integer into a bytearray, zero-pad to howManyBytes.
The returned bytearray may be smaller than howManyBytes, but will
not be larger. The returned bytearray will contain a big-endian
encoding of the input integer (n).
"""
if howManyBytes == None:
howManyBytes = numBytes(n)
b = bytearray(howManyBytes)
for count in range(howManyBytes-1, -1, -1):
b[count] = int(n % 256)
n >>= 8
return b
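# Illustrative examples of the two converters above (comments added for clarity):
#   numberToByteArray(258) == bytearray(b'\x01\x02')              # minimal big-endian form
#   numberToByteArray(258, 4) == bytearray(b'\x00\x00\x01\x02')   # zero-padded to 4 bytes
#   bytesToNumber(bytearray(b'\x01\x02')) == 258                  # round trip back to int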
def mpiToNumber(mpi): #mpi is an openssl-format bignum string
if (ord(mpi[4]) & 0x80) !=0: #Make sure this is a positive number
raise AssertionError()
b = bytearray(mpi[4:])
return bytesToNumber(b)
def numberToMPI(n):
b = numberToByteArray(n)
ext = 0
#If the high-order bit is going to be set,
#add an extra byte of zeros
if (numBits(n) & 0x7)==0:
ext = 1
length = numBytes(n) + ext
b = bytearray(4+ext) + b
b[0] = (length >> 24) & 0xFF
b[1] = (length >> 16) & 0xFF
b[2] = (length >> 8) & 0xFF
b[3] = length & 0xFF
return bytes(b)
# **************************************************************************
# Misc. Utility Functions
# **************************************************************************
def numBits(n):
if n==0:
return 0
s = "%x" % n
return ((len(s)-1)*4) + \
{'0':0, '1':1, '2':2, '3':2,
'4':3, '5':3, '6':3, '7':3,
'8':4, '9':4, 'a':4, 'b':4,
'c':4, 'd':4, 'e':4, 'f':4,
}[s[0]]
def numBytes(n):
if n==0:
return 0
bits = numBits(n)
return int(math.ceil(bits / 8.0))
# **************************************************************************
# Big Number Math
# **************************************************************************
def getRandomNumber(low, high):
if low >= high:
raise AssertionError()
howManyBits = numBits(high)
howManyBytes = numBytes(high)
lastBits = howManyBits % 8
while 1:
bytes = getRandomBytes(howManyBytes)
if lastBits:
bytes[0] = bytes[0] % (1 << lastBits)
n = bytesToNumber(bytes)
if n >= low and n < high:
return n
def gcd(a,b):
a, b = max(a,b), min(a,b)
while b:
a, b = b, a % b
return a
def lcm(a, b):
return (a * b) // gcd(a, b)
#Returns inverse of a mod b, zero if none
#Uses Extended Euclidean Algorithm
def invMod(a, b):
c, d = a, b
uc, ud = 1, 0
while c != 0:
q = d // c
c, d = d-(q*c), c
uc, ud = ud - (q * uc), uc
if d == 1:
return ud % b
return 0
def powMod(base, power, modulus):
if power < 0:
result = pow(base, power*-1, modulus)
result = invMod(result, modulus)
return result
else:
return pow(base, power, modulus)
#Pre-calculate a sieve of the ~100 primes < 1000:
def makeSieve(n):
sieve = list(range(n))
for count in range(2, int(math.sqrt(n))+1):
if sieve[count] == 0:
continue
x = sieve[count] * 2
while x < len(sieve):
sieve[x] = 0
x += sieve[count]
sieve = [x for x in sieve[2:] if x]
return sieve
sieve = makeSieve(1000)
def isPrime(n, iterations=5, display=False):
#Trial division with sieve
for x in sieve:
if x >= n: return True
if n % x == 0: return False
#Passed trial division, proceed to Rabin-Miller
#Rabin-Miller implemented per Ferguson & Schneier
#Compute s, t for Rabin-Miller
if display: print("*", end=' ')
s, t = n-1, 0
while s % 2 == 0:
s, t = s//2, t+1
#Repeat Rabin-Miller x times
a = 2 #Use 2 as a base for first iteration speedup, per HAC
for count in range(iterations):
v = powMod(a, s, n)
if v==1:
continue
i = 0
while v != n-1:
if i == t-1:
return False
else:
v, i = powMod(v, 2, n), i+1
a = getRandomNumber(2, n)
return True
def getRandomPrime(bits, display=False):
if bits < 10:
raise AssertionError()
#The 1.5 ensures the 2 MSBs are set
#Thus, when used for p,q in RSA, n will have its MSB set
#
#Since 30 is lcm(2,3,5), we'll set our test numbers to
#29 % 30 and keep them there
low = ((2 ** (bits-1)) * 3) // 2
high = 2 ** bits - 30
p = getRandomNumber(low, high)
p += 29 - (p % 30)
while 1:
if display: print(".", end=' ')
p += 30
if p >= high:
p = getRandomNumber(low, high)
p += 29 - (p % 30)
if isPrime(p, display=display):
return p
#Unused at the moment...
def getRandomSafePrime(bits, display=False):
if bits < 10:
raise AssertionError()
#The 1.5 ensures the 2 MSBs are set
#Thus, when used for p,q in RSA, n will have its MSB set
#
#Since 30 is lcm(2,3,5), we'll set our test numbers to
#29 % 30 and keep them there
low = (2 ** (bits-2)) * 3//2
high = (2 ** (bits-1)) - 30
q = getRandomNumber(low, high)
q += 29 - (q % 30)
while 1:
if display: print(".", end=' ')
q += 30
if (q >= high):
q = getRandomNumber(low, high)
q += 29 - (q % 30)
#Ideas from Tom Wu's SRP code
#Do trial division on p and q before Rabin-Miller
if isPrime(q, 0, display=display):
p = (2 * q) + 1
if isPrime(p, display=display):
if isPrime(q, display=display):
return p
class RSAKey(object):
def __init__(self, n=0, e=0, d=0, p=0, q=0, dP=0, dQ=0, qInv=0):
if (n and not e) or (e and not n):
raise AssertionError()
self.n = n
self.e = e
self.d = d
self.p = p
self.q = q
self.dP = dP
self.dQ = dQ
self.qInv = qInv
self.blinder = 0
self.unblinder = 0
def __len__(self):
"""Return the length of this key in bits.
@rtype: int
"""
return numBits(self.n)
def hasPrivateKey(self):
return self.d != 0
def hashAndSign(self, bytes):
"""Hash and sign the passed-in bytes.
This requires the key to have a private component. It performs
a PKCS1-SHA1 signature on the passed-in data.
@type bytes: str or L{bytearray} of unsigned bytes
@param bytes: The value which will be hashed and signed.
@rtype: L{bytearray} of unsigned bytes.
@return: A PKCS1-SHA1 signature on the passed-in data.
"""
hashBytes = SHA1(bytearray(bytes))
prefixedHashBytes = self._addPKCS1SHA1Prefix(hashBytes)
sigBytes = self.sign(prefixedHashBytes)
return sigBytes
def hashAndVerify(self, sigBytes, bytes):
"""Hash and verify the passed-in bytes with the signature.
This verifies a PKCS1-SHA1 signature on the passed-in data.
@type sigBytes: L{bytearray} of unsigned bytes
@param sigBytes: A PKCS1-SHA1 signature.
@type bytes: str or L{bytearray} of unsigned bytes
@param bytes: The value which will be hashed and verified.
@rtype: bool
@return: Whether the signature matches the passed-in data.
"""
hashBytes = SHA1(bytearray(bytes))
# Try it with/without the embedded NULL
prefixedHashBytes1 = self._addPKCS1SHA1Prefix(hashBytes, False)
prefixedHashBytes2 = self._addPKCS1SHA1Prefix(hashBytes, True)
result1 = self.verify(sigBytes, prefixedHashBytes1)
result2 = self.verify(sigBytes, prefixedHashBytes2)
return (result1 or result2)
def sign(self, bytes):
"""Sign the passed-in bytes.
This requires the key to have a private component. It performs
a PKCS1 signature on the passed-in data.
@type bytes: L{bytearray} of unsigned bytes
@param bytes: The value which will be signed.
@rtype: L{bytearray} of unsigned bytes.
@return: A PKCS1 signature on the passed-in data.
"""
if not self.hasPrivateKey():
raise AssertionError()
paddedBytes = self._addPKCS1Padding(bytes, 1)
m = bytesToNumber(paddedBytes)
if m >= self.n:
raise ValueError()
c = self._rawPrivateKeyOp(m)
sigBytes = numberToByteArray(c, numBytes(self.n))
return sigBytes
def verify(self, sigBytes, bytes):
"""Verify the passed-in bytes with the signature.
This verifies a PKCS1 signature on the passed-in data.
@type sigBytes: L{bytearray} of unsigned bytes
@param sigBytes: A PKCS1 signature.
@type bytes: L{bytearray} of unsigned bytes
@param bytes: The value which will be verified.
@rtype: bool
@return: Whether the signature matches the passed-in data.
"""
if len(sigBytes) != numBytes(self.n):
return False
paddedBytes = self._addPKCS1Padding(bytes, 1)
c = bytesToNumber(sigBytes)
if c >= self.n:
return False
m = self._rawPublicKeyOp(c)
checkBytes = numberToByteArray(m, numBytes(self.n))
return checkBytes == paddedBytes
def encrypt(self, bytes):
"""Encrypt the passed-in bytes.
This performs PKCS1 encryption of the passed-in data.
@type bytes: L{bytearray} of unsigned bytes
@param bytes: The value which will be encrypted.
@rtype: L{bytearray} of unsigned bytes.
@return: A PKCS1 encryption of the passed-in data.
"""
paddedBytes = self._addPKCS1Padding(bytes, 2)
m = bytesToNumber(paddedBytes)
if m >= self.n:
raise ValueError()
c = self._rawPublicKeyOp(m)
encBytes = numberToByteArray(c, numBytes(self.n))
return encBytes
def decrypt(self, encBytes):
"""Decrypt the passed-in bytes.
This requires the key to have a private component. It performs
PKCS1 decryption of the passed-in data.
@type encBytes: L{bytearray} of unsigned bytes
@param encBytes: The value which will be decrypted.
@rtype: L{bytearray} of unsigned bytes or None.
@return: A PKCS1 decryption of the passed-in data or None if
the data is not properly formatted.
"""
if not self.hasPrivateKey():
raise AssertionError()
if len(encBytes) != numBytes(self.n):
return None
c = bytesToNumber(encBytes)
if c >= self.n:
return None
m = self._rawPrivateKeyOp(c)
decBytes = numberToByteArray(m, numBytes(self.n))
#Check first two bytes
if decBytes[0] != 0 or decBytes[1] != 2:
return None
#Scan through for zero separator
for x in range(1, len(decBytes)-1):
if decBytes[x]== 0:
break
else:
return None
return decBytes[x+1:] #Return everything after the separator
# **************************************************************************
# Helper Functions for RSA Keys
# **************************************************************************
def _addPKCS1SHA1Prefix(self, bytes, withNULL=True):
# There is a long history of confusion over whether the SHA1
# algorithmIdentifier should be encoded with a NULL parameter or
# with the parameter omitted. While the original intention was
# apparently to omit it, many toolkits went the other way. TLS 1.2
# specifies the NULL should be included, and this behavior is also
# mandated in recent versions of PKCS #1, and is what tlslite has
# always implemented. Anyways, verification code should probably
# accept both. However, nothing uses this code yet, so this is
# all fairly moot.
if not withNULL:
prefixBytes = bytearray(\
[0x30,0x1f,0x30,0x07,0x06,0x05,0x2b,0x0e,0x03,0x02,0x1a,0x04,0x14])
else:
prefixBytes = bytearray(\
[0x30,0x21,0x30,0x09,0x06,0x05,0x2b,0x0e,0x03,0x02,0x1a,0x05,0x00,0x04,0x14])
prefixedBytes = prefixBytes + bytes
return prefixedBytes
def _addPKCS1Padding(self, bytes, blockType):
padLength = (numBytes(self.n) - (len(bytes)+3))
if blockType == 1: #Signature padding
pad = [0xFF] * padLength
elif blockType == 2: #Encryption padding
pad = bytearray(0)
while len(pad) < padLength:
padBytes = getRandomBytes(padLength * 2)
pad = [b for b in padBytes if b != 0]
pad = pad[:padLength]
else:
raise AssertionError()
padding = bytearray([0,blockType] + pad + [0])
paddedBytes = padding + bytes
return paddedBytes
def _rawPrivateKeyOp(self, m):
#Create blinding values, on the first pass:
if not self.blinder:
self.unblinder = getRandomNumber(2, self.n)
self.blinder = powMod(invMod(self.unblinder, self.n), self.e,
self.n)
#Blind the input
m = (m * self.blinder) % self.n
#Perform the RSA operation
c = self._rawPrivateKeyOpHelper(m)
#Unblind the output
c = (c * self.unblinder) % self.n
#Update blinding values
self.blinder = (self.blinder * self.blinder) % self.n
self.unblinder = (self.unblinder * self.unblinder) % self.n
#Return the output
return c
def _rawPrivateKeyOpHelper(self, m):
#Non-CRT version
#c = powMod(m, self.d, self.n)
#CRT version (~3x faster)
s1 = powMod(m, self.dP, self.p)
s2 = powMod(m, self.dQ, self.q)
h = ((s1 - s2) * self.qInv) % self.p
c = s2 + self.q * h
return c
def _rawPublicKeyOp(self, c):
m = powMod(c, self.e, self.n)
return m
def acceptsPassword(self):
return False
def generate(bits):
key = RSAKey()
p = getRandomPrime(bits//2, False)
q = getRandomPrime(bits//2, False)
t = lcm(p-1, q-1)
key.n = p * q
key.e = 65537
key.d = invMod(key.e, t)
key.p = p
key.q = q
key.dP = key.d % (p-1)
key.dQ = key.d % (q-1)
key.qInv = invMod(q, p)
return key
generate = staticmethod(generate)
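# Illustrative usage sketch appended for clarity; it only exercises the functions
# defined above and is not part of the original library. A deliberately small key
# size is used because pure-Python key generation is slow; real keys should be
# 2048 bits or more.
if __name__ == "__main__":
    demo_key = RSAKey.generate(512)
    message = bytearray(b"hello")
    ciphertext = demo_key.encrypt(message)
    assert demo_key.decrypt(ciphertext) == message
    signature = demo_key.hashAndSign(message)
    assert demo_key.hashAndVerify(signature, message)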
|
py
|
1a5de0b4fbaadc49fcd45bb9e8c5016e5f170f7e
|
import networkx as nx
from django.db import connection
from api.models import Person
def build_social_graph(user):
query = """
with face as (
select photo_id, person_id, name
from api_face join api_person on api_person.id = person_id
where person_label_is_inferred = false
)
select f1.name, f2.name
from face f1 join face f2 using (photo_id)
where f1.person_id != f2.person_id
group by f1.name, f2.name
"""
G = nx.Graph()
with connection.cursor() as cursor:
cursor.execute(query)
links = cursor.fetchall()
if len(links) == 0:
return {"nodes": [], "links": []}
for link in links:
G.add_edge(link[0], link[1])
pos = nx.spring_layout(G, k=1 / 2, scale=1000, iterations=20)
return {
"nodes": [{"id": node, "x": pos[0], "y": pos[1]} for node, pos in pos.items()],
"links": [{"source": pair[0], "target": pair[1]} for pair in G.edges()],
}
def build_ego_graph(person_id):
G = nx.Graph()
person = Person.objects.prefetch_related("faces__photo__faces__person").filter(
id=person_id
)[0]
for this_person_face in person.faces.all():
for other_person_face in this_person_face.photo.faces.all():
G.add_edge(person.name, other_person_face.person.name)
nodes = [{"id": node} for node in G.nodes()]
links = [{"source": pair[0], "target": pair[1]} for pair in G.edges()]
res = {"nodes": nodes, "links": links}
return res
|
py
|
1a5de0cd0e8df920861697fb6b54ea029fc535da
|
from django.conf import settings
from django.core import mail
from django.core.mail.backends.base import BaseEmailBackend
import olympia.core.logger
from olympia.amo.models import FakeEmail
log = olympia.core.logger.getLogger('z.amo.mail')
class DevEmailBackend(BaseEmailBackend):
"""Log emails in the database, send allowed addresses for real though.
Used for development environments when we don't want to send out
real emails. This gets swapped in as the email backend when
`settings.SEND_REAL_EMAIL` is disabled.
BUT even if `settings.SEND_REAL_EMAIL` is disabled, if the targeted
email address is in the `settings.EMAIL_QA_ALLOW_LIST` list,
the email will be sent.
"""
def send_messages(self, messages):
"""Save a `FakeEmail` object viewable within the admin.
If one of the target email addresses is in
`settings.EMAIL_QA_ALLOW_LIST`, it sends a real email message.
"""
log.debug('Sending dev mail messages.')
qa_messages = []
for msg in messages:
FakeEmail.objects.create(message=msg.message().as_string())
qa_emails = set(msg.to).intersection(settings.EMAIL_QA_ALLOW_LIST)
if qa_emails:
if len(msg.to) != len(qa_emails):
# We need to replace the recipients with the QA
# emails only prior to sending the message for real.
# We don't want to send real emails to people who
# happen to be in the recipient list alongside the
# allow-listed addresses.
msg.to = list(qa_emails)
qa_messages.append(msg)
if qa_messages:
log.debug('Sending real mail messages to QA.')
connection = mail.get_connection()
connection.send_messages(qa_messages)
return len(messages)
def view_all(self):
"""Useful for displaying messages in admin panel."""
return (FakeEmail.objects.values_list('message', flat=True)
.order_by('-created'))
def clear(self):
return FakeEmail.objects.all().delete()
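# Illustrative configuration sketch (comments only; not part of this module). A
# backend like this is normally enabled through Django's EMAIL_BACKEND setting;
# the dotted path below is an assumption and must match wherever DevEmailBackend
# actually lives in the project.
#
#   EMAIL_BACKEND = 'olympia.amo.mail.DevEmailBackend'
#   SEND_REAL_EMAIL = False
#   EMAIL_QA_ALLOW_LIST = ['[email protected]']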
|
py
|
1a5de22075500e4ecea7fe630c1f370a626231e2
|
#!/usr/bin/env python
#*
#* The MIT License
#*
#* Copyright 2012 Georgios Migdos <[email protected]>.
#*
#* Permission is hereby granted, free of charge, to any person obtaining a copy
#* of this software and associated documentation files (the "Software"), to deal
#* in the Software without restriction, including without limitation the rights
#* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#* copies of the Software, and to permit persons to whom the Software is
#* furnished to do so, subject to the following conditions:
#*
#* The above copyright notice and this permission notice shall be included in
#* all copies or substantial portions of the Software.
#*
#* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#* THE SOFTWARE.
#*
from gi.repository import Gtk, GtkSource
import sys
if len(sys.argv)!=5 :
print '''
Wrong number of parameters:
Correct usage :
python source_printer.py <language_id> <scheme_id> <font_description> <filename>
e.g.
python source_printer.py java classic "Liberation Mono 10" ~/test/Hello_World.java
Parameters:
language_id : The GtkSourceView language definition id.
scheme_id : The GtkSourceView style scheme definition id.
font_description : The Pango font description string (font-family
and size separated by a space character).
filename : The file to print.
'''
exit()
def begin_print(operation, context, p):
print "Initializing printing process..."
while(not p.paginate(context)):
pass
n_pages = p.get_n_pages()
operation.set_n_pages (n_pages);
print "Sending", n_pages, "to printer"
def end_print(operation, context):
print 'Document sent to printer.'
def draw_page(operation, context, page_nr, p):
print 'Sending page:', (page_nr+1)
p.draw_page (context,page_nr)
fname = sys.argv[4]
lang = GtkSource.LanguageManager.get_default().get_language(sys.argv[1])
scheme = GtkSource.StyleSchemeManager.get_default().get_scheme(sys.argv[2])
buf = GtkSource.Buffer()
buf.set_language(lang)
buf.set_style_scheme(scheme)
f = open(fname, 'r')
buf.set_text(f.read())
f.close()
p = GtkSource.PrintCompositor.new(buf)
p.set_print_line_numbers(1)
p.set_body_font_name(sys.argv[3])
p.set_line_numbers_font_name(sys.argv[3])
p.set_wrap_mode(Gtk.WrapMode.WORD_CHAR)
p.set_left_margin(20, Gtk.Unit.MM)
p.set_right_margin(20, Gtk.Unit.MM)
p.set_top_margin(20, Gtk.Unit.MM)
p.set_bottom_margin(30, Gtk.Unit.MM)
op = Gtk.PrintOperation()
op.connect("draw_page", draw_page, p)
op.connect("begin-print", begin_print, p)
op.connect("end-print", end_print)
op.run(Gtk.PrintOperationAction.PRINT_DIALOG, None)
|
py
|
1a5de2cd32a42576d7aa2467d911a1f38a1f162c
|
# project/server/config.py
import os
basedir = os.path.abspath(os.path.dirname(__file__))
database_name = 'transfuzol'
if 'DATABASE_URL' in os.environ:
DB_URL = os.environ['DATABASE_URL']
else:
DB_NAME = 'transfuzol'
DB_USER = 'postgres'
DB_PASSWORD = 'barister'
DB_HOST = 'localhost'
DB_PORT = '5432'
DB_URL = 'postgresql://' + DB_USER + ':' + DB_PASSWORD + '@' + DB_HOST + ':' + DB_PORT + '/' + DB_NAME
class BaseConfig:
"""Base configuration."""
SECRET_KEY = os.getenv('SECRET_KEY', 'my_precious')
DEBUG = False
BCRYPT_LOG_ROUNDS = 13
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_DATABASE_URI = DB_URL
class DevelopmentConfig(BaseConfig):
"""Development configuration."""
DEBUG = True
BCRYPT_LOG_ROUNDS = 4
class TestingConfig(BaseConfig):
"""Testing configuration."""
DEBUG = True
TESTING = True
BCRYPT_LOG_ROUNDS = 4
PRESERVE_CONTEXT_ON_EXCEPTION = False
class ProductionConfig(BaseConfig):
"""Production configuration."""
SECRET_KEY = 'my_precious'
DEBUG = False
|
py
|
1a5de353d7dc0f58ac3d259bf40e02d1a6dc6b54
|
import pymongo
import pytest
from helpers.client import QueryRuntimeException
from helpers.cluster import ClickHouseCluster
@pytest.fixture(scope="module")
def started_cluster(request):
try:
cluster = ClickHouseCluster(__file__)
node = cluster.add_instance('node',
main_configs=["configs_secure/config.d/ssl_conf.xml", "configs/named_collections.xml"],
with_mongo=True,
with_mongo_secure=request.param)
cluster.start()
yield cluster
finally:
cluster.shutdown()
def get_mongo_connection(started_cluster, secure=False):
connection_str = 'mongodb://root:clickhouse@localhost:{}'.format(started_cluster.mongo_port)
if secure:
connection_str += '/?tls=true&tlsAllowInvalidCertificates=true'
return pymongo.MongoClient(connection_str)
@pytest.mark.parametrize('started_cluster', [False], indirect=['started_cluster'])
def test_simple_select(started_cluster):
mongo_connection = get_mongo_connection(started_cluster)
db = mongo_connection['test']
db.add_user('root', 'clickhouse')
simple_mongo_table = db['simple_table']
data = []
for i in range(0, 100):
data.append({'key': i, 'data': hex(i * i)})
simple_mongo_table.insert_many(data)
node = started_cluster.instances['node']
node.query(
"CREATE TABLE simple_mongo_table(key UInt64, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse')")
assert node.query("SELECT COUNT() FROM simple_mongo_table") == '100\n'
assert node.query("SELECT sum(key) FROM simple_mongo_table") == str(sum(range(0, 100))) + '\n'
assert node.query("SELECT data from simple_mongo_table where key = 42") == hex(42 * 42) + '\n'
node.query("DROP TABLE simple_mongo_table")
simple_mongo_table.drop()
@pytest.mark.parametrize('started_cluster', [False], indirect=['started_cluster'])
def test_complex_data_type(started_cluster):
mongo_connection = get_mongo_connection(started_cluster)
db = mongo_connection['test']
db.add_user('root', 'clickhouse')
incomplete_mongo_table = db['complex_table']
data = []
for i in range(0, 100):
data.append({'key': i, 'data': hex(i * i), 'dict': {'a': i, 'b': str(i)}})
incomplete_mongo_table.insert_many(data)
node = started_cluster.instances['node']
node.query(
"CREATE TABLE incomplete_mongo_table(key UInt64, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'complex_table', 'root', 'clickhouse')")
assert node.query("SELECT COUNT() FROM incomplete_mongo_table") == '100\n'
assert node.query("SELECT sum(key) FROM incomplete_mongo_table") == str(sum(range(0, 100))) + '\n'
assert node.query("SELECT data from incomplete_mongo_table where key = 42") == hex(42 * 42) + '\n'
node.query("DROP TABLE incomplete_mongo_table")
incomplete_mongo_table.drop()
@pytest.mark.parametrize('started_cluster', [False], indirect=['started_cluster'])
def test_incorrect_data_type(started_cluster):
mongo_connection = get_mongo_connection(started_cluster)
db = mongo_connection['test']
db.add_user('root', 'clickhouse')
strange_mongo_table = db['strange_table']
data = []
for i in range(0, 100):
data.append({'key': i, 'data': hex(i * i), 'aaaa': 'Hello'})
strange_mongo_table.insert_many(data)
node = started_cluster.instances['node']
node.query(
"CREATE TABLE strange_mongo_table(key String, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'strange_table', 'root', 'clickhouse')")
with pytest.raises(QueryRuntimeException):
node.query("SELECT COUNT() FROM strange_mongo_table")
with pytest.raises(QueryRuntimeException):
node.query("SELECT uniq(key) FROM strange_mongo_table")
node.query(
"CREATE TABLE strange_mongo_table2(key UInt64, data String, bbbb String) ENGINE = MongoDB('mongo1:27017', 'test', 'strange_table', 'root', 'clickhouse')")
with pytest.raises(QueryRuntimeException):
node.query("SELECT bbbb FROM strange_mongo_table2")
node.query("DROP TABLE strange_mongo_table")
node.query("DROP TABLE strange_mongo_table2")
strange_mongo_table.drop()
@pytest.mark.parametrize('started_cluster', [True], indirect=['started_cluster'])
def test_secure_connection(started_cluster):
mongo_connection = get_mongo_connection(started_cluster, secure=True)
db = mongo_connection['test']
db.add_user('root', 'clickhouse')
simple_mongo_table = db['simple_table']
data = []
for i in range(0, 100):
data.append({'key': i, 'data': hex(i * i)})
simple_mongo_table.insert_many(data)
node = started_cluster.instances['node']
node.query(
"CREATE TABLE simple_mongo_table(key UInt64, data String) ENGINE = MongoDB('mongo1:27017', 'test', 'simple_table', 'root', 'clickhouse', 'ssl=true')")
assert node.query("SELECT COUNT() FROM simple_mongo_table") == '100\n'
assert node.query("SELECT sum(key) FROM simple_mongo_table") == str(sum(range(0, 100))) + '\n'
assert node.query("SELECT data from simple_mongo_table where key = 42") == hex(42 * 42) + '\n'
node.query("DROP TABLE simple_mongo_table")
simple_mongo_table.drop()
@pytest.mark.parametrize('started_cluster', [False], indirect=['started_cluster'])
def test_predefined_connection_configuration(started_cluster):
mongo_connection = get_mongo_connection(started_cluster)
db = mongo_connection['test']
db.add_user('root', 'clickhouse')
simple_mongo_table = db['simple_table']
data = []
for i in range(0, 100):
data.append({'key': i, 'data': hex(i * i)})
simple_mongo_table.insert_many(data)
node = started_cluster.instances['node']
node.query("create table simple_mongo_table(key UInt64, data String) engine = MongoDB(mongo1)")
simple_mongo_table.drop()
|
py
|
1a5de3c9cf52aa6d93a5a5cdc0bfb14eb6807d44
|
from __future__ import annotations
import os
import signal
import subprocess
import sys
from pathlib import Path
from typing import TYPE_CHECKING
from typing import Any
import pexpect
from cleo.terminal import Terminal
from shellingham import ShellDetectionFailure
from shellingham import detect_shell
from poetry.utils._compat import WINDOWS
if TYPE_CHECKING:
from poetry.utils.env import VirtualEnv
class Shell:
"""
Represents the current shell.
"""
_shell = None
def __init__(self, name: str, path: str) -> None:
self._name = name
self._path = path
@property
def name(self) -> str:
return self._name
@property
def path(self) -> str:
return self._path
@classmethod
def get(cls) -> Shell:
"""
Retrieve the current shell.
"""
if cls._shell is not None:
return cls._shell
try:
name, path = detect_shell(os.getpid())
except (RuntimeError, ShellDetectionFailure):
shell = None
if os.name == "posix":
shell = os.environ.get("SHELL")
elif os.name == "nt":
shell = os.environ.get("COMSPEC")
if not shell:
raise RuntimeError("Unable to detect the current shell.")
name, path = Path(shell).stem, shell
cls._shell = cls(name, path)
return cls._shell
def activate(self, env: VirtualEnv) -> int | None:
activate_script = self._get_activate_script()
bin_dir = "Scripts" if WINDOWS else "bin"
activate_path = env.path / bin_dir / activate_script
# mypy requires using sys.platform instead of WINDOWS constant
# in if statements to properly type check on Windows
if sys.platform == "win32":
if self._name in ("powershell", "pwsh"):
args = ["-NoExit", "-File", str(activate_path)]
else:
# /K will execute the bat file and
# keep the cmd process from terminating
args = ["/K", str(activate_path)]
completed_proc = subprocess.run([self.path, *args])
return completed_proc.returncode
import shlex
terminal = Terminal()
with env.temp_environ():
c = pexpect.spawn(
self._path, ["-i"], dimensions=(terminal.height, terminal.width)
)
if self._name == "zsh":
c.setecho(False)
c.sendline(f"{self._get_source_command()} {shlex.quote(str(activate_path))}")
def resize(sig: Any, data: Any) -> None:
terminal = Terminal()
c.setwinsize(terminal.height, terminal.width)
signal.signal(signal.SIGWINCH, resize)
# Interact with the new shell.
c.interact(escape_character=None)
c.close()
sys.exit(c.exitstatus)
def _get_activate_script(self) -> str:
if self._name == "fish":
suffix = ".fish"
elif self._name in ("csh", "tcsh"):
suffix = ".csh"
elif self._name in ("powershell", "pwsh"):
suffix = ".ps1"
elif self._name == "cmd":
suffix = ".bat"
else:
suffix = ""
return "activate" + suffix
def _get_source_command(self) -> str:
if self._name in ("fish", "csh", "tcsh"):
return "source"
return "."
def __repr__(self) -> str:
return f'{self.__class__.__name__}("{self._name}", "{self._path}")'
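# Illustrative usage sketch (comments only; not part of poetry itself). Shell.get()
# detects the shell that launched the process, and activate() spawns it with the
# virtualenv's activate script sourced; "env" is assumed to be a poetry VirtualEnv.
#
#   shell = Shell.get()
#   print(shell.name, shell.path)
#   shell.activate(env)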
|
py
|
1a5de41e9dc26a81480cfdd1eef209a913a3ace3
|
from pydantic import BaseModel
from fastapi import APIRouter
from fastapi.responses import JSONResponse
import datetime
import bcrypt
import jwt
from config import db, SECRET_KEY
router = APIRouter(prefix='/api')
account_collection = db.get_collection('accounts')
class Login(BaseModel):
username: str
password: str
ip: str
@router.post('/login')
async def login(login: Login):
try:
account = account_collection.find_one({'username': login.username})
if account:
if account['banned']:
return JSONResponse({'message': 'account is banned', 'success': False}, status_code=403)
if bcrypt.checkpw(
login.password.encode('utf-8'),
account['password'].encode('utf-8')
):
token = jwt.encode({
'_id': account['_id'], 'exp': datetime.datetime.utcnow() + datetime.timedelta(hours=24)
}, SECRET_KEY, algorithm='HS256')
account_collection.update_one(
{'_id': account['_id']},
{
'$set': {
'token': token, 'ip': login.ip,
'updatedAt': datetime.datetime.utcnow(),
'lastLogin': datetime.datetime.utcnow()
}
}
)
return JSONResponse(
{'message': 'successfully logged in', 'success': True, 'token': token},
status_code=200
)
else:
return JSONResponse({'message': 'incorrect password', 'success': False}, status_code=401)
else:
return JSONResponse({'message': 'account not found', 'success': False}, status_code=404)
except jwt.exceptions.DecodeError:
return JSONResponse({'message': 'invalid token', 'success': False}, status_code=401)
except jwt.exceptions.ExpiredSignatureError:
return JSONResponse({'message': 'token expired', 'success': False}, status_code=401)
except Exception as e:
return JSONResponse(
{'message': 'unknown error', 'error': str(e), 'success': False}, status_code=500
)
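# Illustrative client call (comments only; the host and port are assumptions, not
# part of this module):
#
#   import requests
#   requests.post(
#       "http://localhost:8000/api/login",
#       json={"username": "alice", "password": "secret", "ip": "127.0.0.1"},
#   )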
|
py
|
1a5de67eb8e2de9e07ad341adcae4e84f1627faf
|
"""
Django settings for django_project project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import django_heroku
from decouple import config
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECRET_KEY = '7evwci)je7e4b+lc47yyf9zyyjt2raz-7id@97=ln!co5%!n%='
SECRET_KEY = 'tk)=fv52@lm273+6g7nink%b@pw9m@qldc2$ots8_h+xd0m53='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'blog.apps.BlogConfig',
'users.apps.UsersConfig',
'crispy_forms',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.sites',
'django.contrib.staticfiles',
'snowpenguin.django.recaptcha3',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
SITE_ID = 1
SOCIALACCOUNT_PROVIDERS = {
'google': {
'SCOPE': [
'profile',
'email',
],
'AUTH_PARAMS': {
'access_type': 'online',
}
}
}
RECAPTCHA_PUBLIC_KEY = config('RECAPTCAR_KEY')
RECAPTCHA_PRIVATE_KEY = config('RECAPTCAR_KEY_SECRET')
RECAPTCHA_DEFAULT_ACTION = 'generic'
RECAPTCHA_SCORE_THRESHOLD = 0.5
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
CRISPY_TEMPLATE_PACK = 'bootstrap4'
LOGIN_REDIRECT_URL = 'blog-home'
LOGIN_URL = 'login'
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = config('ACCOUNT')
EMAIL_HOST_PASSWORD = config('PASSOWRD')
EMAIL_PORT = 587
django_heroku.settings(locals())
|
py
|
1a5de6947f6846b56e37195e6934bd596d1716fe
|
# author: WatchDogOblivion
# description: TODO
# WatchDogs Blind SQL Request Response Service
# pylint: disable=R0904
import time
import copy
from typing import Callable # pylint: disable=unused-import
from collections import OrderedDict
from multiprocessing.pool import ThreadPool
from pathos.multiprocessing import ProcessingPool
from watchdogs.base.models import AllArgs # pylint: disable=unused-import
from watchdogs.utils import GeneralUtility
from watchdogs.web.models import BlindSQLRRHelper # pylint: disable=unused-import
from watchdogs.web.models.Requests import Request # pylint: disable=unused-import
from watchdogs.web.parsers import BlindSQLArgs
from watchdogs.web.services import RequestResponseService
from watchdogs.utils.Constants import (COUNT, LENGTH, NAME, VALUE, VERSION)
from watchdogs.web.webutils.BlindSQLRRQueries import CIPH, ODPH, ORPH, RIPH, BlindSQLRRQueries
KEY_MAP = OrderedDict([(VERSION, '@@version'), (NAME, 'database()')])
OPERAND_LIMIT = 10000
ERROR = -999
ERROR_STRING = '-999'
EXCEED_MSG = "Exceeded maximum operand limit."
class BlindSQLRRService(RequestResponseService):
@staticmethod
def resetEndpoint(request, originalEndpoint):
# type: (Request, str) -> Request
requestInfo = request.getRequestInfo()
requestInfo.setEndpoint(originalEndpoint)
request.setRequestInfo(requestInfo)
return request
@staticmethod
def updateEndpoint(request, query):
# type: (Request, str) -> Request
requestInfo = request.getRequestInfo()
requestInfo.setEndpoint(requestInfo.getEndpoint() + query)
request.setRequestInfo(requestInfo)
return request
@staticmethod
def getMultiprocessingArgs(allArgs, request, helper, argumentSize, startIndex=0):
# type: (AllArgs, Request, BlindSQLRRHelper, int, int) -> tuple
allArgsArray = []
requestArray = []
helperArray = []
index = 0
while index < argumentSize:
allArgsArray.append(allArgs)
requestArray.append(request)
helperArray.append(helper)
index += 1
return (allArgsArray, requestArray, helperArray, range(startIndex, argumentSize))
@staticmethod
def multithread(method, allArgs, request, helper, processes):
# type: (Callable, AllArgs, Request, BlindSQLRRHelper, int) -> str
jobs = []
results = []
pool = ThreadPool(processes=processes)
for index in range(processes):
jobs.append(pool.apply_async(method, (allArgs, request, copy.deepcopy(helper), index)))
for job in jobs:
try:
results.append(job.get())
except Exception as e:
results.append(e)
return results
def multiprocess(self, method, allArgs, request, helper, argumentSize, startIndex=0):
# type: (Callable, AllArgs, Request, BlindSQLRRHelper, int, int) -> str
blindSQLArgs = allArgs.getArgs(BlindSQLArgs)
pool = ProcessingPool(blindSQLArgs.processPoolSize)
args = self.getMultiprocessingArgs(allArgs, request, helper, argumentSize, startIndex)
return pool.map(method, *args)
def getDbCharacterInteger(self, allArgs, request, helper, index):
# type: (AllArgs, Request, BlindSQLRRHelper, int) -> int
helper.setCharacterIndex(index)
helper.setQueryOperand(0)
integerValue = self.operandBinarySearch(allArgs, request, helper)
return integerValue
def getDatabaseValue(self, allArgs, request, helper, dbValueLength):
# type: (AllArgs, Request, BlindSQLRRHelper, int) -> str
dbCharacterIntegers = self.multiprocess(self.getDbCharacterInteger, allArgs, request, helper,
dbValueLength)
if (ERROR in dbCharacterIntegers):
return ERROR_STRING
nullByteCount = 0
databaseValue = ""
for characterInteger in dbCharacterIntegers:
if (characterInteger == 0):
nullByteCount += 1
characterValue = chr(characterInteger)
databaseValue += characterValue
if (nullByteCount > 0):
remainingCharacterIntegers = self.multiprocess(self.getDbCharacterInteger, allArgs, request, helper,
nullByteCount + dbValueLength, dbValueLength)
for characterInteger in remainingCharacterIntegers:
characterValue = chr(characterInteger)
databaseValue += characterValue
return databaseValue
def getRowValue(self, allArgs, request, helper, index):
# type: (AllArgs, Request, BlindSQLRRHelper, int) -> str
helper.setRowIndex(index)
helper.setQueryOperand(0)
helper.setIsRowCheck(True)
BlindSQLRRQueries.setQuery(LENGTH, allArgs, helper)
valueLength = self.operandBinarySearch(allArgs, request, helper)
if (valueLength == ERROR):
return ERROR_STRING
helper.setIsRowCharacterCheck(True)
BlindSQLRRQueries.setQuery(VALUE, allArgs, helper)
return self.getDatabaseValue(allArgs, request, helper, valueLength)
def getInvalidResponseLength(self, allArgs, request):
# type: (AllArgs, Request) -> int
blindSQLArgs = allArgs.getArgs(BlindSQLArgs)
query = "{0}{1}AND{1}1=2{2}"
query = query.format(blindSQLArgs.terminator, blindSQLArgs.wordDelimiter, blindSQLArgs.commentOut)
query = GeneralUtility.urlEncode(query)
response = self.sendRequest(allArgs, self.updateEndpoint(request, query))
return int(self.getFinalResponse(response).getResponseLength())
def operatorOperand(self, allArgs, request, helper):
# type: (AllArgs, Request, BlindSQLRRHelper) -> bool
self.resetEndpoint(request, helper.getOriginalEndpoint())
query = helper.getQuery()
replaced = query.replace(ORPH, helper.getQueryOperator())
replaced = replaced.replace(ODPH, str(helper.getQueryOperand()))
if (helper.isCharacterCheck() or helper.isRowCharacterCheck()):
replaced = replaced.replace(CIPH, str(helper.getCharacterIndex()))
if (helper.isRowCheck() or helper.isRowCharacterCheck()):
replaced = replaced.replace(RIPH, str(helper.getRowIndex()))
query = GeneralUtility.urlEncode(replaced)
response = self.sendRequest(allArgs, self.updateEndpoint(request, query))
responseLength = self.getFinalResponse(response).getResponseLength()
if (int(responseLength) == helper.getInvalidResponseLength()):
return False
return True
def equalsOperand(self, allArgs, request, helper):
# type: (AllArgs, Request, BlindSQLRRHelper) -> bool
helper.setQueryOperator("=")
return self.operatorOperand(allArgs, request, helper)
def isLessThanOperand(self, allArgs, request, helper):
# type: (AllArgs, Request, BlindSQLRRHelper) -> bool
helper.setQueryOperator("<")
return self.operatorOperand(allArgs, request, helper)
def operandBinarySearch(self, allArgs, request, helper):
#type:(AllArgs, Request, BlindSQLRRHelper)->int
operand = helper.getQueryOperand()
if (self.equalsOperand(allArgs, request, helper)):
return operand
index = 0
while (True):
if (helper.getQueryOperand() > OPERAND_LIMIT):
return ERROR
helper.setQueryOperand(2**index + operand)
if (self.isLessThanOperand(allArgs, request, helper)):
helper.setQueryOperand(2**(index - 1) + operand)
return self.operandBinarySearch(allArgs, request, helper)
index += 1
def getKeyValue(self, key, allArgs, request, helper):
# type: (str, AllArgs, Request, BlindSQLRRHelper) -> str
if (key == VERSION):
helper.setQueryKey(KEY_MAP[VERSION])
elif (key == NAME):
helper.setQueryKey(KEY_MAP[NAME])
else:
helper.setQueryKey(key)
helper.setQueryOperand(0)
helper.setQuery(BlindSQLRRQueries.getLengthQuery(allArgs, helper))
valueLength = self.operandBinarySearch(allArgs, request, helper)
if (valueLength == ERROR):
return ERROR_STRING
helper.setIsCharacterCheck(True)
helper.setQuery(BlindSQLRRQueries.getValueQuery(allArgs, helper))
return self.getDatabaseValue(allArgs, request, helper, valueLength)
def setDataBaseVersion(self, allArgs, request, helper):
# type: (AllArgs, Request, BlindSQLRRHelper) -> None
startTime = time.time()
if (not helper.getDatabaseVersion()):
databaseVersion = self.getKeyValue(VERSION, allArgs, request, helper)
if (databaseVersion == ERROR_STRING):
print(EXCEED_MSG)
helper.setDatabaseVersion(databaseVersion)
endTime = time.time()
GeneralUtility.printTime(startTime, endTime)
def setCurrentDatabase(self, allArgs, request, helper):
# type: (AllArgs, Request, BlindSQLRRHelper) -> None
startTime = time.time()
if (not helper.getDatabaseName()):
databaseName = self.getKeyValue(NAME, allArgs, request, helper)
if (databaseName == ERROR_STRING):
print(EXCEED_MSG)
helper.setDatabaseName(databaseName)
endTime = time.time()
GeneralUtility.printTime(startTime, endTime)
def setDataList(self, allArgs, request, helper):
# type: (AllArgs, Request, BlindSQLRRHelper) -> None
startTime = time.time()
helper.setQueryOperand(0)
BlindSQLRRQueries.setQuery(COUNT, allArgs, helper)
valueCount = self.operandBinarySearch(allArgs, request, helper)
if (valueCount < 1):
print("There were no entries in the database for your request")
helper.setDataList([])
return
results = self.multithread(self.getRowValue, allArgs, request, helper, valueCount)
if (ERROR_STRING in results):
print(EXCEED_MSG)
endTime = time.time()
GeneralUtility.printTime(startTime, endTime)
helper.setDataList(results)
|
py
|
1a5de70c8e0da9d21eb036dd5da891e0902bdcf6
|
import sys
import iris
from cosmic.util import load_module, filepath_regrid
def main(target_filename, models_settings, model, year, season):
input_dir = models_settings[model]['input_dir']
print(f'{model}, {year}, {season}')
input_filename = input_dir / f'{model}.highresSST-present.r1i1p1f1.{year}.{season}.asia_precip.nc'
output_filename = input_filename.parent / f'{input_filename.stem}.N1280.nc'
done_filename = (output_filename.parent / (output_filename.name + '.done'))
if done_filename.exists():
print(f'Skipping: {done_filename.name} exists')
return
regridded_cube = filepath_regrid(input_filename, target_filename)
iris.save(regridded_cube, str(output_filename), zlib=True)
done_filename.touch()
if __name__ == '__main__':
config = load_module(sys.argv[1])
config_key = sys.argv[2]
main(config.TARGET_FILENAME, config.MODELS, *config.SCRIPT_ARGS[config_key])
|
py
|
1a5de7595d6a1719c195d9552a58c0f184314362
|
# Copyright (c) 2019 NVIDIA Corporation
import torch as t
import torch.nn as nn
from ...core import DeviceType, NeuralModule
from ...utils.helpers import rgetattr, rsetattr
class TrainableNeuralModuleWrapper(NeuralModule, nn.Module):
"""This class wraps an instance of Pytorch's nn.Module and
returns NeuralModule's instance."""
def __init__(self, pt_nn_module, input_ports_dict, output_ports_dict):
NeuralModule.__init__(self)
nn.Module.__init__(self)
self._input_ports = input_ports_dict
self._output_ports = output_ports_dict
self._device = t.device("cuda" if self.placement in [DeviceType.GPU, DeviceType.AllGpu] else "cpu")
self._pt_module = pt_nn_module
self._pt_module.to(self._device)
@property
def input_ports(self):
"""Returns definitions of module input ports.
"""
return self._input_ports
@property
def output_ports(self):
"""Returns definitions of module output ports.
"""
return self._output_ports
# def forward(self, *input):
# return self._pt_module(input)
def eval(self):
return self._pt_module.eval()
def train(self):
return self._pt_module.train()
def __call__(self, force_pt=False, *input, **kwargs):
pt_call = len(input) > 0 or force_pt
if pt_call:
return self._pt_module.__call__(*input, **kwargs)
else:
return NeuralModule.__call__(self, **kwargs)
def get_weights(self):
result = dict()
for name, parameter in self.named_parameters():
result[name] = (parameter, parameter.requires_grad)
return result
def save_to(self, path):
t.save(self._pt_module.state_dict(), path)
def restore_from(self, path):
self._pt_module.load_state_dict(t.load(path))
def parameters(self):
return self._pt_module.parameters()
def named_parameters(self):
return self._pt_module.named_parameters()
def freeze(self, weights=None):
for name, param in self._pt_module.named_parameters():
if weights is None or name in weights:
param.requires_grad = False
def unfreeze(self, weights=None):
for name, param in self._pt_module.named_parameters():
if weights is None or name in weights:
param.requires_grad = True
def get_weights(self):
result = dict()
for name, parameter in self._pt_module.named_parameters():
result[name] = (parameter, parameter.requires_grad)
return result
def set_weights(self, name2weight, name2name_and_transform=None):
self._pt_module.load_state_dict({key: name2weight[key][0] for key in name2weight.keys()})
def tie_weights_with(self, module, weight_names):
for name in weight_names:
rsetattr(self._pt_module, name, nn.Parameter(rgetattr(module, name)))
@property
def num_weights(self):
return sum(p.numel() for p in self._pt_module.parameters() if p.requires_grad)
|
py
|
1a5de8f3982e66a3f429bcdeb4a62217f31f3104
|
"""Evaluation metric implementations for audio synthesis tasks"""
import glob
import os
import warnings
from pathlib import Path
from typing import Any, Callable, Iterable, Optional, Type, Union
import numpy as np
import torch
from scipy.io import wavfile
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Dataset
class WavDataSet(Dataset):
"""Torch dataset for wavfile directories"""
def __init__(
self,
samples: str,
labels: Optional[Iterable[Any]] = None,
transform: Optional[Callable[[np.ndarray], Any]] = None,
train: bool = True,
dtype: Type = torch.FloatTensor,
):
"""
Args:
samples: Path to directory containing audio samples (wav files are supported)
labels: Optional labels for the samples; required when train is False
transform: Optional preprocessing function applied to the loaded audio before it is returned
train: If True, items are the audio tensors only; if False, items are (audio, label) pairs
dtype: Datatype to cast the loaded numpy array of audio data to (done before passing to the transform function)
"""
self.files = glob.glob(os.path.join(Path(samples), "*.wav"))
self.labels = np.array(labels) if labels is not None else None
self.transform = transform if transform is not None else lambda audio: audio
self.train = train
self.dtype = dtype
if not train:
if labels is None:
raise ValueError("Cannot create test dataloader without labels")
            if self.labels.shape[0] != len(self.files):
                raise ValueError(
                    f"The number of labels provided does not match the number of samples, got {self.labels.shape[0]}"
                    f" labels and {len(self.files)} samples"
                )
def __len__(self):
return len(self.files)
    def __getitem__(self, item):
        file = self.files[item]
        _, audio = wavfile.read(file)
        # Tensor.type() accepts legacy tensor-type classes such as torch.FloatTensor
        audio = self.transform(torch.from_numpy(audio).type(self.dtype))
        # Training datasets return only the audio; test datasets return an (audio, label) pair
        return audio if self.train else (audio, self.labels[item])
def _check_cuda(cuda: bool):
# Make sure cuda is available if using cuda
if cuda and not torch.cuda.is_available():
raise EnvironmentError("CUDA set to true but no CUDA enabled device available")
# Warn if cuda is available and not using
if not cuda and torch.cuda.is_available():
warnings.warn("A CUDA enabled device is available, but cuda is not set to True")
def audio_inception_score(
classifier: Callable[..., np.ndarray],
samples: Union[str, Path],
transform: Optional[Callable[[np.ndarray], Any]] = None,
batch_size: int = 4,
splits: int = 10,
n_classes: int = 10,
shuffle: bool = True,
cuda: bool = True,
) -> np.ndarray:
"""Inception score implementation adapted for audio synthesis performance evaluation
Based on https://github.com/openai/improved-gan/blob/master/inception_score/model.py
From Improved Techniques for Training GANs (Goodfellow, 2016) https://arxiv.org/pdf/1606.03498.pdf
Args:
classifier: Classification model (in evaluation mode) which classifies an audio sample into <n_classes> by
computing confidence scores for each class, for each sample
samples: Path to directory containing audio samples (wav files are supported)
transform: Optionally provide a preprocessing function to transform the audio data before predicting the class
with the classifier
batch_size: Integer representing the number of samples to predict on in each iteration
splits: Integer representing the number of splits to chunk the predictions into, producing an inception score
for each chunk
n_classes: The number of classes predicted by the classification model
shuffle: Boolean flag, whether or not to shuffle the dataset
cuda: Boolean flag, whether or not to use a CUDA device for the classification model
Returns:
<splits> x 1 np.ndarray containing the computed inception score for each split
"""
_check_cuda(cuda)
dtype = torch.cuda.FloatTensor if cuda else torch.FloatTensor # CUDA type if on cuda
    # No labels are needed for the inception score, so build the dataset in "train" mode (audio only)
    dataloader = DataLoader(
        WavDataSet(samples, None, transform, True, dtype), batch_size=batch_size, shuffle=shuffle, num_workers=0
    )
# Must have >= 1 sample per split
n = len(dataloader.dataset)
if splits > n:
raise ValueError(f"Cannot compute inception score for {splits} splits from only {n} samples")
# Process classification predictions in batches
predictions = np.empty((n, n_classes), dtype=np.float64)
for i, batch in enumerate(dataloader):
preds = classifier(batch)
        preds = F.softmax(preds, dim=1).data.cpu().numpy()
predictions[i * batch_size : (i + 1) * batch_size] = preds
# Compute inception scores
scores = np.empty(splits, dtype=np.float64)
split_size = n // splits
for i in range(splits):
preds_split = predictions[i * split_size : (i + 1) * split_size]
kl = preds_split * (np.log(preds_split) - np.log(np.expand_dims(np.mean(preds_split, axis=0), axis=0)))
kl = np.exp(np.mean(np.sum(kl, axis=1)))
scores[i] = kl
return scores
def pitch_accuracy_entropy(
classifier: Callable[..., np.ndarray],
samples: str,
labels: np.ndarray,
transform: Optional[Callable[[np.ndarray], Any]] = None,
batch_size: int = 4,
shuffle: bool = True,
cuda: bool = True,
):
"""Implementation of pitch accuracy and pitch entropy as described in GANSynth: Adversarial Neural Audio Synthesis
(Engel, 2019) https://arxiv.org/abs/1902.08710
Args:
classifier: Classification model (in evaluation mode) which classifies an audio sample into <n_classes> by
computing confidence scores for each class, for each sample
samples: Path to directory containing audio samples (wav files are supported)
labels: Numpy array of integers representing the true label for each corresponding sample (index of label)
transform: Optionally provide a preprocessing function to transform the audio data before predicting the class
with the classifier
batch_size: Integer representing the number of samples to predict on in each iteration
shuffle: Boolean flag, whether or not to shuffle the dataset
cuda: Boolean flag, whether or not to use a CUDA device for the classification model
Returns:
<splits> x 1 np.ndarray containing the computed inception score for each split
"""
_check_cuda(cuda)
dtype = torch.cuda.FloatTensor if cuda else torch.FloatTensor # CUDA type if on cuda
dataloader = DataLoader(
WavDataSet(samples, labels, transform, True, dtype), batch_size=batch_size, shuffle=shuffle, num_workers=0
)
predictions = np.empty(len(dataloader.dataset), dtype=np.int32)
for i, batch in enumerate(dataloader):
preds = classifier(batch)
        preds = torch.argmax(preds, dim=1).cpu().numpy()
predictions[i * batch_size : (i + 1) * batch_size] = preds
probs = np.array([(predictions == i).mean() for i in range(labels.min(), labels.max() + 1)], dtype=np.float64)
return (labels == predictions).mean(), -(probs @ np.log(probs)) # Compute accuracy and entropy of predictions
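if __name__ == "__main__":
    # Illustrative sketch only (not part of the original module). It assumes a directory
    # "./generated_wavs" containing at least two mono wav files of equal length, and it uses an
    # untrained dummy classifier, so the printed scores are meaningless -- the point is only to
    # show how audio_inception_score is invoked.
    import torch.nn as nn

    class DummyClassifier(nn.Module):
        def __init__(self, n_classes: int = 10):
            super().__init__()
            self.pool = nn.AdaptiveAvgPool1d(64)  # collapse arbitrary-length audio to 64 features
            self.fc = nn.Linear(64, n_classes)

        def forward(self, x):
            # x: (batch, num_samples) -> (batch, 1, num_samples) -> pooled -> class scores
            return self.fc(self.pool(x.unsqueeze(1)).squeeze(1))

    clf = DummyClassifier().eval()
    scores = audio_inception_score(clf, "./generated_wavs", batch_size=4, splits=2, cuda=False)
    print("Inception score per split:", scores)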
|
py
|
1a5de98a5fe3ec0bf277922ddab520ef09999054
|
"""
This file contain wraps function that will check the input of the general function.
will raise exceptions if the input is not valid.
This file is using the generic check input class 'CheckInput' that contains all the
input validation to all the functions
Created by: Nir Barazida
Good luck
"""
from functools import wraps
from NBprocessing.src._check_input import _CheckInput
class _InputCheckGeneral(object):
"""
This class contain wraps function that will check the input of the general function.
will raise exceptions if the input is not valid.
This class is using the generic check input class 'CheckInput' that contains all the
input validation to all the functions
"""
@staticmethod
def _missing_values_checker(func):
"""
Wrapper function to validate the input for method 'missing_values'
Will raise Exception if input incorrect
"""
@wraps(func)
def wrapper_checker(database):
_CheckInput._check_database_input(database)
return func(database)
return wrapper_checker
@staticmethod
def _split_and_check_checker(func):
"""
Wrapper function to validate the input for method 'split_and_check'
Will raise Exception if input incorrect
"""
@wraps(func)
def wrapper_checker(database, column_name, test_size=0.3):
_CheckInput._check_database_input(database)
_CheckInput._check_column_in_database(column_name, database)
_CheckInput._check_threshold(test_size)
            return func(database, column_name, test_size=test_size)
return wrapper_checker
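# Illustrative sketch (not part of the original module): these decorators are meant to wrap the
# public NBprocessing functions, e.g.
#
#   @_InputCheckGeneral._split_and_check_checker
#   def split_and_check(database, column_name, test_size=0.3):
#       ...  # the real implementation lives elsewhere in the NBprocessing package
#
# A call such as split_and_check(df, "target", test_size=0.2) then validates the DataFrame, the
# column name and the threshold before the wrapped function body runs.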
|
py
|
1a5de9930fa594ec4b91dbf5aac8af2f45ed9f9b
|
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from gcp_devrel.testing import eventually_consistent
from gcp_devrel.testing.flaky import flaky
from google.cloud import pubsub_v1
import google.api_core.exceptions
import mock
import pytest
import subscriber
PROJECT = os.environ['GCLOUD_PROJECT']
TOPIC = 'subscription-test-topic'
SUBSCRIPTION = 'subscription-test-subscription'
SUBSCRIPTION_SYNC1 = 'subscription-test-subscription-sync1'
SUBSCRIPTION_SYNC2 = 'subscription-test-subscription-sync2'
ENDPOINT = 'https://{}.appspot.com/push'.format(PROJECT)
NEW_ENDPOINT = 'https://{}.appspot.com/push2'.format(PROJECT)
@pytest.fixture(scope='module')
def publisher_client():
yield pubsub_v1.PublisherClient()
@pytest.fixture(scope='module')
def topic(publisher_client):
topic_path = publisher_client.topic_path(PROJECT, TOPIC)
try:
publisher_client.delete_topic(topic_path)
except Exception:
pass
publisher_client.create_topic(topic_path)
yield topic_path
@pytest.fixture(scope='module')
def subscriber_client():
yield pubsub_v1.SubscriberClient()
@pytest.fixture
def subscription(subscriber_client, topic):
subscription_path = subscriber_client.subscription_path(
PROJECT, SUBSCRIPTION)
try:
subscriber_client.delete_subscription(subscription_path)
except Exception:
pass
try:
subscriber_client.create_subscription(subscription_path, topic=topic)
except google.api_core.exceptions.AlreadyExists:
pass
yield subscription_path
@pytest.fixture
def subscription_sync1(subscriber_client, topic):
subscription_sync_path = subscriber_client.subscription_path(
PROJECT, SUBSCRIPTION_SYNC1)
try:
subscriber_client.delete_subscription(subscription_sync_path)
except Exception:
pass
subscriber_client.create_subscription(subscription_sync_path, topic=topic)
yield subscription_sync_path
@pytest.fixture
def subscription_sync2(subscriber_client, topic):
subscription_sync_path = subscriber_client.subscription_path(
PROJECT, SUBSCRIPTION_SYNC2)
try:
subscriber_client.delete_subscription(subscription_sync_path)
except Exception:
pass
subscriber_client.create_subscription(subscription_sync_path, topic=topic)
yield subscription_sync_path
def test_list_in_topic(subscription, capsys):
@eventually_consistent.call
def _():
subscriber.list_subscriptions_in_topic(PROJECT, TOPIC)
out, _ = capsys.readouterr()
assert subscription in out
def test_list_in_project(subscription, capsys):
@eventually_consistent.call
def _():
subscriber.list_subscriptions_in_project(PROJECT)
out, _ = capsys.readouterr()
assert subscription in out
def test_create(subscriber_client):
subscription_path = subscriber_client.subscription_path(
PROJECT, SUBSCRIPTION)
try:
subscriber_client.delete_subscription(subscription_path)
except Exception:
pass
subscriber.create_subscription(PROJECT, TOPIC, SUBSCRIPTION)
@eventually_consistent.call
def _():
assert subscriber_client.get_subscription(subscription_path)
def test_create_push(subscriber_client):
subscription_path = subscriber_client.subscription_path(
PROJECT, SUBSCRIPTION)
try:
subscriber_client.delete_subscription(subscription_path)
except Exception:
pass
subscriber.create_push_subscription(PROJECT, TOPIC, SUBSCRIPTION, ENDPOINT)
@eventually_consistent.call
def _():
assert subscriber_client.get_subscription(subscription_path)
def test_delete(subscriber_client, subscription):
subscriber.delete_subscription(PROJECT, SUBSCRIPTION)
@eventually_consistent.call
def _():
with pytest.raises(Exception):
subscriber_client.get_subscription(subscription)
def test_update(subscriber_client, subscription, capsys):
subscriber.update_subscription(PROJECT, SUBSCRIPTION, NEW_ENDPOINT)
out, _ = capsys.readouterr()
assert 'Subscription updated' in out
def _publish_messages(publisher_client, topic):
for n in range(5):
data = u'Message {}'.format(n).encode('utf-8')
future = publisher_client.publish(
topic, data=data)
future.result()
def _publish_messages_with_custom_attributes(publisher_client, topic):
data = u'Test message'.encode('utf-8')
future = publisher_client.publish(topic, data=data, origin='python-sample')
future.result()
def _make_sleep_patch():
real_sleep = time.sleep
def new_sleep(period):
if period == 60:
real_sleep(5)
raise RuntimeError('sigil')
else:
real_sleep(period)
return mock.patch('time.sleep', new=new_sleep)
@flaky
def test_receive(publisher_client, topic, subscription, capsys):
_publish_messages(publisher_client, topic)
with _make_sleep_patch():
with pytest.raises(RuntimeError, match='sigil'):
subscriber.receive_messages(PROJECT, SUBSCRIPTION)
out, _ = capsys.readouterr()
assert 'Listening' in out
assert subscription in out
assert 'Message 1' in out
def test_receive_synchronously(
publisher_client, topic, subscription_sync1, capsys):
_publish_messages(publisher_client, topic)
subscriber.synchronous_pull(PROJECT, SUBSCRIPTION_SYNC1)
out, _ = capsys.readouterr()
assert 'Done.' in out
def test_receive_synchronously_with_lease(
publisher_client, topic, subscription_sync2, capsys):
_publish_messages(publisher_client, topic)
subscriber.synchronous_pull_with_lease_management(
PROJECT, SUBSCRIPTION_SYNC2)
out, _ = capsys.readouterr()
assert 'Done.' in out
def test_receive_with_custom_attributes(
publisher_client, topic, subscription, capsys):
_publish_messages_with_custom_attributes(publisher_client, topic)
with _make_sleep_patch():
with pytest.raises(RuntimeError, match='sigil'):
subscriber.receive_messages_with_custom_attributes(
PROJECT, SUBSCRIPTION)
out, _ = capsys.readouterr()
assert 'Test message' in out
assert 'origin' in out
assert 'python-sample' in out
def test_receive_with_flow_control(
publisher_client, topic, subscription, capsys):
_publish_messages(publisher_client, topic)
with _make_sleep_patch():
with pytest.raises(RuntimeError, match='sigil'):
subscriber.receive_messages_with_flow_control(
PROJECT, SUBSCRIPTION)
out, _ = capsys.readouterr()
assert 'Listening' in out
assert subscription in out
assert 'Message 1' in out
|
py
|
1a5dea43d7c9aa8488507ec0d8978249094056f0
|
# -*- coding: utf-8 -*-
# @Time : 2021/8/6 15:01
# @Author : zc
# @Desc : Response entity for the current inventory stock query
from chanjet_openapi_python_sdk.chanjet_response import ChanjetResponse
class QueryCurrentStockResponse(ChanjetResponse):
def __init__(self, data=None):
self.WarehouseName = ""
self.WarehouseCode = ""
self.InvLocationName = ""
self.InvLocationCode = ""
self.InventoryCode = ""
self.InventoryName = ""
self.InventoryClassCode = ""
self.InventoryClassName = ""
self.DefaultBarCode = ""
self.InvBarCode = ""
self.UnitName = ""
self.Specification = ""
self.Brand = ""
self.IsSingleUnit = ""
self.AvailableQuantity = ""
self.ExistingQuantity = ""
self.UnitName2 = ""
self.AvailableQuantity2 = ""
self.ExistingQuantity2 = ""
self.TS = ""
self.TotalCount = ""
self.SkuCode = ""
self.Batch = ""
self.ProductionDate = ""
self.ExpiryDate = ""
self.DynamicPropertyKeys = []
self.DynamicPropertyTitles = []
self.DynamicPropertyValues = []
if data:
self.__dict__ = data
def __str__(self):
return str(self.__dict__)
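# Illustrative sketch (not part of the original SDK file): the entity can be populated directly
# from a deserialized API payload, e.g.
#
#   payload = {"WarehouseName": "Main", "InventoryCode": "A001", "ExistingQuantity": "12"}
#   resp = QueryCurrentStockResponse(data=payload)
#   print(resp)  # prints the payload dict; note that __dict__ is replaced, so defaults are dropped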
|
py
|
1a5dea4aa6fd2567e7752886241014d67acb3f02
|
# If not stated otherwise in this file or this component's LICENSE file the
# following copyright and licenses apply:
#
# Copyright 2020 Sky UK
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import test_utils
from os.path import basename
tests = [
test_utils.Test("Logging to file",
"filelogging",
["hello world 1","hello world 2","hello world 10"],
"Prints hello world 10 times, output should be contained in the logfile"),
test_utils.Test("No logging",
"nolog",
"",
"Starts a container without any logfile"),
]
def execute_test():
if test_utils.selected_platform == test_utils.Platforms.no_selection:
return test_utils.print_unsupported_platform(basename(__file__), test_utils.selected_platform)
with test_utils.dobby_daemon():
output_table = []
for test in tests:
result = test_container(test.container_id, test.expected_output)
output = test_utils.create_simple_test_output(test, result[0], result[1])
output_table.append(output)
test_utils.print_single_result(output)
return test_utils.count_print_results(output_table)
def test_container(container_id, expected_output):
"""Runs container and check if output contains expected output
Parameters:
container_id (string): name of container to run
    expected_output (string): output that should be provided by the container
Returns:
(pass (bool), message (string)): Returns if expected output found and message
"""
test_utils.print_log("Running %s container test" % container_id, test_utils.Severity.debug)
with test_utils.untar_bundle(container_id) as bundle_path:
launch_result = test_utils.launch_container(container_id, bundle_path)
if launch_result:
return validate_output_file(container_id, expected_output)
return False, "Container did not launch successfully"
def validate_output_file(container_id, expected_output):
"""Helper function for finding if expected output is inside log of container
Parameters:
container_id (string): name of container to run
    expected_output (string): output that should be provided by the container
Returns:
(pass (bool), message (string)): Returns if expected output found and message
"""
log = test_utils.get_container_log(container_id)
    # If given a list of outputs to check, loop through and return false if one of them is not in the output
if isinstance(expected_output, list):
for text in expected_output:
if text.lower() not in log.lower():
return False, "Output file did not contain expected text"
return True, "Test passed"
# Otherwise we've been given a string, so just check that one string
if expected_output.lower() in log.lower():
return True, "Test passed"
else:
return False, "Output file did not contain expected text"
if __name__ == "__main__":
test_utils.parse_arguments(__file__, True)
execute_test()
|
py
|
1a5dea72dee833f7ced98717cc7c36acd67640db
|
import numpy as np
from .base_vec_env import VecEnv, VecEnvStepReturn, VecEnvWrapper
class VecExtractDictObs(VecEnvWrapper):
"""
A vectorized wrapper for extracting dictionary observations.
:param venv: The vectorized environment
:param key: The key of the dictionary observation
"""
def __init__(self, venv: VecEnv, key: str):
self.key = key
super().__init__(
venv=venv, observation_space=venv.observation_space.spaces[self.key]
)
def reset(self) -> np.ndarray:
obs = self.venv.reset()
return obs[self.key]
def step_wait(self) -> VecEnvStepReturn:
obs, reward, done, info = self.venv.step_wait()
return obs[self.key], reward, done, info
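# Usage sketch (illustrative, not part of the original module; the relative import above means
# this file is only importable as part of the package). Assuming a vectorized environment whose
# observation space is a gym.spaces.Dict with an "observation" key:
#
#   venv = DummyVecEnv([make_dict_obs_env])   # hypothetical factory returning a Dict-obs env
#   venv = VecExtractDictObs(venv, key="observation")
#   obs = venv.reset()                        # now a plain array instead of a dict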
|
py
|
1a5deb98b3c26b232544b0ea920e40247e211fae
|
from setuptools import setup, find_packages
with open('README.md') as f:
readme = f.read()
setup(
name='configuration-agent',
version='0.1.1',
description='IoT configuration agent',
long_description=readme,
license='Intel Proprietary (see \'licenses\' directory)',
packages=find_packages(exclude=['*.*', 'mqttclient']),
include_package_data=True,
install_requires=['nose', 'packaging', 'future'],
test_suite='nose.collector',
tests_require=['nose'])
|
py
|
1a5dec0f3bae0e49e29f631e55581ecb6f84d7f4
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.20.7
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import kubernetes.client
from kubernetes.client.models.io_xk8s_cluster_v1alpha3_machine_list import IoXK8sClusterV1alpha3MachineList # noqa: E501
from kubernetes.client.rest import ApiException
class TestIoXK8sClusterV1alpha3MachineList(unittest.TestCase):
"""IoXK8sClusterV1alpha3MachineList unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test IoXK8sClusterV1alpha3MachineList
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = kubernetes.client.models.io_xk8s_cluster_v1alpha3_machine_list.IoXK8sClusterV1alpha3MachineList() # noqa: E501
if include_optional :
return IoXK8sClusterV1alpha3MachineList(
api_version = '0',
items = [
kubernetes.client.models.io/x_k8s/cluster/v1alpha3/machine.io.x-k8s.cluster.v1alpha3.Machine(
api_version = '0',
kind = '0',
metadata = kubernetes.client.models.v1/object_meta_v2.v1.ObjectMeta_v2(
annotations = {
'key' : '0'
},
cluster_name = '0',
creation_timestamp = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
deletion_grace_period_seconds = 56,
deletion_timestamp = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
finalizers = [
'0'
],
generate_name = '0',
generation = 56,
labels = {
'key' : '0'
},
managed_fields = [
kubernetes.client.models.v1/managed_fields_entry.v1.ManagedFieldsEntry(
api_version = '0',
fields_type = '0',
fields_v1 = kubernetes.client.models.fields_v1.fieldsV1(),
manager = '0',
operation = '0',
time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), )
],
name = '0',
namespace = '0',
owner_references = [
kubernetes.client.models.v1/owner_reference_v2.v1.OwnerReference_v2(
api_version = '0',
block_owner_deletion = True,
controller = True,
kind = '0',
name = '0',
uid = '0', )
],
resource_version = '0',
self_link = '0',
uid = '0', ),
spec = kubernetes.client.models.io_x_k8s_cluster_v1alpha3_machine_spec.io_x_k8s_cluster_v1alpha3_Machine_spec(
bootstrap = kubernetes.client.models.io_x_k8s_cluster_v1alpha3_machine_spec_bootstrap.io_x_k8s_cluster_v1alpha3_Machine_spec_bootstrap(
config_ref = kubernetes.client.models.io_x_k8s_cluster_v1alpha3_machine_spec_bootstrap_config_ref.io_x_k8s_cluster_v1alpha3_Machine_spec_bootstrap_configRef(
api_version = '0',
field_path = '0',
kind = '0',
name = '0',
namespace = '0',
resource_version = '0',
uid = '0', ),
data = '0',
data_secret_name = '0', ),
cluster_name = '0',
failure_domain = '0',
infrastructure_ref = kubernetes.client.models.io_x_k8s_cluster_controlplane_v1alpha4_kubeadm_control_plane_spec_machine_template_infrastructure_ref.io_x_k8s_cluster_controlplane_v1alpha4_KubeadmControlPlane_spec_machineTemplate_infrastructureRef(
api_version = '0',
field_path = '0',
kind = '0',
name = '0',
namespace = '0',
resource_version = '0',
uid = '0', ),
node_drain_timeout = '0',
provider_id = '0',
version = '0', ),
status = kubernetes.client.models.io_x_k8s_cluster_v1alpha3_machine_status.io_x_k8s_cluster_v1alpha3_Machine_status(
addresses = [
kubernetes.client.models.io_x_k8s_cluster_controlplane_v1alpha3_aws_managed_control_plane_status_bastion_addresses.io_x_k8s_cluster_controlplane_v1alpha3_AWSManagedControlPlane_status_bastion_addresses(
address = '0',
type = '0', )
],
bootstrap_ready = True,
conditions = [
kubernetes.client.models.io_x_k8s_cluster_addons_v1alpha3_cluster_resource_set_status_conditions.io_x_k8s_cluster_addons_v1alpha3_ClusterResourceSet_status_conditions(
last_transition_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
message = '0',
reason = '0',
severity = '0',
status = '0',
type = '0', )
],
failure_message = '0',
failure_reason = '0',
infrastructure_ready = True,
last_updated = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
node_ref = kubernetes.client.models.io_x_k8s_cluster_v1alpha3_machine_status_node_ref.io_x_k8s_cluster_v1alpha3_Machine_status_nodeRef(
api_version = '0',
field_path = '0',
kind = '0',
name = '0',
namespace = '0',
resource_version = '0',
uid = '0', ),
observed_generation = 56,
phase = '0',
version = '0', ), )
],
kind = '0',
metadata = kubernetes.client.models.v1/list_meta.v1.ListMeta(
continue = '0',
remaining_item_count = 56,
resource_version = '0',
self_link = '0', )
)
else :
return IoXK8sClusterV1alpha3MachineList(
items = [
kubernetes.client.models.io/x_k8s/cluster/v1alpha3/machine.io.x-k8s.cluster.v1alpha3.Machine(
api_version = '0',
kind = '0',
metadata = kubernetes.client.models.v1/object_meta_v2.v1.ObjectMeta_v2(
annotations = {
'key' : '0'
},
cluster_name = '0',
creation_timestamp = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
deletion_grace_period_seconds = 56,
deletion_timestamp = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
finalizers = [
'0'
],
generate_name = '0',
generation = 56,
labels = {
'key' : '0'
},
managed_fields = [
kubernetes.client.models.v1/managed_fields_entry.v1.ManagedFieldsEntry(
api_version = '0',
fields_type = '0',
fields_v1 = kubernetes.client.models.fields_v1.fieldsV1(),
manager = '0',
operation = '0',
time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), )
],
name = '0',
namespace = '0',
owner_references = [
kubernetes.client.models.v1/owner_reference_v2.v1.OwnerReference_v2(
api_version = '0',
block_owner_deletion = True,
controller = True,
kind = '0',
name = '0',
uid = '0', )
],
resource_version = '0',
self_link = '0',
uid = '0', ),
spec = kubernetes.client.models.io_x_k8s_cluster_v1alpha3_machine_spec.io_x_k8s_cluster_v1alpha3_Machine_spec(
bootstrap = kubernetes.client.models.io_x_k8s_cluster_v1alpha3_machine_spec_bootstrap.io_x_k8s_cluster_v1alpha3_Machine_spec_bootstrap(
config_ref = kubernetes.client.models.io_x_k8s_cluster_v1alpha3_machine_spec_bootstrap_config_ref.io_x_k8s_cluster_v1alpha3_Machine_spec_bootstrap_configRef(
api_version = '0',
field_path = '0',
kind = '0',
name = '0',
namespace = '0',
resource_version = '0',
uid = '0', ),
data = '0',
data_secret_name = '0', ),
cluster_name = '0',
failure_domain = '0',
infrastructure_ref = kubernetes.client.models.io_x_k8s_cluster_controlplane_v1alpha4_kubeadm_control_plane_spec_machine_template_infrastructure_ref.io_x_k8s_cluster_controlplane_v1alpha4_KubeadmControlPlane_spec_machineTemplate_infrastructureRef(
api_version = '0',
field_path = '0',
kind = '0',
name = '0',
namespace = '0',
resource_version = '0',
uid = '0', ),
node_drain_timeout = '0',
provider_id = '0',
version = '0', ),
status = kubernetes.client.models.io_x_k8s_cluster_v1alpha3_machine_status.io_x_k8s_cluster_v1alpha3_Machine_status(
addresses = [
kubernetes.client.models.io_x_k8s_cluster_controlplane_v1alpha3_aws_managed_control_plane_status_bastion_addresses.io_x_k8s_cluster_controlplane_v1alpha3_AWSManagedControlPlane_status_bastion_addresses(
address = '0',
type = '0', )
],
bootstrap_ready = True,
conditions = [
kubernetes.client.models.io_x_k8s_cluster_addons_v1alpha3_cluster_resource_set_status_conditions.io_x_k8s_cluster_addons_v1alpha3_ClusterResourceSet_status_conditions(
last_transition_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
message = '0',
reason = '0',
severity = '0',
status = '0',
type = '0', )
],
failure_message = '0',
failure_reason = '0',
infrastructure_ready = True,
last_updated = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
node_ref = kubernetes.client.models.io_x_k8s_cluster_v1alpha3_machine_status_node_ref.io_x_k8s_cluster_v1alpha3_Machine_status_nodeRef(
api_version = '0',
field_path = '0',
kind = '0',
name = '0',
namespace = '0',
resource_version = '0',
uid = '0', ),
observed_generation = 56,
phase = '0',
version = '0', ), )
],
)
def testIoXK8sClusterV1alpha3MachineList(self):
"""Test IoXK8sClusterV1alpha3MachineList"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
|
py
|
1a5dec64f1678bb488242c967a6dc8d16b4c85a1
|
# Databricks notebook source exported at Fri, 11 Nov 2016 16:51:59 UTC
# MAGIC %md
# MAGIC <a rel="license" href="http://creativecommons.org/licenses/by-nc-nd/4.0/"> <img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by-nc-nd/4.0/88x31.png"/> </a> <br/> This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-nc-nd/4.0/"> Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International License. </a>
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC <img src="http://spark-mooc.github.io/web-assets/images/cs110x/movie-camera.png" style="float:right; height: 200px; margin: 10px; border: 1px solid #ddd; border-radius: 15px 15px 15px 15px; padding: 10px"/>
# MAGIC
# MAGIC # Predicting Movie Ratings
# MAGIC
# MAGIC One of the most common uses of big data is to predict what users want. This allows Google to show you relevant ads, Amazon to recommend relevant products, and Netflix to recommend movies that you might like. This lab will demonstrate how we can use Apache Spark to recommend movies to a user. We will start with some basic techniques, and then use the [Spark ML][sparkml] library's Alternating Least Squares method to make more sophisticated predictions.
# MAGIC
# MAGIC For this lab, we will use a subset dataset of 20 million ratings. This dataset is pre-mounted on Databricks and is from the [MovieLens stable benchmark rating dataset](http://grouplens.org/datasets/movielens/). However, the same code you write will also work on the full dataset (though running with the full dataset on Community Edition is likely to take quite a long time).
# MAGIC
# MAGIC In this lab:
# MAGIC * *Part 0*: Preliminaries
# MAGIC * *Part 1*: Basic Recommendations
# MAGIC * *Part 2*: Collaborative Filtering
# MAGIC * *Part 3*: Predictions for Yourself
# MAGIC
# MAGIC As mentioned during the first Learning Spark lab, think carefully before calling `collect()` on any datasets. When you are using a small dataset, calling `collect()` and then using Python to get a sense for the data locally (in the driver program) will work fine, but this will not work when you are using a large dataset that doesn't fit in memory on one machine.
# MAGIC [sparkml]: https://spark.apache.org/docs/1.6.2/api/python/pyspark.ml.html
# COMMAND ----------
# MAGIC %md
# MAGIC ## Code
# MAGIC
# MAGIC This assignment can be completed using basic Python and pySpark DataFrame Transformations and Actions. Libraries other than math are not necessary. With the exception of the ML functions that we introduce in this assignment, you should be able to complete all parts of this homework using only the Spark functions you have used in prior lab exercises (although you are welcome to use more features of Spark if you like!).
# MAGIC
# MAGIC
# MAGIC The following cell defines the locations of the data files. If you want to run an exported version of this lab on your own machine (i.e., outside of Databricks), you'll need to download your own copy of the 20-million movie data set, and you'll need to adjust the paths, below.
# MAGIC
# MAGIC **To Do**: Run the following cell.
# COMMAND ----------
import os
from databricks_test_helper import Test
dbfs_dir = '/databricks-datasets/cs110x/ml-20m/data-001'
ratings_filename = dbfs_dir + '/ratings.csv'
movies_filename = dbfs_dir + '/movies.csv'
# The following line is here to enable this notebook to be exported as source and
# run on a local machine with a local copy of the files. Just change the dbfs_dir,
# above.
if os.path.sep != '/':
# Handle Windows.
ratings_filename = ratings_filename.replace('/', os.path.sep)
    movies_filename = movies_filename.replace('/', os.path.sep)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Part 0: Preliminaries
# MAGIC
# MAGIC We read in each of the files and create a DataFrame consisting of parsed lines.
# MAGIC
# MAGIC ### The 20-million movie sample
# MAGIC
# MAGIC The 20-million movie sample consists of CSV files (with headers), so there's no need to parse the files manually, as Spark CSV can do the job.
# COMMAND ----------
# MAGIC %md
# MAGIC First, let's take a look at the directory containing our files.
# COMMAND ----------
display(dbutils.fs.ls(dbfs_dir))
# COMMAND ----------
# MAGIC %md
# MAGIC ### CPU vs I/O tradeoff
# MAGIC
# MAGIC Note that we have both compressed files (ending in `.gz`) and uncompressed files. We have a CPU vs. I/O tradeoff here. If I/O is the bottleneck, then we want to process the compressed files and pay the extra CPU overhead. If CPU is the bottleneck, then it makes more sense to process the uncompressed files.
# MAGIC
# MAGIC We've done some experiments, and we've determined that CPU is more of a bottleneck than I/O, on Community Edition. So, we're going to process the uncompressed data. In addition, we're going to speed things up further by specifying the DataFrame schema explicitly. (When the Spark CSV adapter infers the schema from a CSV file, it has to make an extra pass over the file. That'll slow things down here, and it isn't really necessary.)
# MAGIC
# MAGIC **To Do**: Run the following cell, which will define the schemas.
# COMMAND ----------
from pyspark.sql.types import *
ratings_df_schema = StructType(
[StructField('userId', IntegerType()),
StructField('movieId', IntegerType()),
StructField('rating', DoubleType())]
)
movies_df_schema = StructType(
[StructField('ID', IntegerType()),
StructField('title', StringType())]
)
# COMMAND ----------
# MAGIC %md
# MAGIC ### Load and Cache
# MAGIC
# MAGIC The Databricks File System (DBFS) sits on top of S3. We're going to be accessing this data a lot. Rather than read it over and over again from S3, we'll cache both
# MAGIC the movies DataFrame and the ratings DataFrame in memory.
# MAGIC
# MAGIC **To Do**: Run the following cell to load and cache the data. Please be patient: The code takes about 30 seconds to run.
# COMMAND ----------
from pyspark.sql.functions import regexp_extract
from pyspark.sql.types import *
raw_ratings_df = sqlContext.read.format('com.databricks.spark.csv').options(header=True, inferSchema=False).schema(ratings_df_schema).load(ratings_filename)
ratings_df = raw_ratings_df.drop('Timestamp')
raw_movies_df = sqlContext.read.format('com.databricks.spark.csv').options(header=True, inferSchema=False).schema(movies_df_schema).load(movies_filename)
movies_df = raw_movies_df.drop('Genres').withColumnRenamed('movieId', 'ID')
ratings_df.cache()
movies_df.cache()
assert ratings_df.is_cached
assert movies_df.is_cached
raw_ratings_count = raw_ratings_df.count()
ratings_count = ratings_df.count()
raw_movies_count = raw_movies_df.count()
movies_count = movies_df.count()
print 'There are %s ratings and %s movies in the datasets' % (ratings_count, movies_count)
print 'Ratings:'
ratings_df.show(3)
print 'Movies:'
movies_df.show(3, truncate=False)
assert raw_ratings_count == ratings_count
assert raw_movies_count == movies_count
# COMMAND ----------
# MAGIC %md
# MAGIC Next, let's do a quick verification of the data.
# MAGIC
# MAGIC **To do**: Run the following cell. It should run without errors.
# COMMAND ----------
assert ratings_count == 20000263
assert movies_count == 27278
assert movies_df.filter(movies_df.title == 'Toy Story (1995)').count() == 1
assert ratings_df.filter((ratings_df.userId == 6) & (ratings_df.movieId == 1) & (ratings_df.rating == 5.0)).count() == 1
# COMMAND ----------
# MAGIC %md
# MAGIC Let's take a quick look at some of the data in the two DataFrames.
# MAGIC
# MAGIC **To Do**: Run the following two cells.
# COMMAND ----------
display(movies_df)
# COMMAND ----------
display(ratings_df)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Part 1: Basic Recommendations
# MAGIC
# MAGIC One way to recommend movies is to always recommend the movies with the highest average rating. In this part, we will use Spark to find the name, number of ratings, and the average rating of the 20 movies with the highest average rating and at least 500 reviews. We want to filter our movies with high ratings but greater than or equal to 500 reviews because movies with few reviews may not have broad appeal to everyone.
# COMMAND ----------
# MAGIC %md
# MAGIC ### (1a) Movies with Highest Average Ratings
# MAGIC
# MAGIC Let's determine the movies with the highest average ratings.
# MAGIC
# MAGIC The steps you should perform are:
# MAGIC
# MAGIC 1. Recall that the `ratings_df` contains three columns:
# MAGIC - The ID of the user who rated the film
# MAGIC - the ID of the movie being rated
# MAGIC - and the rating.
# MAGIC
# MAGIC First, transform `ratings_df` into a second DataFrame, `movie_ids_with_avg_ratings`, with the following columns:
# MAGIC - The movie ID
# MAGIC - The number of ratings for the movie
# MAGIC - The average of all the movie's ratings
# MAGIC
# MAGIC 2. Transform `movie_ids_with_avg_ratings` to another DataFrame, `movie_names_with_avg_ratings_df` that adds the movie name to each row. `movie_names_with_avg_ratings_df`
# MAGIC will contain these columns:
# MAGIC - The movie ID
# MAGIC - The movie name
# MAGIC - The number of ratings for the movie
# MAGIC - The average of all the movie's ratings
# MAGIC
# MAGIC **Hint**: You'll need to do a join.
# MAGIC
# MAGIC You should end up with something like the following:
# MAGIC ```
# MAGIC movie_ids_with_avg_ratings_df:
# MAGIC +-------+-----+------------------+
# MAGIC |movieId|count|average |
# MAGIC +-------+-----+------------------+
# MAGIC |1831 |7463 |2.5785207021305103|
# MAGIC |431 |8946 |3.695059244355019 |
# MAGIC |631 |2193 |2.7273141814865483|
# MAGIC +-------+-----+------------------+
# MAGIC only showing top 3 rows
# MAGIC
# MAGIC movie_names_with_avg_ratings_df:
# MAGIC +-------+-----------------------------+-----+-------+
# MAGIC |average|title |count|movieId|
# MAGIC +-------+-----------------------------+-----+-------+
# MAGIC |5.0 |Ella Lola, a la Trilby (1898)|1 |94431 |
# MAGIC |5.0 |Serving Life (2011) |1 |129034 |
# MAGIC |5.0 |Diplomatic Immunity (2009? ) |1 |107434 |
# MAGIC +-------+-----------------------------+-----+-------+
# MAGIC only showing top 3 rows
# MAGIC ```
# COMMAND ----------
# TODO: Replace <FILL_IN> with appropriate code
from pyspark.sql import functions as F
# From ratingsDF, create a movie_ids_with_avg_ratings_df that combines the two DataFrames
movie_ids_with_avg_ratings_df = ratings_df.groupBy('movieId').agg(F.count(ratings_df.rating).alias("count"), F.avg(ratings_df.rating).alias("average"))
print 'movie_ids_with_avg_ratings_df:'
movie_ids_with_avg_ratings_df.show(3, truncate=False)
# Note: movie_names_df is a temporary variable, used only to separate the steps necessary
# to create the movie_names_with_avg_ratings_df DataFrame.
movie_names_df = movie_ids_with_avg_ratings_df.join(movies_df, movie_ids_with_avg_ratings_df.movieId == movies_df.ID, 'inner')
movie_names_with_avg_ratings_df = movie_names_df.select(movie_names_df['average'], movie_names_df['title'], movie_names_df['count'], movie_names_df['movieId']).sort('average', ascending=False)
print 'movie_names_with_avg_ratings_df:'
movie_names_with_avg_ratings_df.show(3, truncate=False)
# COMMAND ----------
# TEST Movies with Highest Average Ratings (1a)
Test.assertEquals(movie_ids_with_avg_ratings_df.count(), 26744,
'incorrect movie_ids_with_avg_ratings_df.count() (expected 26744)')
movie_ids_with_ratings_take_ordered = movie_ids_with_avg_ratings_df.orderBy('MovieID').take(3)
_take_0 = movie_ids_with_ratings_take_ordered[0]
_take_1 = movie_ids_with_ratings_take_ordered[1]
_take_2 = movie_ids_with_ratings_take_ordered[2]
Test.assertTrue(_take_0[0] == 1 and _take_0[1] == 49695,
'incorrect count of ratings for movie with ID {0} (expected 49695)'.format(_take_0[0]))
Test.assertEquals(round(_take_0[2], 2), 3.92, "Incorrect average for movie ID {0}. Expected 3.92".format(_take_0[0]))
Test.assertTrue(_take_1[0] == 2 and _take_1[1] == 22243,
'incorrect count of ratings for movie with ID {0} (expected 22243)'.format(_take_1[0]))
Test.assertEquals(round(_take_1[2], 2), 3.21, "Incorrect average for movie ID {0}. Expected 3.21".format(_take_1[0]))
Test.assertTrue(_take_2[0] == 3 and _take_2[1] == 12735,
'incorrect count of ratings for movie with ID {0} (expected 12735)'.format(_take_2[0]))
Test.assertEquals(round(_take_2[2], 2), 3.15, "Incorrect average for movie ID {0}. Expected 3.15".format(_take_2[0]))
Test.assertEquals(movie_names_with_avg_ratings_df.count(), 26744,
'incorrect movie_names_with_avg_ratings_df.count() (expected 26744)')
movie_names_with_ratings_take_ordered = movie_names_with_avg_ratings_df.orderBy(['average', 'title']).take(3)
result = [(r['average'], r['title'], r['count'], r['movieId']) for r in movie_names_with_ratings_take_ordered]
Test.assertEquals(result,
[(0.5, u'13 Fighting Men (1960)', 1, 109355),
(0.5, u'20 Years After (2008)', 1, 131062),
(0.5, u'3 Holiday Tails (Golden Christmas 2: The Second Tail, A) (2011)', 1, 111040)],
'incorrect top 3 entries in movie_names_with_avg_ratings_df')
# COMMAND ----------
# MAGIC %md
# MAGIC ### (1b) Movies with Highest Average Ratings and at least 500 reviews
# MAGIC
# MAGIC Now that we have a DataFrame of the movies with highest average ratings, we can use Spark to determine the 20 movies with highest average ratings and at least 500 reviews.
# MAGIC
# MAGIC Add a single DataFrame transformation (in place of `<FILL_IN>`, below) to limit the results to movies with ratings from at least 500 people.
# COMMAND ----------
# TODO: Replace <FILL IN> with appropriate code
movies_with_500_ratings_or_more = movie_names_with_avg_ratings_df.filter(movie_names_with_avg_ratings_df['count'] >= 500)
print 'Movies with highest ratings:'
movies_with_500_ratings_or_more.show(20, truncate=False)
# COMMAND ----------
# TEST Movies with Highest Average Ratings and at least 500 Reviews (1b)
Test.assertEquals(movies_with_500_ratings_or_more.count(), 4489,
'incorrect movies_with_500_ratings_or_more.count(). Expected 4489.')
top_20_results = [(r['average'], r['title'], r['count']) for r in movies_with_500_ratings_or_more.orderBy(F.desc('average')).take(20)]
Test.assertEquals(top_20_results,
[(4.446990499637029, u'Shawshank Redemption, The (1994)', 63366),
(4.364732196832306, u'Godfather, The (1972)', 41355),
(4.334372207803259, u'Usual Suspects, The (1995)', 47006),
(4.310175010988133, u"Schindler's List (1993)", 50054),
(4.275640557704942, u'Godfather: Part II, The (1974)', 27398),
(4.2741796572216, u'Seven Samurai (Shichinin no samurai) (1954)', 11611),
(4.271333600779414, u'Rear Window (1954)', 17449),
(4.263182346109176, u'Band of Brothers (2001)', 4305),
(4.258326830670664, u'Casablanca (1942)', 24349),
(4.256934865900383, u'Sunset Blvd. (a.k.a. Sunset Boulevard) (1950)', 6525),
(4.24807897901911, u"One Flew Over the Cuckoo's Nest (1975)", 29932),
(4.247286821705426, u'Dr. Strangelove or: How I Learned to Stop Worrying and Love the Bomb (1964)', 23220),
(4.246001523229246, u'Third Man, The (1949)', 6565),
(4.235410064157069, u'City of God (Cidade de Deus) (2002)', 12937),
(4.2347902097902095, u'Lives of Others, The (Das leben der Anderen) (2006)', 5720),
(4.233538107122288, u'North by Northwest (1959)', 15627),
(4.2326233183856505, u'Paths of Glory (1957)', 3568),
(4.227123123722136, u'Fight Club (1999)', 40106),
(4.224281931146873, u'Double Indemnity (1944)', 4909),
(4.224137931034483, u'12 Angry Men (1957)', 12934)],
'Incorrect top 20 movies with 500 or more ratings')
# COMMAND ----------
# MAGIC %md
# MAGIC Using a threshold on the number of reviews is one way to improve the recommendations, but there are many other good ways to improve quality. For example, you could weight ratings by the number of ratings.
# COMMAND ----------
# MAGIC %md
# MAGIC ## Part 2: Collaborative Filtering
# MAGIC In this course, you have learned about many of the basic transformations and actions that Spark allows us to apply to distributed datasets. Spark also exposes some higher level functionality; in particular, Machine Learning using a component of Spark called [MLlib][mllib]. In this part, you will learn how to use MLlib to make personalized movie recommendations using the movie data we have been analyzing.
# MAGIC
# MAGIC <img src="https://courses.edx.org/c4x/BerkeleyX/CS100.1x/asset/Collaborative_filtering.gif" alt="collaborative filtering" style="float: right"/>
# MAGIC
# MAGIC We are going to use a technique called [collaborative filtering][collab]. Collaborative filtering is a method of making automatic predictions (filtering) about the interests of a user by collecting preferences or taste information from many users (collaborating). The underlying assumption of the collaborative filtering approach is that if a person A has the same opinion as a person B on an issue, A is more likely to have B's opinion on a different issue x than to have the opinion on x of a person chosen randomly. You can read more about collaborative filtering [here][collab2].
# MAGIC
# MAGIC The image at the right (from [Wikipedia][collab]) shows an example of predicting of the user's rating using collaborative filtering. At first, people rate different items (like videos, images, games). After that, the system is making predictions about a user's rating for an item, which the user has not rated yet. These predictions are built upon the existing ratings of other users, who have similar ratings with the active user. For instance, in the image below the system has made a prediction, that the active user will not like the video.
# MAGIC
# MAGIC <br clear="all"/>
# MAGIC
# MAGIC ----
# MAGIC
# MAGIC For movie recommendations, we start with a matrix whose entries are movie ratings by users (shown in red in the diagram below). Each column represents a user (shown in green) and each row represents a particular movie (shown in blue).
# MAGIC
# MAGIC Since not all users have rated all movies, we do not know all of the entries in this matrix, which is precisely why we need collaborative filtering. For each user, we have ratings for only a subset of the movies. With collaborative filtering, the idea is to approximate the ratings matrix by factorizing it as the product of two matrices: one that describes properties of each user (shown in green), and one that describes properties of each movie (shown in blue).
# MAGIC
# MAGIC <img alt="factorization" src="http://spark-mooc.github.io/web-assets/images/matrix_factorization.png" style="width: 885px"/>
# MAGIC <br clear="all"/>
# MAGIC
# MAGIC We want to select these two matrices such that the error for the users/movie pairs where we know the correct ratings is minimized. The [Alternating Least Squares][als] algorithm does this by first randomly filling the users matrix with values and then optimizing the value of the movies such that the error is minimized. Then, it holds the movies matrix constant and optimizes the value of the user's matrix. This alternation between which matrix to optimize is the reason for the "alternating" in the name.
# MAGIC
# MAGIC This optimization is what's being shown on the right in the image above. Given a fixed set of user factors (i.e., values in the users matrix), we use the known ratings to find the best values for the movie factors using the optimization written at the bottom of the figure. Then we "alternate" and pick the best user factors given fixed movie factors.
# MAGIC
# MAGIC [als]: https://en.wikiversity.org/wiki/Least-Squares_Method
# MAGIC [mllib]: http://spark.apache.org/docs/1.6.2/mllib-guide.html
# MAGIC [collab]: https://en.wikipedia.org/?title=Collaborative_filtering
# MAGIC [collab2]: http://recommender-systems.org/collaborative-filtering/
# COMMAND ----------
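# Illustrative sketch (not part of the original lab): a tiny NumPy illustration of the low-rank
# factorization idea described above. A 4-user x 3-movie ratings matrix R is approximated by the
# product of a 4 x k user-factor matrix U and a k x 3 movie-factor matrix M. Here k = 2 and the
# factors are random rather than learned, so the error is meaningless -- ALS (below) does the
# actual optimization.
import numpy as np
R = np.array([[5., 3., 0.], [4., 0., 0.], [1., 1., 5.], [0., 1., 4.]])  # 0 = unknown rating
k = 2
np.random.seed(0)
U = np.random.rand(R.shape[0], k)   # user factors
M = np.random.rand(k, R.shape[1])   # movie factors
approx = U.dot(M)                   # predicted rating for every (user, movie) pair
print 'RMSE on known ratings: %s' % np.sqrt(((R - approx)[R > 0] ** 2).mean())

# COMMAND ----------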
# MAGIC %md
# MAGIC ### (2a) Creating a Training Set
# MAGIC
# MAGIC Before we jump into using machine learning, we need to break up the `ratings_df` dataset into three pieces:
# MAGIC * A training set (DataFrame), which we will use to train models
# MAGIC * A validation set (DataFrame), which we will use to choose the best model
# MAGIC * A test set (DataFrame), which we will use for our experiments
# MAGIC
# MAGIC To randomly split the dataset into the multiple groups, we can use the pySpark [randomSplit()](http://spark.apache.org/docs/1.6.2/api/python/pyspark.sql.html#pyspark.sql.DataFrame.randomSplit) transformation. `randomSplit()` takes a set of splits and a seed and returns multiple DataFrames.
# COMMAND ----------
# TODO: Replace <FILL_IN> with the appropriate code.
# We'll hold out 60% for training, 20% of our data for validation, and leave 20% for testing
seed = 1800009193L
(split_60_df, split_a_20_df, split_b_20_df) = ratings_df.randomSplit([.60,.20,.20], seed)
# Let's cache these datasets for performance
training_df = split_60_df.cache()
validation_df = split_a_20_df.cache()
test_df = split_b_20_df.cache()
print('Training: {0}, validation: {1}, test: {2}\n'.format(
training_df.count(), validation_df.count(), test_df.count())
)
training_df.show(3)
validation_df.show(3)
test_df.show(3)
# COMMAND ----------
# TEST Creating a Training Set (2a)
Test.assertEquals(training_df.count(), 12001389, "Incorrect training_df count. Expected 12001389")
Test.assertEquals(validation_df.count(), 4003694, "Incorrect validation_df count. Expected 4003694")
Test.assertEquals(test_df.count(), 3995180, "Incorrect test_df count. Expected 3995180")
Test.assertEquals(training_df.filter((ratings_df.userId == 1) & (ratings_df.movieId == 5952) & (ratings_df.rating == 5.0)).count(), 1)
Test.assertEquals(training_df.filter((ratings_df.userId == 1) & (ratings_df.movieId == 1193) & (ratings_df.rating == 3.5)).count(), 1)
Test.assertEquals(training_df.filter((ratings_df.userId == 1) & (ratings_df.movieId == 1196) & (ratings_df.rating == 4.5)).count(), 1)
Test.assertEquals(validation_df.filter((ratings_df.userId == 1) & (ratings_df.movieId == 296) & (ratings_df.rating == 4.0)).count(), 1)
Test.assertEquals(validation_df.filter((ratings_df.userId == 1) & (ratings_df.movieId == 32) & (ratings_df.rating == 3.5)).count(), 1)
Test.assertEquals(validation_df.filter((ratings_df.userId == 1) & (ratings_df.movieId == 6888) & (ratings_df.rating == 3.0)).count(), 1)
Test.assertEquals(test_df.filter((ratings_df.userId == 1) & (ratings_df.movieId == 4993) & (ratings_df.rating == 5.0)).count(), 1)
Test.assertEquals(test_df.filter((ratings_df.userId == 1) & (ratings_df.movieId == 4128) & (ratings_df.rating == 4.0)).count(), 1)
Test.assertEquals(test_df.filter((ratings_df.userId == 1) & (ratings_df.movieId == 4915) & (ratings_df.rating == 3.0)).count(), 1)
# COMMAND ----------
# MAGIC %md
# MAGIC After splitting the dataset, your training set has about 12 million entries and the validation and test sets each have about 4 million entries. (The exact number of entries in each dataset varies slightly due to the random nature of the `randomSplit()` transformation.)
# COMMAND ----------
# MAGIC %md
# MAGIC ### (2b) Alternating Least Squares
# MAGIC
# MAGIC In this part, we will use the Apache Spark ML Pipeline implementation of Alternating Least Squares, [ALS](http://spark.apache.org/docs/1.6.2/api/python/pyspark.ml.html#pyspark.ml.recommendation.ALS). ALS takes a training dataset (DataFrame) and several parameters that control the model creation process. To determine the best values for the parameters, we will use ALS to train several models, and then we will select the best model and use the parameters from that model in the rest of this lab exercise.
# MAGIC
# MAGIC The process we will use for determining the best model is as follows:
# MAGIC 1. Pick a set of model parameters. The most important parameter to model is the *rank*, which is the number of columns in the Users matrix (green in the diagram above) or the number of rows in the Movies matrix (blue in the diagram above). In general, a lower rank will mean higher error on the training dataset, but a high rank may lead to [overfitting](https://en.wikipedia.org/wiki/Overfitting). We will train models with ranks of 4, 8, and 12 using the `training_df` dataset.
# MAGIC
# MAGIC 2. Set the appropriate parameters on the `ALS` object:
# MAGIC * The "User" column will be set to the values in our `userId` DataFrame column.
# MAGIC * The "Item" column will be set to the values in our `movieId` DataFrame column.
# MAGIC * The "Rating" column will be set to the values in our `rating` DataFrame column.
# MAGIC * We'll using a regularization parameter of 0.1.
# MAGIC
# MAGIC **Note**: Read the documentation for the [ALS](http://spark.apache.org/docs/1.6.2/api/python/pyspark.ml.html#pyspark.ml.recommendation.ALS) class **carefully**. It will help you accomplish this step.
# MAGIC 3. Have the ALS output transformation (i.e., the result of [ALS.fit()](http://spark.apache.org/docs/1.6.2/api/python/pyspark.ml.html#pyspark.ml.recommendation.ALS.fit)) produce a _new_ column
# MAGIC called "prediction" that contains the predicted value.
# MAGIC
# MAGIC 4. Create multiple models using [ALS.fit()](http://spark.apache.org/docs/1.6.2/api/python/pyspark.ml.html#pyspark.ml.recommendation.ALS.fit), one for each of our rank values. We'll fit
# MAGIC against the training data set (`training_df`).
# MAGIC
# MAGIC 5. For each model, we'll run a prediction against our validation data set (`validation_df`) and check the error.
# MAGIC
# MAGIC 6. We'll keep the model with the best error rate.
# MAGIC
# MAGIC #### Why are we doing our own cross-validation?
# MAGIC
# MAGIC A challenge for collaborative filtering is how to provide ratings to a new user (a user who has not provided *any* ratings at all). Some recommendation systems choose to provide new users with a set of default ratings (e.g., an average value across all ratings), while others choose to provide no ratings for new users. Spark's ALS algorithm yields a NaN (`Not a Number`) value when asked to provide a rating for a new user.
# MAGIC
# MAGIC Using the ML Pipeline's [CrossValidator](http://spark.apache.org/docs/1.6.2/api/python/pyspark.ml.html#pyspark.ml.tuning.CrossValidator) with ALS is thus problematic, because cross validation involves dividing the training data into a set of folds (e.g., three sets) and then using those folds for testing and evaluating the parameters during the parameter grid search process. It is likely that some of the folds will contain users that are not in the other folds, and, as a result, ALS produces NaN values for those new users. When the CrossValidator uses the Evaluator (RMSE) to compute an error metric, the RMSE algorithm will return NaN. This will make *all* of the parameters in the parameter grid appear to be equally good (or bad).
# MAGIC
# MAGIC You can read the discussion on [Spark JIRA 14489](https://issues.apache.org/jira/browse/SPARK-14489) about this issue. There are proposed workarounds of having ALS provide default values or having RMSE drop NaN values. Both introduce potential issues. We have chosen to have RMSE drop NaN values. While this does not solve the underlying issue of ALS not predicting a value for a new user, it does provide some evaluation value. We manually implement the parameter grid search process using a for loop (below) and remove the NaN values before using RMSE.
# MAGIC
# MAGIC For a production application, you would want to consider the tradeoffs in how to handle new users.
# MAGIC
# MAGIC **Note**: This cell will likely take a couple of minutes to run.
# COMMAND ----------
# TODO: Replace <FILL IN> with appropriate code
# This step is broken in ML Pipelines: https://issues.apache.org/jira/browse/SPARK-14489
from pyspark.ml.recommendation import ALS
# Let's initialize our ALS learner
als = ALS()
# Now we set the parameters for the method
als.setMaxIter(5)\
.setSeed(seed)\
.setRegParam(0.1)\
.setUserCol('userId')\
.setItemCol('movieId')\
.setRatingCol('rating')
# Now let's compute an evaluation metric for our test dataset
from pyspark.ml.evaluation import RegressionEvaluator
# Create an RMSE evaluator using the label and predicted columns
reg_eval = RegressionEvaluator(predictionCol="prediction", labelCol="rating", metricName="rmse")
tolerance = 0.03
ranks = [4, 8, 12]
errors = [0, 0, 0]
models = [0, 0, 0]
err = 0
min_error = float('inf')
best_rank = -1
for rank in ranks:
# Set the rank here:
als.setRank(rank)
# Create the model with these parameters.
model = als.fit(training_df)
# Run the model to create a prediction. Predict against the validation_df.
predict_df = model.transform(validation_df)
# Remove NaN values from prediction (due to SPARK-14489)
predicted_ratings_df = predict_df.filter(predict_df.prediction != float('nan'))
# Run the previously created RMSE evaluator, reg_eval, on the predicted_ratings_df DataFrame
error = reg_eval.evaluate(predicted_ratings_df)
errors[err] = error
models[err] = model
print 'For rank %s the RMSE is %s' % (rank, error)
if error < min_error:
min_error = error
best_rank = err
err += 1
als.setRank(ranks[best_rank])
print 'The best model was trained with rank %s' % ranks[best_rank]
my_model = models[best_rank]
# COMMAND ----------
# TEST
Test.assertEquals(round(min_error, 2), 0.81, "Unexpected value for best RMSE. Expected rounded value to be 0.81. Got {0}".format(round(min_error, 2)))
Test.assertEquals(ranks[best_rank], 12, "Unexpected value for best rank. Expected 12. Got {0}".format(ranks[best_rank]))
Test.assertEqualsHashed(als.getItemCol(), "18f0e2357f8829fe809b2d95bc1753000dd925a6", "Incorrect choice of {0} for ALS item column.".format(als.getItemCol()))
Test.assertEqualsHashed(als.getUserCol(), "db36668fa9a19fde5c9676518f9e86c17cabf65a", "Incorrect choice of {0} for ALS user column.".format(als.getUserCol()))
Test.assertEqualsHashed(als.getRatingCol(), "3c2d687ef032e625aa4a2b1cfca9751d2080322c", "Incorrect choice of {0} for ALS rating column.".format(als.getRatingCol()))
# COMMAND ----------
# MAGIC %md
# MAGIC ### (2c) Testing Your Model
# MAGIC
# MAGIC So far, we used the `training_df` and `validation_df` datasets to select the best model. Since we used these two datasets to determine what model is best, we cannot use them to test how good the model is; otherwise, we would be very vulnerable to [overfitting](https://en.wikipedia.org/wiki/Overfitting). To decide how good our model is, we need to use the `test_df` dataset. We will use the `best_rank` you determined in part (2b) to create a model for predicting the ratings for the test dataset and then we will compute the RMSE.
# MAGIC
# MAGIC The steps you should perform are:
# MAGIC * Run a prediction, using `my_model` as created above, on the test dataset (`test_df`), producing a new `predict_df` DataFrame.
# MAGIC * Filter out unwanted NaN values (necessary because of [a bug in Spark](https://issues.apache.org/jira/browse/SPARK-14489)). We've supplied this piece of code for you.
# MAGIC * Use the previously created RMSE evaluator, `reg_eval` to evaluate the filtered DataFrame.
# COMMAND ----------
# TODO: Replace <FILL_IN> with the appropriate code
# In ML Pipelines, this next step has a bug that produces unwanted NaN values. We
# have to filter them out. See https://issues.apache.org/jira/browse/SPARK-14489
predict_df = my_model.transform(test_df)
# Remove NaN values from prediction (due to SPARK-14489)
predicted_test_df = predict_df.filter(predict_df.prediction != float('nan'))
# Run the previously created RMSE evaluator, reg_eval, on the predicted_test_df DataFrame
test_RMSE = reg_eval.evaluate(predicted_test_df)
print('The model had a RMSE on the test set of {0}'.format(test_RMSE))
# COMMAND ----------
# TEST Testing Your Model (2c)
Test.assertTrue(abs(test_RMSE - 0.809624038485) < tolerance, 'incorrect test_RMSE: {0:.11f}'.format(test_RMSE))
# COMMAND ----------
# MAGIC %md
# MAGIC ### (2d) Comparing Your Model
# MAGIC
# MAGIC Looking at the RMSE for the results predicted by the model versus the values in the test set is one way to evaluate the quality of our model. Another way is to compare it against a simple baseline that predicts the average training-set rating for every movie and to compute the error of that baseline on the test set.
# MAGIC
# MAGIC The steps you should perform are:
# MAGIC * Use the `training_df` to compute the average rating across all movies in that training dataset.
# MAGIC * Use the average rating that you just determined and the `test_df` to create a DataFrame (`test_for_avg_df`) with a `prediction` column containing the average rating. **HINT**: You'll want to use the `lit()` function,
# MAGIC from `pyspark.sql.functions`, available here as `F.lit()`.
# MAGIC * Use our previously created `reg_eval` object to evaluate the `test_for_avg_df` and calculate the RMSE.
# COMMAND ----------
# TODO: Replace <FILL_IN> with the appropriate code.
# Compute the average rating
avg_rating_df = training_df.agg(F.avg(training_df.rating).alias('average'))
# Extract the average rating value. (This is row 0, column 0.)
training_avg_rating = avg_rating_df.collect()[0][0]
print('The average rating for movies in the training set is {0}'.format(training_avg_rating))
# Add a column with the average rating
test_for_avg_df = test_df.withColumn('prediction', F.lit(training_avg_rating))
# Run the previously created RMSE evaluator, reg_eval, on the test_for_avg_df DataFrame
test_avg_RMSE = reg_eval.evaluate(test_for_avg_df)
print("The RMSE on the average set is {0}".format(test_avg_RMSE))
# COMMAND ----------
# TEST Comparing Your Model (2d)
Test.assertTrue(abs(training_avg_rating - 3.52547984237) < 0.000001,
'incorrect training_avg_rating (expected 3.52547984237): {0:.11f}'.format(training_avg_rating))
Test.assertTrue(abs(test_avg_RMSE - 1.05190953037) < 0.000001,
                'incorrect test_avg_RMSE (expected 1.05190953037): {0:.11f}'.format(test_avg_RMSE))
# COMMAND ----------
# MAGIC %md
# MAGIC You now have code to predict how users will rate movies!
# COMMAND ----------
# MAGIC %md
# MAGIC ## Part 3: Predictions for Yourself
# MAGIC The ultimate goal of this lab exercise is to predict what movies to recommend to yourself. In order to do that, you will first need to add ratings for yourself to the `ratings_df` dataset.
# COMMAND ----------
# MAGIC %md
# MAGIC ### (3a) Your Movie Ratings
# MAGIC
# MAGIC To help you provide ratings for yourself, we have included the following code to list the names and movie IDs of the 50 highest-rated movies from `movies_with_500_ratings_or_more`, which we created in part 1 of the lab.
# COMMAND ----------
print 'Most rated movies:'
print '(average rating, movie name, number of reviews, movie ID)'
display(movies_with_500_ratings_or_more.orderBy(movies_with_500_ratings_or_more['average'].desc()).take(1000))
# COMMAND ----------
# MAGIC %md
# MAGIC The user ID 0 is unassigned, so we will use it for your ratings. We set the variable `my_user_id` to 0 for you. Next, create a new DataFrame called `my_ratings_df` with your ratings for at least 10 movies. Each entry should be formatted as `(my_user_id, movieID, rating)`. As in the original dataset, ratings should be between 1 and 5 (inclusive). If you have not seen at least 10 of these movies, you can increase the parameter passed to `take()` in the above cell until there are 10 movies that you have seen (or you can also guess what your rating would be for movies you have not seen).
# COMMAND ----------
# TODO: Replace <FILL IN> with appropriate code
from pyspark.sql import Row
my_user_id = 0
# Note that the movie IDs are the *last* number on each line. A common error was to use the number of ratings as the movie ID.
my_rated_movies = [
(my_user_id,912,3),
(my_user_id,4973,3),
(my_user_id,858,4),
(my_user_id,527,4),
(my_user_id,1221,4),
(my_user_id,58559,4),
(my_user_id,1198,4),
(my_user_id,260,4),
(my_user_id,1196,4),
(my_user_id,2571,4),
(my_user_id,593,4),
(my_user_id,7153,4),
(my_user_id,4993,4),
(my_user_id,5952,4),
(my_user_id,6016,5),
(my_user_id,2959,5),
(my_user_id,94466,5),
(my_user_id,4226,5),
(my_user_id,296,5),
(my_user_id,1136,5),
(my_user_id,79132,5),
(my_user_id,2858,5),
(my_user_id,2329,5),
(my_user_id,3462,5),
(my_user_id,92259,5),
(my_user_id,27773,5),
(my_user_id,1089,5),
(my_user_id,112552,5),
(my_user_id,1225,5),
(my_user_id,5995,5),
(my_user_id,47,5),
(my_user_id,3677,5)
# The format of each line is (my_user_id, movie ID, your rating)
# For example, to give the movie "Star Wars: Episode IV - A New Hope (1977)" a five rating, you would add the following line:
# (my_user_id, 260, 5),
]
my_ratings_df = sqlContext.createDataFrame(my_rated_movies, ['userId','movieId','rating'])
print 'My movie ratings:'
display(my_ratings_df.limit(10))
# COMMAND ----------
# MAGIC %md
# MAGIC ### (3b) Add Your Movies to Training Dataset
# MAGIC
# MAGIC Now that you have ratings for yourself, you need to add your ratings to the `training` dataset so that the model you train will incorporate your preferences. Spark's [unionAll()](http://spark.apache.org/docs/1.6.2/api/python/pyspark.sql.html#pyspark.sql.DataFrame.unionAll) transformation combines two DataFrames; use `unionAll()` to create a new training dataset that includes your ratings and the data in the original training dataset.
# COMMAND ----------
# TODO: Replace <FILL IN> with appropriate code
training_with_my_ratings_df = training_df.unionAll(my_ratings_df)
print ('The training dataset now has %s more entries than the original training dataset' %
(training_with_my_ratings_df.count() - training_df.count()))
assert (training_with_my_ratings_df.count() - training_df.count()) == my_ratings_df.count()
# COMMAND ----------
# MAGIC %md
# MAGIC ### (3c) Train a Model with Your Ratings
# MAGIC
# MAGIC Now, train a model with your ratings added and the parameters you used in parts (2b) and (2c). Make sure you include **all** of the parameters.
# MAGIC
# MAGIC **Note**: This cell will take about 30 seconds to run.
# COMMAND ----------
# TODO: Replace <FILL IN> with appropriate code
# Reset the parameters for the ALS object.
als.setPredictionCol("prediction")\
.setMaxIter(5)\
.setSeed(seed)\
.setUserCol('userId')\
.setItemCol('movieId')\
.setRatingCol('rating')\
    .setRank(ranks[best_rank])
# Create the model with these parameters.
my_ratings_model = als.fit(training_with_my_ratings_df)
# COMMAND ----------
# MAGIC %md
# MAGIC ### (3d) Check RMSE for the New Model with Your Ratings
# MAGIC
# MAGIC Compute the RMSE for this new model on the test set.
# MAGIC * Run your model (the one you just trained) against the test data set in `test_df`.
# MAGIC * Then, use our previously-computed `reg_eval` object to compute the RMSE of your ratings.
# COMMAND ----------
# TODO: Replace <FILL IN> with appropriate code
my_predict_df = my_ratings_model.transform(test_df)
# Remove NaN values from prediction (due to SPARK-14489)
predicted_test_my_ratings_df = my_predict_df.filter(my_predict_df.prediction != float('nan'))
# Run the previously created RMSE evaluator, reg_eval, on the predicted_test_my_ratings_df DataFrame
test_RMSE_my_ratings = reg_eval.evaluate(predicted_test_my_ratings_df)
print('The model had a RMSE on the test set of {0}'.format(test_RMSE_my_ratings))
# COMMAND ----------
# MAGIC %md
# MAGIC ### (3e) Predict Your Ratings
# MAGIC
# MAGIC So far, we have only computed the error of the model. Next, let's predict what ratings you would give to the movies that you did not already provide ratings for.
# MAGIC
# MAGIC The steps you should perform are:
# MAGIC * Filter out the movies you already rated manually. (Use the `my_rated_movie_ids` variable.) Put the results in a new `not_rated_df`.
# MAGIC
# MAGIC **Hint**: The [Column.isin()](http://spark.apache.org/docs/1.6.2/api/python/pyspark.sql.html#pyspark.sql.Column.isin)
# MAGIC method, as well as the `~` ("not") DataFrame logical operator, may come in handy here. Here's an example of using `isin()`:
# MAGIC
# MAGIC ```
# MAGIC > df1 = sqlContext.createDataFrame([("Jim", 10), ("Julie", 9), ("Abdul", 20), ("Mireille", 19)], ["name", "age"])
# MAGIC > df1.show()
# MAGIC +--------+---+
# MAGIC | name|age|
# MAGIC +--------+---+
# MAGIC | Jim| 10|
# MAGIC | Julie| 9|
# MAGIC | Abdul| 20|
# MAGIC |Mireille| 19|
# MAGIC +--------+---+
# MAGIC
# MAGIC > names_to_delete = ["Julie", "Abdul"] # this is just a Python list
# MAGIC > df2 = df1.filter(~ df1["name"].isin(names_to_delete)) # "NOT IN"
# MAGIC > df2.show()
# MAGIC +--------+---+
# MAGIC | name|age|
# MAGIC +--------+---+
# MAGIC | Jim| 10|
# MAGIC |Mireille| 19|
# MAGIC +--------+---+
# MAGIC ```
# MAGIC
# MAGIC * Transform `not_rated_df` into `my_unrated_movies_df` by:
# MAGIC - renaming the "ID" column to "movieId"
# MAGIC - adding a "userId" column with the value contained in the `my_user_id` variable defined above.
# MAGIC
# MAGIC * Create a `predicted_ratings_df` DataFrame by applying `my_ratings_model` to `my_unrated_movies_df`.
# COMMAND ----------
# TODO: Replace <FILL_IN> with the appropriate code
# Create a list of my rated movie IDs
my_rated_movie_ids = [x[1] for x in my_rated_movies]
# Filter out the movies I already rated.
not_rated_df = movies_df.filter(~ movies_df['ID'].isin(my_rated_movie_ids))
# Rename the "ID" column to be "movieId", and add a column with my_user_id as "userId".
my_unrated_movies_df = not_rated_df.withColumnRenamed('ID', 'movieId').withColumn("userId", F.lit(my_user_id))
# Use my_ratings_model to predict ratings for the movies that I did not manually rate.
raw_predicted_ratings_df = my_ratings_model.transform(my_unrated_movies_df)
predicted_ratings_df = raw_predicted_ratings_df.filter(raw_predicted_ratings_df['prediction'] != float('nan'))
# COMMAND ----------
# MAGIC %md
# MAGIC ### (3f) Predict Your Ratings
# MAGIC
# MAGIC We have our predicted ratings. Now we can print out the 25 movies with the highest predicted ratings.
# MAGIC
# MAGIC The steps you should perform are:
# MAGIC * Join your `predicted_ratings_df` DataFrame with the `movie_names_with_avg_ratings_df` DataFrame to obtain the ratings counts for each movie.
# MAGIC * Sort the resulting DataFrame (`predicted_with_counts_df`) by predicted rating (highest ratings first), and remove any ratings with a count of 75 or less.
# MAGIC * Print the top 25 movies that remain.
# COMMAND ----------
# TODO: Replace <FILL_IN> with the appropriate code
predicted_with_counts_df = predicted_ratings_df.join(
    movie_names_with_avg_ratings_df,
    predicted_ratings_df['movieId'] == movie_names_with_avg_ratings_df['movieId']
).select(movie_names_with_avg_ratings_df['movieId'],
         movie_names_with_avg_ratings_df['title'],
         movie_names_with_avg_ratings_df['average'],
         predicted_ratings_df['prediction'],
         movie_names_with_avg_ratings_df['count'])
predicted_highest_rated_movies_df = predicted_with_counts_df.sort(
    'prediction', ascending=False).filter('count > 75')
print 'My 25 highest rated movies as predicted (for movies with more than 75 reviews):'
display(predicted_highest_rated_movies_df.take(25))
# COMMAND ----------
|
py
|
1a5dec7b6db583fe693d4cdce1cdb9b2ae8a0dc1
|
import cv2
import numpy as np
import torch
def get_frames(filepath, max_frames=1e7, verbose=1000):
    """Read up to max_frames frames of a video into a float tensor of shape (N, C, H, W)."""
    vidcap = cv2.VideoCapture(filepath)
    success, image = vidcap.read()
    count = 0
    data = []
    while success and count < max_frames:
        # Normalise the current frame to [0, 1]; read the next frame afterwards so a
        # failed read() at the end of the video never appends a None frame.
        data.append(image / 255)
        count += 1
        if verbose != -1 and count % verbose == 0:
            print("Loading video %s: %.2f%%" % (filepath, count * 100 / max_frames))
        success, image = vidcap.read()
    data = np.array(data)
    data = torch.as_tensor(data)
    # (num_frames, H, W, C) -> (num_frames, C, H, W)
    return data.permute(0, 3, 1, 2)
def decompose(file_path, save_path, batch_size=64):
    """Save frames after frame 59950 as normalised (1, C, H, W) tensors under save_path."""
    import os
    vidcap = cv2.VideoCapture(file_path)
    success, _ = vidcap.read()
    count = 0
    fake_count = 0
    while success:
        # grab() only advances the stream; retrieve() decodes the grabbed frame when we
        # actually want to keep it, so no frames are skipped.
        success = vidcap.grab()
        if success and count % 1 == 0 and count > 59950:
            success, image = vidcap.retrieve()
            image = torch.from_numpy(np.transpose((image / 255), (2, 0, 1))).unsqueeze(0)
            torch.save(image, os.path.join(save_path, 'frame' + str(fake_count)))
            fake_count += 1
            print(fake_count)
        count += 1
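# Example usage (a minimal sketch; 'clip.mp4' and 'frames_out/' are hypothetical paths,
# and frames_out/ is assumed to already exist):
#     frames = get_frames('clip.mp4', max_frames=500, verbose=100)  # (N, 3, H, W) tensor
#     decompose('clip.mp4', 'frames_out')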
|
py
|
1a5dee7f756ea78d4189d8898db8e867610ceb39
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from .web_api_connected_service_ref import WebApiConnectedServiceRef
class WebApiConnectedServiceDetails(WebApiConnectedServiceRef):
"""WebApiConnectedServiceDetails.
:param id:
:type id: str
:param url:
:type url: str
:param connected_service_meta_data: Meta data for service connection
:type connected_service_meta_data: :class:`WebApiConnectedService <core.v4_0.models.WebApiConnectedService>`
:param credentials_xml: Credential info
:type credentials_xml: str
:param end_point: Optional uri to connect directly to the service such as https://windows.azure.com
:type end_point: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'connected_service_meta_data': {'key': 'connectedServiceMetaData', 'type': 'WebApiConnectedService'},
'credentials_xml': {'key': 'credentialsXml', 'type': 'str'},
'end_point': {'key': 'endPoint', 'type': 'str'}
}
def __init__(self, id=None, url=None, connected_service_meta_data=None, credentials_xml=None, end_point=None):
super(WebApiConnectedServiceDetails, self).__init__(id=id, url=url)
self.connected_service_meta_data = connected_service_meta_data
self.credentials_xml = credentials_xml
self.end_point = end_point
|
py
|
1a5deec9297ccd5bd80c2f4f281d0e3ef6226d62
|
import pytest
from celery.result import EagerResult
from portxpress.users.tasks import get_users_count
from portxpress.users.tests.factories import UserFactory
pytestmark = pytest.mark.django_db
def test_user_count(settings):
"""A basic test to execute the get_users_count Celery task."""
UserFactory.create_batch(3)
settings.CELERY_TASK_ALWAYS_EAGER = True
task_result = get_users_count.delay()
assert isinstance(task_result, EagerResult)
assert task_result.result == 3
|
py
|
1a5deed29f4c9f9674f556173bdca494cf389bbc
|
import os
import sys
py_path = os.path.abspath(sys.argv[0])
py_dir = os.path.dirname(py_path)
py_file, py_ext = os.path.splitext(os.path.basename(py_path))
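# Worked example (hypothetical invocation): running "python /home/user/tools/demo.py"
# yields py_path == "/home/user/tools/demo.py", py_dir == "/home/user/tools",
# py_file == "demo", and py_ext == ".py".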
|
py
|
1a5df22b0b4039677c39b8fa95b9b08c790384cc
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import unittest
import numpy as np
import torch
from reagent.models.mdn_rnn import MDNRNNMemoryPool, gmm_loss
from reagent.models.world_model import MemoryNetwork
from reagent.parameters import MDNRNNTrainerParameters
from reagent.reporting.world_model_reporter import WorldModelReporter
from reagent.test.world_model.simulated_world_model import SimulatedWorldModel
from reagent.training.world_model.mdnrnn_trainer import MDNRNNTrainer
from torch.distributions.categorical import Categorical
from torch.distributions.normal import Normal
logger = logging.getLogger(__name__)
class TestMDNRNN(unittest.TestCase):
def test_gmm_loss(self):
# seq_len x batch_size x gaussian_size x feature_size
# 1 x 1 x 2 x 2
mus = torch.Tensor([[[[0.0, 0.0], [6.0, 6.0]]]])
sigmas = torch.Tensor([[[[2.0, 2.0], [2.0, 2.0]]]])
# seq_len x batch_size x gaussian_size
pi = torch.Tensor([[[0.5, 0.5]]])
logpi = torch.log(pi)
# seq_len x batch_size x feature_size
batch = torch.Tensor([[[3.0, 3.0]]])
gl = gmm_loss(batch, mus, sigmas, logpi)
# first component, first dimension
n11 = Normal(mus[0, 0, 0, 0], sigmas[0, 0, 0, 0])
# first component, second dimension
n12 = Normal(mus[0, 0, 0, 1], sigmas[0, 0, 0, 1])
p1 = (
pi[0, 0, 0]
* torch.exp(n11.log_prob(batch[0, 0, 0]))
* torch.exp(n12.log_prob(batch[0, 0, 1]))
)
# second component, first dimension
n21 = Normal(mus[0, 0, 1, 0], sigmas[0, 0, 1, 0])
# second component, second dimension
n22 = Normal(mus[0, 0, 1, 1], sigmas[0, 0, 1, 1])
p2 = (
pi[0, 0, 1]
* torch.exp(n21.log_prob(batch[0, 0, 0]))
* torch.exp(n22.log_prob(batch[0, 0, 1]))
)
logger.info(
"gmm loss={}, p1={}, p2={}, p1+p2={}, -log(p1+p2)={}".format(
gl, p1, p2, p1 + p2, -(torch.log(p1 + p2))
)
)
assert -(torch.log(p1 + p2)) == gl
def test_mdnrnn_simulate_world_cpu(self):
self._test_mdnrnn_simulate_world()
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_mdnrnn_simulate_world_gpu(self):
self._test_mdnrnn_simulate_world(use_gpu=True)
def _test_mdnrnn_simulate_world(self, use_gpu=False):
num_epochs = 300
num_episodes = 400
batch_size = 200
action_dim = 2
seq_len = 5
state_dim = 2
simulated_num_gaussians = 2
mdrnn_num_gaussians = 2
simulated_num_hidden_layers = 1
simulated_num_hiddens = 3
mdnrnn_num_hidden_layers = 1
mdnrnn_num_hiddens = 10
adam_lr = 0.01
replay_buffer = MDNRNNMemoryPool(max_replay_memory_size=num_episodes)
swm = SimulatedWorldModel(
action_dim=action_dim,
state_dim=state_dim,
num_gaussians=simulated_num_gaussians,
lstm_num_hidden_layers=simulated_num_hidden_layers,
lstm_num_hiddens=simulated_num_hiddens,
)
possible_actions = torch.eye(action_dim)
for _ in range(num_episodes):
cur_state_mem = torch.zeros((seq_len, state_dim))
next_state_mem = torch.zeros((seq_len, state_dim))
action_mem = torch.zeros((seq_len, action_dim))
reward_mem = torch.zeros(seq_len)
not_terminal_mem = torch.zeros(seq_len)
next_mus_mem = torch.zeros((seq_len, simulated_num_gaussians, state_dim))
swm.init_hidden(batch_size=1)
next_state = torch.randn((1, 1, state_dim))
for s in range(seq_len):
cur_state = next_state
action = possible_actions[np.random.randint(action_dim)].view(
1, 1, action_dim
)
next_mus, reward = swm(action, cur_state)
not_terminal = 1
if s == seq_len - 1:
not_terminal = 0
# randomly draw for next state
next_pi = torch.ones(simulated_num_gaussians) / simulated_num_gaussians
index = Categorical(next_pi).sample((1,)).long().item()
next_state = next_mus[0, 0, index].view(1, 1, state_dim)
cur_state_mem[s] = cur_state.detach()
action_mem[s] = action
reward_mem[s] = reward.detach()
not_terminal_mem[s] = not_terminal
next_state_mem[s] = next_state.detach()
next_mus_mem[s] = next_mus.detach()
replay_buffer.insert_into_memory(
cur_state_mem, action_mem, next_state_mem, reward_mem, not_terminal_mem
)
num_batch = num_episodes // batch_size
mdnrnn_params = MDNRNNTrainerParameters(
hidden_size=mdnrnn_num_hiddens,
num_hidden_layers=mdnrnn_num_hidden_layers,
minibatch_size=batch_size,
learning_rate=adam_lr,
num_gaussians=mdrnn_num_gaussians,
)
mdnrnn_net = MemoryNetwork(
state_dim=state_dim,
action_dim=action_dim,
num_hiddens=mdnrnn_params.hidden_size,
num_hidden_layers=mdnrnn_params.num_hidden_layers,
num_gaussians=mdnrnn_params.num_gaussians,
)
if use_gpu:
mdnrnn_net = mdnrnn_net.cuda()
trainer = MDNRNNTrainer(memory_network=mdnrnn_net, params=mdnrnn_params)
trainer.reporter = WorldModelReporter(1)
for e in range(num_epochs):
for i in range(num_batch):
training_batch = replay_buffer.sample_memories(
batch_size, use_gpu=use_gpu
)
trainer.train(training_batch)
trainer.reporter.finish_epoch()
report = trainer.reporter.publish().training_report.oss_world_model_report
loss = np.mean(report.loss)
bce = np.mean(report.bce)
gmm = np.mean(report.gmm)
mse = np.mean(report.mse)
logger.info(
f"{e}-th epoch: \n" f"loss={loss}, bce={bce}, gmm={gmm}, mse={mse}"
)
if loss < 0 and gmm < -3.0 and bce < 0.6 and mse < 0.2:
return
raise RuntimeError("losses not reduced significantly during training")
|
py
|
1a5df377b0f69c566f3f331a60f5c28b663d80a4
|
"""Plot word counts."""
import argparse
import pandas as pd
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser(description="Plot word counts")
parser.add_argument('infile', type=argparse.FileType('r'),
nargs='?', default='-',
help='Word count csv file name')
parser.add_argument('--xlim', type=float, nargs=2,
metavar=('XMIN', 'XMAX'),
default=None, help='X-axis limits')
parser.add_argument('--outfile', type=str,
default='plotcounts.png',
help='Output image file name')
args = parser.parse_args()
df = pd.read_csv(args.infile, header=None,
names=('word', 'word_frequency'))
df['rank'] = df['word_frequency'].rank(ascending=False,
method='max')
df['inverse_rank'] = 1 / df['rank']
ax = df.plot.scatter(x='word_frequency',
y='inverse_rank',
figsize=[12, 6],
grid=True,
xlim=args.xlim)
plt.savefig(args.outfile)
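# Example invocation (hypothetical file names):
#     python plotcounts.py word_counts.csv --xlim 0 10000 --outfile zipf.png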
|
py
|
1a5df4755eef589a3f34066cb7e0869792ec42f4
|
from unittest import mock
from django.urls import reverse
from lego.apps.users import constants
from lego.apps.users.models import AbakusGroup, User
from lego.apps.users.registrations import Registrations
from lego.utils.test_utils import BaseAPITestCase
def _get_list_request_url():
return reverse("api:v1:student-confirmation-request-list")
def _get_list_perform_url():
return reverse("api:v1:student-confirmation-perform-list")
def _get_student_confirmation_token_request_url(token):
return f"{_get_list_request_url()}?token={token}"
def _get_student_confirmation_token_perform_url(token):
return f"{_get_list_perform_url()}?token={token}"
class RetrieveStudentConfirmationAPITestCase(BaseAPITestCase):
fixtures = ["test_abakus_groups.yaml", "test_users.yaml"]
def setUp(self):
self.user_with_student_confirmation = User.objects.get(username="test1")
self.user_without_student_confirmation = User.objects.get(username="test2")
def test_with_unauthenticated_user(self):
response = self.client.get(_get_list_request_url())
self.assertEqual(response.status_code, 401)
def test_without_token(self):
AbakusGroup.objects.get(name="Users").add_user(
self.user_without_student_confirmation
)
self.client.force_authenticate(self.user_without_student_confirmation)
response = self.client.get(_get_list_request_url())
self.assertEqual(response.status_code, 400)
def test_with_empty_token(self):
AbakusGroup.objects.get(name="Users").add_user(
self.user_without_student_confirmation
)
self.client.force_authenticate(self.user_without_student_confirmation)
response = self.client.get(_get_student_confirmation_token_request_url(""))
self.assertEqual(response.status_code, 400)
def test_with_invalid_token(self):
AbakusGroup.objects.get(name="Users").add_user(
self.user_without_student_confirmation
)
self.client.force_authenticate(self.user_without_student_confirmation)
response = self.client.get(
_get_student_confirmation_token_request_url("InvalidToken")
)
self.assertEqual(response.status_code, 400)
def test_with_valid_token(self):
AbakusGroup.objects.get(name="Users").add_user(
self.user_without_student_confirmation
)
self.client.force_authenticate(self.user_without_student_confirmation)
response = self.client.get(
_get_student_confirmation_token_request_url(
Registrations.generate_student_confirmation_token(
"teststudentusername", constants.DATA, True
)
)
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json().get("studentUsername"), "teststudentusername")
self.assertEqual(response.json().get("course"), constants.DATA)
self.assertEqual(response.json().get("member"), True)
def test_with_valid_token_and_capitalized_student_username(self):
AbakusGroup.objects.get(name="Users").add_user(
self.user_without_student_confirmation
)
self.client.force_authenticate(self.user_without_student_confirmation)
response = self.client.get(
_get_student_confirmation_token_request_url(
Registrations.generate_student_confirmation_token(
"TestStudentUsername", constants.DATA, True
)
)
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json().get("studentUsername"), "teststudentusername")
self.assertEqual(response.json().get("course"), constants.DATA)
self.assertEqual(response.json().get("member"), True)
class CreateStudentConfirmationAPITestCase(BaseAPITestCase):
fixtures = ["test_abakus_groups.yaml", "test_users.yaml"]
_test_student_confirmation_data = {
"student_username": "newteststudentusername",
"course": constants.DATA,
"member": True,
"captcha_response": "testCaptcha",
}
def setUp(self):
grade = AbakusGroup.objects.create(
name=constants.FIRST_GRADE_DATA, type=constants.GROUP_GRADE
)
self.user_with_student_confirmation = User.objects.get(username="test1")
grade.add_user(self.user_with_student_confirmation)
self.user_without_student_confirmation = User.objects.get(username="test2")
def test_with_unauthenticated_user(self):
response = self.client.post(_get_list_request_url())
self.assertEqual(response.status_code, 401)
def test_without_data(self):
AbakusGroup.objects.get(name="Users").add_user(
self.user_without_student_confirmation
)
self.client.force_authenticate(self.user_without_student_confirmation)
response = self.client.post(_get_list_request_url())
self.assertEqual(response.status_code, 400)
@mock.patch(
"lego.apps.users.serializers.student_confirmation.verify_captcha",
return_value=True,
)
def test_with_existing_data(self, *args):
AbakusGroup.objects.get(name="Users").add_user(
self.user_without_student_confirmation
)
self.client.force_authenticate(self.user_without_student_confirmation)
response = self.client.post(
_get_list_request_url(),
{
"student_username": "test1student",
"course": constants.DATA,
"member": True,
"captcha_response": "testCaptcha",
},
)
self.assertEqual(response.status_code, 400)
@mock.patch(
"lego.apps.users.serializers.student_confirmation.verify_captcha",
return_value=True,
)
def test_with_invalid_data_keys(self, *args):
AbakusGroup.objects.get(name="Users").add_user(
self.user_without_student_confirmation
)
self.client.force_authenticate(self.user_without_student_confirmation)
response = self.client.post(
_get_list_request_url(),
{
"wrong_username": "newteststudentusername",
"wrong_course": constants.DATA,
"wrong_member": True,
"wrong_captcha_response": "testCaptcha",
},
)
self.assertEqual(response.status_code, 400)
@mock.patch(
"lego.apps.users.serializers.student_confirmation.verify_captcha",
return_value=True,
)
def test_with_invalid_student_username(self, *args):
AbakusGroup.objects.get(name="Users").add_user(
self.user_without_student_confirmation
)
self.client.force_authenticate(self.user_without_student_confirmation)
invalid_data = self._test_student_confirmation_data.copy()
invalid_data["student_username"] = "test_u$er@"
response = self.client.post(_get_list_request_url(), invalid_data)
self.assertEqual(response.status_code, 400)
@mock.patch(
"lego.apps.users.serializers.student_confirmation.verify_captcha",
return_value=True,
)
def test_with_invalid_course(self, *args):
AbakusGroup.objects.get(name="Users").add_user(
self.user_without_student_confirmation
)
self.client.force_authenticate(self.user_without_student_confirmation)
invalid_data = self._test_student_confirmation_data.copy()
invalid_data["course"] = "test"
response = self.client.post(_get_list_request_url(), invalid_data)
self.assertEqual(response.status_code, 400)
@mock.patch(
"lego.apps.users.serializers.student_confirmation.verify_captcha",
return_value=True,
)
def test_with_invalid_member_boolean(self, *args):
AbakusGroup.objects.get(name="Users").add_user(
self.user_without_student_confirmation
)
self.client.force_authenticate(self.user_without_student_confirmation)
invalid_data = self._test_student_confirmation_data.copy()
invalid_data["member"] = "test"
response = self.client.post(_get_list_request_url(), invalid_data)
self.assertEqual(response.status_code, 400)
@mock.patch(
"lego.apps.users.serializers.student_confirmation.verify_captcha",
return_value=True,
)
def test_with_already_confirmed_student_username(self, mock_verify_captcha):
AbakusGroup.objects.get(name="Abakus").add_user(
self.user_with_student_confirmation
)
self.client.force_authenticate(self.user_with_student_confirmation)
response = self.client.post(
_get_list_request_url(), self._test_student_confirmation_data
)
self.assertEqual(response.status_code, 400)
@mock.patch(
"lego.apps.users.serializers.student_confirmation.verify_captcha",
return_value=False,
)
def test_with_invalid_captcha(self, *args):
AbakusGroup.objects.get(name="Users").add_user(
self.user_without_student_confirmation
)
self.client.force_authenticate(self.user_without_student_confirmation)
response = self.client.post(
_get_list_request_url(), self._test_student_confirmation_data
)
self.assertEqual(response.status_code, 400)
@mock.patch(
"lego.apps.users.serializers.student_confirmation.verify_captcha",
return_value=True,
)
def test_with_valid_captcha(self, mock_verify_captcha):
AbakusGroup.objects.get(name="Users").add_user(
self.user_without_student_confirmation
)
self.client.force_authenticate(self.user_without_student_confirmation)
response = self.client.post(
_get_list_request_url(), self._test_student_confirmation_data
)
self.assertEqual(response.status_code, 204)
class UpdateStudentConfirmationAPITestCase(BaseAPITestCase):
fixtures = ["initial_files.yaml", "initial_abakus_groups.yaml", "test_users.yaml"]
def setUp(self):
grade = AbakusGroup.objects.get(name=constants.FIRST_GRADE_DATA)
self.user_with_student_confirmation = User.objects.get(username="test1")
grade.add_user(self.user_with_student_confirmation)
self.user_with_student_confirmation = User.objects.get(username="test1")
self.user_without_student_confirmation = User.objects.get(username="test2")
self.user_with_grade_group_but_no_student_confirmation = User.objects.get(
username="pleb"
)
def create_token(
self, student_username="newstudentusername", course=constants.DATA, member=True
):
return Registrations.generate_student_confirmation_token(
student_username, course, member
)
def test_without_authenticated_user(self):
response = self.client.post(
_get_student_confirmation_token_request_url("randomToken")
)
self.assertEqual(response.status_code, 401)
def test_without_token(self):
AbakusGroup.objects.get(name="Users").add_user(
self.user_without_student_confirmation
)
self.client.force_authenticate(self.user_without_student_confirmation)
response = self.client.post(_get_list_perform_url())
self.assertEqual(response.status_code, 400)
def test_with_empty_token(self):
AbakusGroup.objects.get(name="Users").add_user(
self.user_without_student_confirmation
)
self.client.force_authenticate(self.user_without_student_confirmation)
response = self.client.post(_get_list_perform_url())
self.assertEqual(response.status_code, 400)
def test_with_invalid_token(self):
AbakusGroup.objects.get(name="Users").add_user(
self.user_without_student_confirmation
)
self.client.force_authenticate(self.user_without_student_confirmation)
response = self.client.post(
_get_student_confirmation_token_perform_url("InvalidToken")
)
self.assertEqual(response.status_code, 400)
def test_with_already_confirmed_student_username(self):
AbakusGroup.objects.get(name="Users").add_user(
self.user_with_student_confirmation
)
self.client.force_authenticate(self.user_with_student_confirmation)
token = self.create_token()
response = self.client.post(_get_student_confirmation_token_perform_url(token))
self.assertEqual(response.status_code, 400)
def test_without_abakus_member_checked_and_komtek_course(self):
AbakusGroup.objects.get(name="Users").add_user(
self.user_without_student_confirmation
)
self.client.force_authenticate(self.user_without_student_confirmation)
token = self.create_token(course=constants.KOMTEK, member=False)
response = self.client.post(_get_student_confirmation_token_perform_url(token))
self.assertEqual(response.status_code, 200)
user = self.user_without_student_confirmation
user_groups = user.all_groups
self.assertEqual(user.student_username, "newstudentusername")
self.assertEqual(user.is_staff, False)
# Test course groups
course_group = AbakusGroup.objects.get(name=constants.KOMTEK_LONG)
self.assertEqual(course_group in user_groups, True)
grade_group = AbakusGroup.objects.get(name=constants.FIRST_GRADE_KOMTEK)
self.assertEqual(grade_group in user_groups, True)
# Test member group
self.assertEqual(user.is_abakus_member, False)
member_group = AbakusGroup.objects.get(name=constants.MEMBER_GROUP)
self.assertEqual(member_group in user_groups, False)
def test_with_already_in_grade_group_but_not_abakus(self):
AbakusGroup.objects.get(name="Users").add_user(
self.user_with_grade_group_but_no_student_confirmation
)
AbakusGroup.objects.get(name="2. klasse Kommunikasjonsteknologi").add_user(
self.user_with_grade_group_but_no_student_confirmation
)
self.client.force_authenticate(
self.user_with_grade_group_but_no_student_confirmation
)
token = self.create_token(course=constants.KOMTEK, member=True)
response = self.client.post(_get_student_confirmation_token_perform_url(token))
self.assertEqual(response.status_code, 200)
user = self.user_with_grade_group_but_no_student_confirmation
user_groups = user.all_groups
self.assertEqual(user.student_username, "newstudentusername")
self.assertEqual(user.is_staff, False)
# Test course groups
course_group = AbakusGroup.objects.get(name=constants.KOMTEK_LONG)
self.assertEqual(course_group in user_groups, True)
grade_group = AbakusGroup.objects.get(name=constants.FIRST_GRADE_KOMTEK)
self.assertEqual(grade_group in user_groups, False)
grade_group = AbakusGroup.objects.get(name="2. klasse Kommunikasjonsteknologi")
self.assertEqual(grade_group in user_groups, True)
# Test member group
self.assertEqual(user.is_abakus_member, True)
member_group = AbakusGroup.objects.get(name=constants.MEMBER_GROUP)
self.assertEqual(member_group in user_groups, True)
def test_with_abakus_member_checked(self):
AbakusGroup.objects.get(name="Users").add_user(
self.user_without_student_confirmation
)
self.client.force_authenticate(self.user_without_student_confirmation)
token = self.create_token()
response = self.client.post(_get_student_confirmation_token_perform_url(token))
self.assertEqual(response.status_code, 200)
user = self.user_without_student_confirmation
user_groups = user.all_groups
self.assertEqual(user.is_staff, False)
# Test user data
self.assertEqual(user.student_username, "newstudentusername")
self.assertEqual(user.is_staff, False)
# Test course groups
course_group = AbakusGroup.objects.get(name=constants.DATA_LONG)
self.assertEqual(course_group in user_groups, True)
grade_group = AbakusGroup.objects.get(name=constants.FIRST_GRADE_DATA)
self.assertEqual(grade_group in user_groups, True)
# Test member group
self.assertEqual(user.is_abakus_member, True)
member_group = AbakusGroup.objects.get(name=constants.MEMBER_GROUP)
self.assertEqual(member_group in user_groups, True)
|
py
|
1a5df4a68e61850a7b679b1c7b7086c4926b89c5
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Download and build the data if it does not exist.
from parlai.core.build_data import DownloadableFile
from parlai.utils.io import PathManager
import parlai.core.build_data as build_data
import os
import json
VERSION = '1'
TRAIN_FILENAME = 'hotpot_train_v{}.1.json'.format(VERSION)
DEV_DISTRACTOR_FILENAME = 'hotpot_dev_distractor_v{}.json'.format(VERSION)
DEV_FULLWIKI_FILENAME = 'hotpot_dev_fullwiki_v{}.json'.format(VERSION)
RESOURCES = [
DownloadableFile(
'http://curtis.ml.cmu.edu/datasets/hotpot/hotpot_train_v1.1.json',
'hotpot_train_v1.1.json',
'26650cf50234ef5fb2e664ed70bbecdfd87815e6bffc257e068efea5cf7cd316',
zipped=False,
),
DownloadableFile(
'http://curtis.ml.cmu.edu/datasets/hotpot/hotpot_dev_distractor_v1.json',
'hotpot_dev_distractor_v1.json',
'4e9ecb5c8d3b719f624d66b60f8d56bf227f03914f5f0753d6fa1b359d7104ea',
zipped=False,
),
DownloadableFile(
'http://curtis.ml.cmu.edu/datasets/hotpot/hotpot_dev_fullwiki_v1.json',
'hotpot_dev_fullwiki_v1.json',
'2f1f3e594a3066a3084cc57950ca2713c24712adaad03af6ccce18d1846d5618',
zipped=False,
),
]
OUTPUT_FORMAT = 'text:{context_question}\t' 'labels:{answer}'
def _handle_data_point(data_point):
output = []
context_question_txt = ""
for [title, sentences_list] in data_point['context']:
sentences = '\\n'.join(sentences_list)
context_question_txt += '{}\\n{}\\n\\n'.format(title, sentences)
context_question_txt += data_point['question']
output = OUTPUT_FORMAT.format(
context_question=context_question_txt, answer=data_point['answer']
)
output += '\t\tepisode_done:True\n'
return output
def make_parlai_format(outpath, dtype, data):
print('building parlai:' + dtype)
with PathManager.open(os.path.join(outpath, dtype + '.txt'), 'w') as fout:
for data_point in data:
fout.write(_handle_data_point(data_point))
def build(opt):
dpath = os.path.join(opt['datapath'], 'HotpotQA')
if not build_data.built(dpath, version_string=VERSION):
print('[building data: ' + dpath + ']')
if build_data.built(dpath):
# An older version exists, so remove these outdated files.
build_data.remove_dir(dpath)
build_data.make_dir(dpath)
# Download the data.
for downloadable_file in RESOURCES:
downloadable_file.download_file(dpath)
with PathManager.open(os.path.join(dpath, TRAIN_FILENAME)) as f:
data = json.load(f)
make_parlai_format(dpath, 'train', data)
with PathManager.open(os.path.join(dpath, DEV_DISTRACTOR_FILENAME)) as f:
data = json.load(f)
make_parlai_format(dpath, 'valid_distractor', data)
with PathManager.open(os.path.join(dpath, DEV_FULLWIKI_FILENAME)) as f:
data = json.load(f)
make_parlai_format(dpath, 'valid_fullwiki', data)
# Mark the data as built.
build_data.mark_done(dpath, version_string=VERSION)
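# Note: in ParlAI, build() is normally called by the task's teacher code with an opt dict
# that contains at least 'datapath'; a sketch with a hypothetical path:
#     build({'datapath': '/tmp/ParlAI/data'})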
|