max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
alipay/aop/api/domain/SpecialVoucher.py | antopen/alipay-sdk-python-all | 213 | 11160521 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class SpecialVoucher(object):
def __init__(self):
self._floor_amount = None
self._goods_name = None
self._origin_amount = None
self._special_amount = None
@property
def floor_amount(self):
return self._floor_amount
@floor_amount.setter
def floor_amount(self, value):
self._floor_amount = value
@property
def goods_name(self):
return self._goods_name
@goods_name.setter
def goods_name(self, value):
self._goods_name = value
@property
def origin_amount(self):
return self._origin_amount
@origin_amount.setter
def origin_amount(self, value):
self._origin_amount = value
@property
def special_amount(self):
return self._special_amount
@special_amount.setter
def special_amount(self, value):
self._special_amount = value
def to_alipay_dict(self):
params = dict()
if self.floor_amount:
if hasattr(self.floor_amount, 'to_alipay_dict'):
params['floor_amount'] = self.floor_amount.to_alipay_dict()
else:
params['floor_amount'] = self.floor_amount
if self.goods_name:
if hasattr(self.goods_name, 'to_alipay_dict'):
params['goods_name'] = self.goods_name.to_alipay_dict()
else:
params['goods_name'] = self.goods_name
if self.origin_amount:
if hasattr(self.origin_amount, 'to_alipay_dict'):
params['origin_amount'] = self.origin_amount.to_alipay_dict()
else:
params['origin_amount'] = self.origin_amount
if self.special_amount:
if hasattr(self.special_amount, 'to_alipay_dict'):
params['special_amount'] = self.special_amount.to_alipay_dict()
else:
params['special_amount'] = self.special_amount
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = SpecialVoucher()
if 'floor_amount' in d:
o.floor_amount = d['floor_amount']
if 'goods_name' in d:
o.goods_name = d['goods_name']
if 'origin_amount' in d:
o.origin_amount = d['origin_amount']
if 'special_amount' in d:
o.special_amount = d['special_amount']
return o
|
Engine/Includes/Lua/Modules/python/test.py | GCourtney27/Retina-Engine | 141 | 11160527 | print "----- import lua -----"
import lua
print "----- lg = lua.globals() -----"
lg = lua.globals()
print "lg:", lg
print "lg._G:", lg._G
print "lg['_G']:", lg['_G']
print "----- lg.foo = \"bar\" -----"
lg.foo = 'bar'
print "----- lg.tmp = [] -----"
lg.tmp = []
print "----- print lg.tmp -----"
print lg.tmp
print "----- lua.execute(\"xxx = {1,2,3,foo={4,5}}\") -----"
lua.execute("xxx = {1,2,3,foo={4,5}}")
print "----- print lg.xxx[1] -----"
print lg.xxx[1]
print "----- print lg.xxx[2] -----"
print lg.xxx[2]
print "----- print lg.xxx[3] -----"
print lg.xxx[3]
print "----- print lg.xxx['foo'][1] -----"
print lg.xxx['foo'][1]
print "lua.require =", lua.require
try:
lua.require("foo")
except:
print "lua.require('foo') raised an exception"
|
tests/r/test_natural_park.py | hajime9652/observations | 199 | 11160557 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.natural_park import natural_park
def test_natural_park():
"""Test module natural_park.py by downloading
natural_park.csv and testing shape of
extracted data has 312 rows and 7 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = natural_park(test_path)
try:
assert x_train.shape == (312, 7)
except:
shutil.rmtree(test_path)
raise
|
apps/pay/views.py | aeasringnar/-django-RESTfulAPI | 242 | 11160563 | import uuid
import os
import requests
import json
import re
import time
import datetime
import random
import hashlib
from xml.etree import ElementTree as ET
from django.conf import settings
from django.db import transaction
from django.http import HttpResponse
import hmac
import xml
from django.db.models import F, Q
from rest_framework import serializers, status, generics, mixins, viewsets
from rest_framework.views import APIView
from rest_framework.viewsets import ModelViewSet, GenericViewSet
from rest_framework.response import Response
from rest_framework.filters import SearchFilter, OrderingFilter
from django_filters.rest_framework import DjangoFilterBackend
# Official JWT
# from rest_framework_jwt.utils import jwt_payload_handler, jwt_encode_handler ,jwt_response_payload_handler
# from rest_framework_jwt.authentication import JSONWebTokenAuthentication
# Cache configuration
from django.core.cache import cache
# Custom JWT configuration and shared helpers
from utils.utils import jwt_decode_handler,jwt_encode_handler,jwt_payload_handler,jwt_response_payload_handler,google_otp,VisitThrottle,getDistance,NormalObj
from utils.jwtAuth import JWTAuthentication
from utils.pagination import Pagination
from utils.permissions import JWTAuthPermission, AllowAllPermission, BaseAuthPermission
from .models import *
from .serializers import *
from .filters import *
from functools import reduce
from urllib.parse import unquote_plus
from django.views.decorators.csrf import csrf_exempt
from utils.WeChatPay import WeChatUnityPay
from utils.AliPay import AliPay
@csrf_exempt
@transaction.atomic
def wechat_notify_url(request):
'''
WeChat Pay callback endpoint
'''
try:
print('Request method:', request.method)
return_xml = """<xml><return_code><![CDATA[SUCCESS]]></return_code><return_msg><![CDATA[OK]]></return_msg></xml>"""
webData = request.body
print('Callback payload:', webData)
if bool(webData):
xmlData = ET.fromstring(webData)
if xmlData.find('return_code').text != 'SUCCESS':
print('Callback returned an error')
return HttpResponse(return_xml,content_type='application/xml;charset=utf-8')
else:
if xmlData.find('result_code').text != 'SUCCESS':
print('Payment failed!')
return HttpResponse(return_xml,content_type='application/xml;charset=utf-8')
else:
print('Order paid successfully... updating order status')
order_num = xmlData.find('out_trade_no').text
order = Order.objects.filter(order_num=order_num).first()
# Update the status, payment time and payment method
order.status = 1
order.pay_time = datetime.datetime.now()
order.pay_type = 1
order.save()
# Update stock and sales; stock is locked once the order reaches this point
for detail in order.order_details.all():
detail.good_grade.sales += detail.buy_num
detail.good_grade.stock -= detail.buy_num
detail.good_grade.save()
return HttpResponse(return_xml,content_type='application/xml;charset=utf-8')
return HttpResponse(return_xml,content_type='application/xml;charset=utf-8')
except Exception as e:
print(e)
print({"message": "网络错误:%s"%str(e), "errorCode": 1, "data": {}})
return_xml = """<xml><return_code><![CDATA[SUCCESS]]></return_code><return_msg><![CDATA[OK]]></return_msg></xml>"""
return HttpResponse(return_xml,content_type='application/xml;charset=utf-8')
class AlipayNotifyUrlView(APIView):
def post(self, request):
"""
Handle Alipay's notify_url callback
:param request:
:return:
"""
try:
processed_dict = {}
for key, value in request.data.items():
processed_dict[key] = value
if processed_dict:
print('Alipay parameters:', processed_dict)
sign = processed_dict.pop("sign", None)
alipay = AliPay(method='alipay.trade.app.pay')
verify_re = alipay.verify(processed_dict, sign)
print('Alipay parameters:', processed_dict)
print('Signature verification result:', verify_re)
out_trade_no = processed_dict.get('out_trade_no', None)
trade_no = processed_dict.get('trade_no', None)
# response = VerifyAndDo(out_trade_no, pay_way='alipay')
order = Order.objects.filter(order_num=out_trade_no, status=0).first()
if order:
order.status = 1
order.wechat_order_num = trade_no
order.pay_time = datetime.datetime.now()
order.pay_type = 2
order.save()
# Update stock and sales; stock is locked once the order reaches this point
for detail in order.order_details.all():
detail.good_grade.sales += detail.buy_num
detail.good_grade.stock -= detail.buy_num
detail.good_grade.save()
else:
print('No order found with order number %s ...' % out_trade_no)
return Response('success')
except Exception as e:
print('Error occurred:', e)
return Response({"message": "Unexpected view error: %s" % e, "errorCode": 1, "data": {}})
class WxPayViewSerializer(serializers.Serializer):
order_num = serializers.CharField()  # order number
class WeChatPricePayView(generics.GenericAPIView):
authentication_classes = (JWTAuthentication,)
serializer_class = WxPayViewSerializer
@transaction.atomic
def post(self,request):
'''
WeChat Pay endpoint
'''
try:
json_data = {"message": "支付数据返回成功", "errorCode": 0, "data": {}}
if not request.auth:
return Response({"message": "请先登录", "errorCode": 2, "data": {}})
if request.user.group.group_type in ['SuperAdmin', 'Admin']:
return Response({"message": "非法用户,无法下单", "errorCode": 2, "data": {}})
serializer = self.get_serializer(data=request.data)
if not serializer.is_valid():
return Response({"message": str(serializer.errors), "errorCode": 4, "data": {}})
order_num = serializer.data.get('order_num')
order = Order.objects.filter(order_num=order_num, status=0).first()
if not order:
return Response({"message": '订单未找到,支付失败。', "errorCode": 3, "data": {}})
order_details = order.order_details.all()
check_stock = True
# Use a pessimistic lock (select_for_update) to handle concurrent orders; consider invalidating the order when stock is insufficient
for item in order_details:
good_grade = GoodGrade.objects.select_for_update().get(id=item.good_grade_id)
if item.buy_num > good_grade.stock:
check_stock = False
break
if not check_stock:
return Response({"message": '有规格库存不足,无法发起支付。', "errorCode": 3, "data": {}})
price = int(float(str(order.all_price)) * 100)
wxpay_object = WeChatUnityPay(out_trade_no=order_num, body='Order ' + order_num[12:], total_fee=price, trade_type='JSAPI', openid=request.user.open_id)
params = wxpay_object.re_finall()
print('Final parameters returned to the frontend:', params)
json_data['data'] = params
return Response(json_data)
except Exception as e:
print('Error occurred:', e)
return Response({"message": "Unexpected view error: %s" % e, "errorCode": 1, "data": {}}) |
downstream/mosei/dataset.py | OlegJakushkin/s3prl | 856 | 11160596 | import random
import torch
import torch.nn as nn
from torch.utils.data.dataset import Dataset
import os
import torchaudio
'''
SAMPLE_RATE = 16000
EXAMPLE_WAV_MIN_SEC = 5
EXAMPLE_WAV_MAX_SEC = 15
EXAMPLE_DATASET_SIZE = 10000
'''
class MOSEIDataset(Dataset):
def __init__(self, split, data, path):
self.split = split
self.data = data
self.path = path
def __getitem__(self, idx):
wav_path = os.path.join(self.path, 'Segmented_Audio', self.split, self.data[idx][0])
wav, sr = torchaudio.load(wav_path)
label = self.data[idx][1]
'''
wav_sec = random.randint(EXAMPLE_WAV_MIN_SEC, EXAMPLE_WAV_MAX_SEC)
wav = torch.randn(SAMPLE_RATE * wav_sec)
label = random.randint(0, self.class_num - 1)
'''
return wav.view(-1), torch.tensor(label).long()
def __len__(self):
return len(self.data)
def collate_fn(self, samples):
wavs, labels = [], []
for wav, label in samples:
wavs.append(wav)
labels.append(label)
return wavs, labels
|
survae/transforms/__init__.py | alisiahkoohi/survae_flows | 262 | 11160674 | from .base import Transform, SequentialTransform
from .cond_base import ConditionalTransform
from .bijections import *
from .surjections import *
from .stochastic import *
|
setup.py | wagtail/telepath | 114 | 11160675 | #!/usr/bin/env python
from setuptools import setup, find_packages
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setup(
name='telepath',
version='0.2',
description="A library for exchanging data between Python and JavaScript",
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/wagtail/telepath',
packages=["telepath"],
include_package_data=True,
license='BSD',
long_description=long_description,
long_description_content_type="text/markdown",
python_requires=">=3.5",
extras_require={
'docs': [
'mkdocs>=1.1,<1.2',
'mkdocs-material>=6.2,<6.3',
],
},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Framework :: Django',
],
)
|
moe/optimal_learning/python/interfaces/covariance_interface.py | dstoeckel/MOE | 966 | 11160678 | # -*- coding: utf-8 -*-
r"""Interface for covariance function: covariance of two points and spatial/hyperparameter derivatives.
.. Note:: comments are copied from the file comments of gpp_covariance.hpp
Covariance functions have a few fundamental properties (see references at the bottom for full details). In short,
they are SPSD (symmetric positive semi-definite): ``k(x,x') = k(x', x)`` for any ``x,x'`` and ``k(x,x) >= 0`` for all ``x``.
As a consequence, covariance matrices are SPD as long as the input points are all distinct.
Additionally, the Square Exponential and Matern covariances (as well as other functions) are stationary. In essence,
this means they can be written as ``k(r) = k(|x - x'|) = k(x, x') = k(x', x)``. So they operate on distances between
points as opposed to the points themselves. The name stationary arises because the covariance is the same
modulo linear shifts: ``k(x+a, x'+a) = k(x, x').``
Covariance functions are a fundamental component of gaussian processes: as noted in the gpp_math.hpp header comments,
gaussian processes are defined by a mean function and a covariance function. Covariance functions describe how
two random variables change in relation to each other--more explicitly, in a GP they specify how similar two points are.
The choice of covariance function is important because it encodes our assumptions about how the "world" behaves.
Covariance functions also generally have hyperparameters (e.g., signal/background noise, length scales) that specify the
assumed behavior of the Gaussian Process. Specifying hyperparameters is tricky because changing them fundamentally changes
the behavior of the GP. :mod:`moe.optimal_learning.python.interfaces.optimization_interface` together
with :mod:`moe.optimal_learning.python.interfaces.log_likelihood_interface` provide methods
for optimizing and evaluating model fit, respectively.
"""
from abc import ABCMeta, abstractmethod, abstractproperty
class CovarianceInterface(object):
r"""Interface for a covariance function: covariance of two points and spatial/hyperparameter derivatives.
.. Note:: comments are copied from the class comments of CovarianceInterface in gpp_covariance.hpp
Abstract class to enable evaluation of covariance functions--supports the evaluation of the covariance between two
points, as well as the gradient with respect to those coordinates and gradient/hessian with respect to the
hyperparameters of the covariance function.
Covariance operators, ``cov(x_1, x_2)``, are SPD. Due to the symmetry, there is no need to differentiate wrt x_1 and x_2; hence
the gradient operation should only take gradients wrt dim variables, where ``dim = |x_1|``
Hyperparameters (denoted ``\theta_j``) are stored as class member data by subclasses.
Implementers of this ABC are required to manage their own hyperparameters.
TODO(GH-71): getter/setter for hyperparameters.
"""
__metaclass__ = ABCMeta
@abstractproperty
def num_hyperparameters(self):
"""Return the number of hyperparameters of this covariance function."""
pass
def get_hyperparameters(self):
"""Get the hyperparameters (array of float64 with shape (num_hyperparameters)) of this covariance."""
pass
def set_hyperparameters(self, hyperparameters):
"""Set hyperparameters to the specified hyperparameters; ordering must match.
:param hyperparameters: hyperparameters
:type hyperparameters: array of float64 with shape (num_hyperparameters)
"""
pass
hyperparameters = abstractproperty(get_hyperparameters, set_hyperparameters)
@abstractmethod
def covariance(self, point_one, point_two):
r"""Compute the covariance function of two points, cov(``point_one``, ``point_two``).
.. Note:: comments are copied from the matching method comments of CovarianceInterface in gpp_covariance.hpp
and comments are copied to the matching method comments of
:mod:`moe.optimal_learning.python.python_version.covariance.SquareExponential`.
The covariance function is guaranteed to be symmetric by definition: ``covariance(x, y) = covariance(y, x)``.
This function is also positive definite by definition.
:param point_one: first input, the point ``x``
:type point_one: array of float64 with shape (dim)
:param point_two: second input, the point ``y``
:type point_two: array of float64 with shape (dim)
:return: value of covariance between the input points
:rtype: float64
"""
pass
@abstractmethod
def grad_covariance(self, point_one, point_two):
r"""Compute the gradient of self.covariance(point_one, point_two) with respect to the FIRST argument, point_one.
.. Note:: comments are copied from the matching method comments of CovarianceInterface in gpp_covariance.hpp
and comments are copied to the matching method comments of
:mod:`moe.optimal_learning.python.python_version.covariance.SquareExponential`.
This distinction is important for maintaining the desired symmetry. ``Cov(x, y) = Cov(y, x)``.
Additionally, ``\pderiv{Cov(x, y)}{x} = \pderiv{Cov(y, x)}{x}``.
However, in general, ``\pderiv{Cov(x, y)}{x} != \pderiv{Cov(y, x)}{y}`` (NOT equal! These may differ by a negative sign)
Hence to avoid separate implementations for differentiating against first vs second argument, this function only handles
differentiation against the first argument. If you need ``\pderiv{Cov(y, x)}{x}``, just swap points x and y.
:param point_one: first input, the point ``x``
:type point_one: array of float64 with shape (dim)
:param point_two: second input, the point ``y``
:type point_two: array of float64 with shape (dim)
:return: grad_cov: i-th entry is ``\pderiv{cov(x_1, x_2)}{x_i}``
:rtype: array of float64 with shape (dim)
"""
pass
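# Worked illustration (hypothetical; uses the toy squared exponential sketched
# near the top of this module, not the library implementation). With
#   k(x, y) = alpha * exp(-0.5 * |x - y|^2 / length^2),
# differentiating against the FIRST argument gives
#   \pderiv{k(x, y)}{x_i} = -(x_i - y_i) / length^2 * k(x, y),
# while differentiating against the second argument flips the sign:
#   \pderiv{k(x, y)}{y_i} = +(x_i - y_i) / length^2 * k(x, y).
# This is the asymmetry the docstring above refers to: to get the derivative
# with respect to the second point, swap the points and reuse this method.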
@abstractmethod
def hyperparameter_grad_covariance(self, point_one, point_two):
r"""Compute the gradient of self.covariance(point_one, point_two) with respect to its hyperparameters.
.. Note:: comments are copied from the matching method comments of CovarianceInterface in gpp_covariance.hpp
and comments are copied to the matching method comments of
:mod:`moe.optimal_learning.python.python_version.covariance.SquareExponential`.
Unlike GradCovariance(), the order of point_one and point_two is irrelevant here (since we are not differentiating against
either of them). Thus the matrix of grad covariances (wrt hyperparameters) is symmetric.
:param point_one: first input, the point ``x``
:type point_one: array of float64 with shape (dim)
:param point_two: second input, the point ``y``
:type point_two: array of float64 with shape (dim)
:return: grad_hyperparameter_cov: i-th entry is ``\pderiv{cov(x_1, x_2)}{\theta_i}``
:rtype: array of float64 with shape (num_hyperparameters)
"""
pass
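# Worked illustration (hypothetical; same toy squared exponential as above).
# With hyperparameters theta = (alpha, length),
#   \pderiv{k(x, y)}{alpha}  = k(x, y) / alpha,
#   \pderiv{k(x, y)}{length} = k(x, y) * |x - y|^2 / length^3.
# Neither expression changes when x and y are swapped, which matches the note
# above that the hyperparameter gradient is independent of the point ordering.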
@abstractmethod
def hyperparameter_hessian_covariance(self, point_one, point_two):
r"""Compute the hessian of self.covariance(point_one, point_two) with respect to its hyperparameters.
.. Note:: comments are copied from the matching method comments of CovarianceInterface in gpp_covariance.hpp
The Hessian matrix of the covariance evaluated at x_1, x_2 with respect to the hyperparameters. The Hessian is defined as:
``[ \ppderiv{cov}{\theta_0^2} \mixpderiv{cov}{\theta_0}{\theta_1} ... \mixpderiv{cov}{\theta_0}{\theta_{n-1}} ]``
``[ \mixpderiv{cov}{\theta_1}{\theta_0} \ppderiv{cov}{\theta_1^2 } ... \mixpderiv{cov}{\theta_1}{\theta_{n-1}} ]``
``[ ... ... ]``
``[ \mixpderiv{cov}{\theta_{n-1}}{\theta_0} \mixpderiv{cov}{\theta_{n-1}}{\theta_1} ... \ppderiv{cov}{\theta_{n-1}^2} ]``
where "cov" abbreviates covariance(x_1, x_2) and "n" refers to the number of hyperparameters.
Unless noted otherwise in subclasses, the Hessian is symmetric (due to the equality of mixed derivatives when a function
f is twice continuously differentiable).
Similarly to the gradients, the Hessian is independent of the order of ``x_1, x_2: H_{cov}(x_1, x_2) = H_{cov}(x_2, x_1)``
For further details: http://en.wikipedia.org/wiki/Hessian_matrix
:param point_one: first input, the point ``x``
:type point_one: array of float64 with shape(dim)
:param point_two: second input, the point ``y``
:type point_two: array of float64 with shape (dim)
:return: hessian_hyperparameter_cov: ``(i,j)``-th entry is ``\mixpderiv{cov(x_1, x_2)}{\theta_i}{\theta_j}``
:rtype: array of float64 with shape (num_hyperparameters, num_hyperparameters)
"""
pass
|
sen/tui/commands/widget.py | lachmanfrantisek/sen | 956 | 11160681 | """
widget specific commands
"""
import logging
from sen.tui.commands.base import register_command, SameThreadCommand
import urwidtrees
logger = logging.getLogger(__name__)
@register_command
class NavigateTopCommand(SameThreadCommand):
name = "navigate-top"
description = "go to first line"
def run(self):
# FIXME: refactor
if isinstance(self.buffer.widget, urwidtrees.TreeBox):
self.buffer.widget.focus_first()
else:
self.buffer.widget.set_focus(0)
self.buffer.widget.reload_widget()
@register_command
class NavigateBottomCommand(SameThreadCommand):
name = "navigate-bottom"
description = "go to last line"
def run(self):
# FIXME: refactor
if isinstance(self.buffer.widget, urwidtrees.TreeBox):
self.buffer.widget.focus_last()
else:
self.buffer.widget.set_focus(len(self.buffer.widget.body) - 1)
self.buffer.widget.reload_widget()
@register_command
class NavigateUpCommand(SameThreadCommand):
name = "navigate-up"
description = "go one line up"
def run(self):
return super(self.buffer.widget.__class__, self.buffer.widget).keypress(self.size, "up")
@register_command
class NavigateDownCommand(SameThreadCommand):
name = "navigate-down"
description = "go one line down"
def run(self):
return super(self.buffer.widget.__class__, self.buffer.widget).keypress(self.size, "down")
@register_command
class NavigateUpwardsCommand(SameThreadCommand):
name = "navigate-upwards"
description = "go 10 lines up"
def run(self):
if isinstance(self.buffer.widget, urwidtrees.TreeBox):
self.ui.notify_message("This movement is not available.", level="error")
return
try:
self.buffer.widget.set_focus(self.buffer.widget.get_focus()[1] - 10)
except IndexError:
self.buffer.widget.set_focus(0)
self.buffer.widget.reload_widget()
return
@register_command
class NavigateDownwardsCommand(SameThreadCommand):
name = "navigate-downwards"
description = "go 10 lines down"
def run(self):
if isinstance(self.buffer.widget, urwidtrees.TreeBox):
self.ui.notify_message("This movement is not available.", level="error")
return
try:
self.buffer.widget.set_focus(self.buffer.widget.get_focus()[1] + 10)
except IndexError:
self.buffer.widget.set_focus(len(self.buffer.widget.body) - 1)
self.buffer.widget.reload_widget()
return
|
tensorflow_lite_support/metadata/python/tests/metadata_writers/test_utils.py | khanhlvg/tflite-support | 242 | 11160691 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test utils for MetadataWriter."""
import os
from typing import Union
from tensorflow.python.platform import resource_loader
def create_calibration_file(file_dir: str,
file_name: str = "score_calibration.txt",
content: str = "1.0,2.0,3.0,4.0") -> str:
"""Creates the calibration file."""
calibration_file = os.path.join(file_dir, file_name)
with open(calibration_file, mode="w") as file:
file.write(content)
return calibration_file
def load_file(file_name: str, mode: str = "rb") -> Union[str, bytes]:
"""Loads files from resources."""
file_path = get_resource_path(file_name)
with open(file_path, mode) as file:
return file.read()
def get_resource_path(file_name: str) -> str:
"""Gets resource path from the loader."""
return resource_loader.get_path_to_datafile(file_name)
|
L1Trigger/L1CaloTrigger/test/Phase1L1TJetHwEmuComp.py | ckamtsikis/cmssw | 852 | 11160709 | #!/usr/bin/env python
from __future__ import print_function
import os
import sys
import math
import numpy
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import colors
ptLSB = 0.25;
etaLSB = 0.0043633231;
phiLSB = 0.0043633231;
hwJets = []
emJets = []
hwData = []
emData = []
hwDataNoZ = []
emDataNoZ = []
nHw = 0
nEm = 0
nEv = 5000
nEvNoZ = 0
for hwRx in range(0,70):
with open("datFull/" + str(hwRx) + "/tx_summary.txt", "r") as inFile:
frameIt = -1
for line in inFile:
if('1v' in line):
frameIt += 1
if frameIt < 20:
continue
if hwRx == 0 and frameIt < 33:
continue
linkData = line.split('1v')
for wordIt in range(1,25):
word = linkData[wordIt].replace(' ','').replace('\n','')
if int(word, 16) & 0xffff:
jet = word[8:]
hwJets.append([(int(jet,16)&0xffff)*ptLSB,
((((int(jet,16)>>24)&0xff)*19)+9)*etaLSB,
((((int(jet,16)>>16)&0xff)*20)+10)*phiLSB])
if (int(word, 16)>>32) & 0xffff:
jet = word[:8]
hwJets.append([(int(jet,16)&0xffff)*ptLSB,
((((int(jet,16)>>24)&0xff)*19)+9)*etaLSB,
((((int(jet,16)>>16)&0xff)*20)+10)*phiLSB])
if (frameIt%13) == 6:
if(nHw>=nEv):
break
nHw+=1
if len(hwJets)==0:
hwJets.append([0,0,0])
hwData.append(hwJets)
del hwJets
hwJets = []
with open("emuout.txt", "r") as inFile:
for line in inFile:
if " " in line:
if(nEm>=nEv):
break
nEm+=1
if len(emJets)>0:
emData.append(emJets)
del emJets
emJets = []
else:
jet = [float(line.split("\t")[0]),
float(line.split("\t")[1]),
float(line.split("\t")[2])]
emJets.append(jet)
nDiff = 0
for evIt in range(0,nEv):
if len(hwData[evIt]) != len(emData[evIt]):
nDiff+=1
continue
goodJet=0
for hwJet in hwData[evIt]:
for emJet in emData[evIt]:
if hwJet[0] == emJet[0]:
if (hwJet[1]-emJet[1])<0.01:
if (hwJet[2]-emJet[2])<0.01:
goodJet+=1
if goodJet < len(hwData[evIt]):
nDiff+=1
print("\n\nnEvent = " + str(nEv) + "\nnDiff = " + str(nDiff) + "\nGood events = " + str((1-float(nDiff)/float(nEv))*100) + "%")
print("\n\n=====================================================================================")
print("\t\tFirmware Events: " + str(nHw) + "\t\t" + "Emulator Events: " + str(nEm))
print("=====================================================================================")
print("\t\tpT\t" + "eta\t" + "phi\t\t" + "pT\t" + "eta\t" + "phi\t")
print("=====================================================================================")
for evIt in range(0,nEv):
if hwData[evIt][0][0] > 0:
hwDataNoZ.append(hwData[evIt])
if emData[evIt][0][0] > 0:
emDataNoZ.append(emData[evIt])
nEvNoZ+=1
for evIt in range(0,nEv):
if hwData[evIt][0][0] ==0 and emData[evIt][0][0] == 0:
continue
jetCount=0
jetDiff = len(hwData[evIt]) - len(emData[evIt])
print("")
if jetDiff==0:
for jetIt in range(len(hwData[evIt])):
print(str(evIt) + "\t\t" + str(hwData[evIt][jetIt][0]) + "\t" + str(hwData[evIt][jetIt][1])[:4] + "\t" + str(hwData[evIt][jetIt][2])[:4] + "\t\t" +
str(emData[evIt][jetIt][0]) + "\t" + str(emData[evIt][jetIt][1])[:4] + "\t" + str(emData[evIt][jetIt][2])[:4])
if jetDiff>0:
for jetIt in range(len(hwData[evIt])):
jetCount+=1
if jetCount > len(emData[evIt]):
emData[evIt].append([0,0,0])
print(str(evIt) + "\t\t" + str(hwData[evIt][jetIt][0]) + "\t" + str(hwData[evIt][jetIt][1])[:4] + "\t" + str(hwData[evIt][jetIt][2])[:4] + "\t\t" +
str(emData[evIt][jetIt][0]) + "\t" + str(emData[evIt][jetIt][1])[:4] + "\t" + str(emData[evIt][jetIt][2])[:4])
if jetDiff<0:
for jetIt in range(len(emData[evIt])):
jetCount+=1
if jetCount > len(hwData[evIt]):
hwData[evIt].append([0,0,0])
print(str(evIt) + "\t\t" + str(hwData[evIt][jetIt][0]) + "\t" + str(hwData[evIt][jetIt][1])[:4] + "\t" + str(hwData[evIt][jetIt][2])[:4] + "\t\t" +
str(emData[evIt][jetIt][0]) + "\t" + str(emData[evIt][jetIt][1])[:4] + "\t" + str(emData[evIt][jetIt][2])[:4])
fig, axs = plt.subplots(2,3, figsize=(20, 10), gridspec_kw={'height_ratios': [3, 1]})
fig.patch.set_facecolor( '#ffffff')
nPtHw, bPtHw = np.histogram([jet[0] for event in hwDataNoZ for jet in event], bins=50, range=(0,200))
nEtaHw, bEtaHw = np.histogram([jet[1] for event in hwDataNoZ for jet in event], bins=18, range=(0,1.5))
nPhiHw, bPhiHw = np.histogram([jet[2] for event in hwDataNoZ for jet in event], bins=8, range=(0,0.7))
meansPt = [0.5*(bPtHw[i] + bPtHw[i+1]) for i in range(len(nPtHw))]
meansEta = [0.5*(bEtaHw[i] + bEtaHw[i+1]) for i in range(len(nEtaHw))]
meansPhi = [0.5*(bPhiHw[i] + bPhiHw[i+1]) for i in range(len(nPhiHw))]
nPtEm = axs[0,0].hist([jet[0] for event in emDataNoZ for jet in event], bins=50, range=(0,200), histtype='bar', linewidth=1.5, label='Emulator', color='#929591', zorder=0)[0]
nEtaEm = axs[0,1].hist([jet[1] for event in emDataNoZ for jet in event], bins=18, range=(0,1.5), histtype='bar', linewidth=1.5, label='Emulator', color='#929591', zorder=0)[0]
nPhiEm = axs[0,2].hist([jet[2] for event in emDataNoZ for jet in event], bins=8, range=(0,0.7), histtype='bar', linewidth=1.5, label='Emulator', color='#929591', zorder=0)[0]
axs[0,0].scatter(meansPt, nPtHw, label='Firmware', c='#000000', linewidths=0.5, s=25, marker='+')
axs[0,1].scatter(meansEta, nEtaHw, label='Firmware', c='#000000', linewidths=0.5, s=25, marker='+')
axs[0,2].scatter(meansPhi, nPhiHw, label='Firmware', c='#000000', linewidths=0.5, s=25, marker='+')
axs[1,0].scatter(meansPt, [(hw/em) for hw,em in zip(nPtHw,nPtEm)] , c='#000000', linewidths=0.5, s=15, zorder=1)
axs[1,1].scatter(meansEta, [(hw/em) for hw,em in zip(nEtaHw,nEtaEm)], c='#000000', linewidths=0.5, s=15, zorder=1)
axs[1,2].scatter(meansPhi, [(hw/em) for hw,em in zip(nPhiHw,nPhiEm)], c='#000000', linewidths=0.5, s=15, zorder=1)
axs[1,0].axhline(y=1, linewidth=1, linestyle='--', c='#929591')
axs[1,1].axhline(y=1, linewidth=1, linestyle='--', c='#929591')
axs[1,2].axhline(y=1, linewidth=1, linestyle='--', c='#929591')
axs[1,0].set(ylim=(0.5,1.5))
axs[1,1].set(ylim=(0.5,1.5))
axs[1,2].set(ylim=(0.5,1.5))
axs[0,0].set(ylabel="Events")
axs[1,0].set(ylabel="FW / EMU")
axs[0,0].legend(prop={'size': 10})
axs[0,1].legend(prop={'size': 10})
axs[0,2].legend(prop={'size': 10})
ymaxPt = max(np.concatenate([nPtHw,nPtEm]))
ymaxEta = max(np.concatenate([nEtaHw,nEtaEm]))
ymaxPhi = max(np.concatenate([nPhiHw,nPhiEm]))
axs[0,0].set(xlim=(0,200))
axs[0,1].set(xlim=(0,1.5))
axs[0,2].set(xlim=(0,0.7))
axs[0,0].set(ylim=(0,ymaxPt +(0.05*ymaxPt)))
axs[0,1].set(ylim=(0,ymaxEta+(0.05*ymaxEta)))
axs[0,2].set(ylim=(0,ymaxPhi+(0.05*ymaxPhi)))
axs[0,0].set(xlabel="Jet $p_T$ (GeV)")
axs[0,1].set(xlabel="Jet $\eta$")
axs[0,2].set(xlabel="Jet $\phi$")
plt.savefig('ttbarPU200_3900.pdf', bbox_inches='tight')
|
dash-display-error-messages.py | oriolmirosa/dash-recipes | 932 | 11160718 | import dash
from dash.dependencies import Input, Output
import dash_core_components as dcc
import dash_html_components as html
import flask
import os
app = dash.Dash()
app.layout = html.Div([
dcc.Input(id='input', value=''),
html.Div(id='output')
])
@app.callback(
Output('output', 'children'),
[Input('input', 'value')])
def output(value):
flask.session['error-message'] = 'test'
if value == 'error':
1/0
else:
return value
app.scripts.append_script({
'external_url': '/static/dash-error-message-display.js'
})
@app.server.route('/static/<filename>.js')
def serve_script(filename):
print(('serving {}'.format(filename)))
if filename not in ['dash-error-message-display']:
raise Exception('"{}" is excluded from the allowed static files'.format(filename))
return flask.send_from_directory(os.getcwd(), '{}.js'.format(filename))
if __name__ == '__main__':
app.run_server(debug=True)
|
usaspending_api/agency/tests/integration/test_agency_budget_function.py | ststuck/usaspending-api | 217 | 11160737 | import pytest
from rest_framework import status
from usaspending_api.common.helpers.fiscal_year_helpers import current_fiscal_year
url = "/api/v2/agency/{code}/budget_function/{query_params}"
@pytest.mark.django_db
def test_budget_function_list_success(client, monkeypatch, agency_account_data, helpers):
helpers.mock_current_fiscal_year(monkeypatch)
resp = client.get(url.format(code="007", query_params=""))
expected_result = {
"fiscal_year": helpers.get_mocked_current_fiscal_year(),
"toptier_code": "007",
"messages": [],
"page_metadata": {
"hasNext": False,
"hasPrevious": False,
"next": None,
"page": 1,
"previous": None,
"total": 3,
"limit": 10,
},
"results": [
{
"gross_outlay_amount": 11100000.0,
"name": "NAME 1",
"obligated_amount": 111.0,
"children": [{"gross_outlay_amount": 11100000.0, "name": "NAME 1A", "obligated_amount": 111.0}],
},
{
"gross_outlay_amount": 100000.0,
"name": "NAME 6",
"obligated_amount": 100.0,
"children": [{"gross_outlay_amount": 100000.0, "name": "NAME 6A", "obligated_amount": 100.0}],
},
{
"gross_outlay_amount": 1000000.0,
"name": "NAME 5",
"obligated_amount": 10.0,
"children": [{"gross_outlay_amount": 1000000.0, "name": "NAME 5A", "obligated_amount": 10.0}],
},
],
}
assert resp.status_code == status.HTTP_200_OK
assert resp.json() == expected_result
query_params = "?fiscal_year=2017"
resp = client.get(url.format(code="008", query_params=query_params))
expected_result = {
"fiscal_year": 2017,
"toptier_code": "008",
"messages": [],
"page_metadata": {
"hasNext": False,
"hasPrevious": False,
"next": None,
"page": 1,
"previous": None,
"total": 1,
"limit": 10,
},
"results": [
{
"gross_outlay_amount": 10000.0,
"name": "<NAME>",
"obligated_amount": 1000.0,
"children": [{"gross_outlay_amount": 10000.0, "name": "<NAME>", "obligated_amount": 1000.0}],
}
],
}
assert resp.status_code == status.HTTP_200_OK
assert resp.json() == expected_result
# this agency has a record but the amounts are both 0, so we expect this return no results
query_params = "?fiscal_year=2016"
resp = client.get(url.format(code="010", query_params=query_params))
expected_result = {
"fiscal_year": 2016,
"toptier_code": "010",
"messages": [
"Account data powering this endpoint were first collected in "
"FY2017 Q2 under the DATA Act; as such, there are no data "
"available for prior fiscal years."
],
"page_metadata": {
"hasNext": False,
"hasPrevious": False,
"next": None,
"page": 1,
"previous": None,
"total": 0,
"limit": 10,
},
"results": [],
}
assert resp.status_code == status.HTTP_200_OK
assert resp.json() == expected_result
@pytest.mark.django_db
def test_budget_function_list_too_early(client, agency_account_data):
query_params = "?fiscal_year=2007"
resp = client.get(url.format(code="007", query_params=query_params))
assert resp.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
@pytest.mark.django_db
def test_budget_function_list_future(client, agency_account_data):
query_params = "?fiscal_year=" + str(current_fiscal_year() + 1)
resp = client.get(url.format(code="007", query_params=query_params))
assert resp.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
@pytest.mark.django_db
def test_budget_function_list_bad_sort(client, agency_account_data):
query_params = "?sort=not valid"
resp = client.get(url.format(code="007", query_params=query_params))
assert resp.status_code == status.HTTP_400_BAD_REQUEST
@pytest.mark.django_db
def test_budget_function_list_bad_order(client, agency_account_data):
query_params = "?order=not valid"
resp = client.get(url.format(code="007", query_params=query_params))
assert resp.status_code == status.HTTP_400_BAD_REQUEST
@pytest.mark.django_db
def test_budget_function_list_sort_by_name(client, monkeypatch, agency_account_data, helpers):
helpers.mock_current_fiscal_year(monkeypatch)
query_params = f"?fiscal_year={helpers.get_mocked_current_fiscal_year()}&order=asc&sort=name"
resp = client.get(url.format(code="007", query_params=query_params))
expected_result = {
"fiscal_year": helpers.get_mocked_current_fiscal_year(),
"toptier_code": "007",
"messages": [],
"page_metadata": {
"hasNext": False,
"hasPrevious": False,
"next": None,
"page": 1,
"previous": None,
"total": 3,
"limit": 10,
},
"results": [
{
"gross_outlay_amount": 11100000.0,
"name": "NAME 1",
"obligated_amount": 111.0,
"children": [{"gross_outlay_amount": 11100000.0, "name": "NAME 1A", "obligated_amount": 111.0}],
},
{
"gross_outlay_amount": 1000000.0,
"name": "NAME 5",
"obligated_amount": 10.0,
"children": [{"gross_outlay_amount": 1000000.0, "name": "NAME 5A", "obligated_amount": 10.0}],
},
{
"gross_outlay_amount": 100000.0,
"name": "NAME 6",
"obligated_amount": 100.0,
"children": [{"gross_outlay_amount": 100000.0, "name": "NAME 6A", "obligated_amount": 100.0}],
},
],
}
assert resp.status_code == status.HTTP_200_OK
assert resp.json() == expected_result
query_params = f"?fiscal_year={helpers.get_mocked_current_fiscal_year()}&order=desc&sort=name"
resp = client.get(url.format(code="007", query_params=query_params))
expected_result = {
"fiscal_year": helpers.get_mocked_current_fiscal_year(),
"toptier_code": "007",
"messages": [],
"page_metadata": {
"hasNext": False,
"hasPrevious": False,
"next": None,
"page": 1,
"previous": None,
"total": 3,
"limit": 10,
},
"results": [
{
"gross_outlay_amount": 100000.0,
"name": "NAME 6",
"obligated_amount": 100.0,
"children": [{"gross_outlay_amount": 100000.0, "name": "NAME 6A", "obligated_amount": 100.0}],
},
{
"gross_outlay_amount": 1000000.0,
"name": "NAME 5",
"obligated_amount": 10.0,
"children": [{"gross_outlay_amount": 1000000.0, "name": "NAME 5A", "obligated_amount": 10.0}],
},
{
"gross_outlay_amount": 11100000.0,
"name": "NAME 1",
"obligated_amount": 111.0,
"children": [{"gross_outlay_amount": 11100000.0, "name": "NAME 1A", "obligated_amount": 111.0}],
},
],
}
assert resp.status_code == status.HTTP_200_OK
assert resp.json() == expected_result
@pytest.mark.django_db
def test_budget_function_list_sort_by_obligated_amount(client, monkeypatch, agency_account_data, helpers):
helpers.mock_current_fiscal_year(monkeypatch)
query_params = f"?fiscal_year={helpers.get_mocked_current_fiscal_year()}&order=asc&sort=obligated_amount"
resp = client.get(url.format(code="007", query_params=query_params))
expected_result = {
"fiscal_year": helpers.get_mocked_current_fiscal_year(),
"toptier_code": "007",
"messages": [],
"page_metadata": {
"hasNext": False,
"hasPrevious": False,
"next": None,
"page": 1,
"previous": None,
"total": 3,
"limit": 10,
},
"results": [
{
"gross_outlay_amount": 1000000.0,
"name": "NAME 5",
"obligated_amount": 10.0,
"children": [{"gross_outlay_amount": 1000000.0, "name": "NAME 5A", "obligated_amount": 10.0}],
},
{
"gross_outlay_amount": 100000.0,
"name": "NAME 6",
"obligated_amount": 100.0,
"children": [{"gross_outlay_amount": 100000.0, "name": "NAME 6A", "obligated_amount": 100.0}],
},
{
"gross_outlay_amount": 11100000.0,
"name": "NAME 1",
"obligated_amount": 111.0,
"children": [{"gross_outlay_amount": 11100000.0, "name": "NAME 1A", "obligated_amount": 111.0}],
},
],
}
assert resp.status_code == status.HTTP_200_OK
assert resp.json() == expected_result
query_params = f"?fiscal_year={helpers.get_mocked_current_fiscal_year()}&order=desc&sort=obligated_amount"
resp = client.get(url.format(code="007", query_params=query_params))
expected_result = {
"fiscal_year": helpers.get_mocked_current_fiscal_year(),
"toptier_code": "007",
"messages": [],
"page_metadata": {
"hasNext": False,
"hasPrevious": False,
"next": None,
"page": 1,
"previous": None,
"total": 3,
"limit": 10,
},
"results": [
{
"gross_outlay_amount": 11100000.0,
"name": "NAME 1",
"obligated_amount": 111.0,
"children": [{"gross_outlay_amount": 11100000.0, "name": "NAME 1A", "obligated_amount": 111.0}],
},
{
"gross_outlay_amount": 100000.0,
"name": "NAME 6",
"obligated_amount": 100.0,
"children": [{"gross_outlay_amount": 100000.0, "name": "NAME 6A", "obligated_amount": 100.0}],
},
{
"gross_outlay_amount": 1000000.0,
"name": "NAME 5",
"obligated_amount": 10.0,
"children": [{"gross_outlay_amount": 1000000.0, "name": "NAME 5A", "obligated_amount": 10.0}],
},
],
}
assert resp.status_code == status.HTTP_200_OK
assert resp.json() == expected_result
@pytest.mark.django_db
def test_budget_function_list_sort_by_gross_outlay_amount(client, monkeypatch, agency_account_data, helpers):
helpers.mock_current_fiscal_year(monkeypatch)
query_params = f"?fiscal_year={helpers.get_mocked_current_fiscal_year()}&order=asc&sort=gross_outlay_amount"
resp = client.get(url.format(code="007", query_params=query_params))
expected_result = {
"fiscal_year": helpers.get_mocked_current_fiscal_year(),
"toptier_code": "007",
"messages": [],
"page_metadata": {
"hasNext": False,
"hasPrevious": False,
"next": None,
"page": 1,
"previous": None,
"total": 3,
"limit": 10,
},
"results": [
{
"gross_outlay_amount": 100000.0,
"name": "NAME 6",
"obligated_amount": 100.0,
"children": [{"gross_outlay_amount": 100000.0, "name": "NAME 6A", "obligated_amount": 100.0}],
},
{
"gross_outlay_amount": 1000000.0,
"name": "NAME 5",
"obligated_amount": 10.0,
"children": [{"gross_outlay_amount": 1000000.0, "name": "NAME 5A", "obligated_amount": 10.0}],
},
{
"gross_outlay_amount": 11100000.0,
"name": "NAME 1",
"obligated_amount": 111.0,
"children": [{"gross_outlay_amount": 11100000.0, "name": "NAME 1A", "obligated_amount": 111.0}],
},
],
}
assert resp.status_code == status.HTTP_200_OK
assert resp.json() == expected_result
query_params = f"?fiscal_year={helpers.get_mocked_current_fiscal_year()}&order=desc&sort=gross_outlay_amount"
resp = client.get(url.format(code="007", query_params=query_params))
expected_result = {
"fiscal_year": helpers.get_mocked_current_fiscal_year(),
"toptier_code": "007",
"messages": [],
"page_metadata": {
"hasNext": False,
"hasPrevious": False,
"next": None,
"page": 1,
"previous": None,
"total": 3,
"limit": 10,
},
"results": [
{
"gross_outlay_amount": 11100000.0,
"name": "NAME 1",
"obligated_amount": 111.0,
"children": [{"gross_outlay_amount": 11100000.0, "name": "NAME 1A", "obligated_amount": 111.0}],
},
{
"gross_outlay_amount": 1000000.0,
"name": "NAME 5",
"obligated_amount": 10.0,
"children": [{"gross_outlay_amount": 1000000.0, "name": "NAME 5A", "obligated_amount": 10.0}],
},
{
"gross_outlay_amount": 100000.0,
"name": "NAME 6",
"obligated_amount": 100.0,
"children": [{"gross_outlay_amount": 100000.0, "name": "NAME 6A", "obligated_amount": 100.0}],
},
],
}
assert resp.status_code == status.HTTP_200_OK
assert resp.json() == expected_result
@pytest.mark.django_db
def test_budget_function_list_search(client, monkeypatch, agency_account_data, helpers):
helpers.mock_current_fiscal_year(monkeypatch)
query_params = f"?fiscal_year={helpers.get_mocked_current_fiscal_year()}&filter=NAME 6"
resp = client.get(url.format(code="007", query_params=query_params))
expected_result = {
"fiscal_year": helpers.get_mocked_current_fiscal_year(),
"toptier_code": "007",
"messages": [],
"page_metadata": {
"hasNext": False,
"hasPrevious": False,
"next": None,
"page": 1,
"previous": None,
"total": 1,
"limit": 10,
},
"results": [
{
"gross_outlay_amount": 100000.0,
"name": "NAME 6",
"obligated_amount": 100.0,
"children": [{"gross_outlay_amount": 100000.0, "name": "NAME 6A", "obligated_amount": 100.0}],
}
],
}
assert resp.status_code == status.HTTP_200_OK
assert resp.json() == expected_result
query_params = f"?fiscal_year={helpers.get_mocked_current_fiscal_year()}&filter=AME 5"
resp = client.get(url.format(code="007", query_params=query_params))
expected_result = {
"fiscal_year": helpers.get_mocked_current_fiscal_year(),
"toptier_code": "007",
"messages": [],
"page_metadata": {
"hasNext": False,
"hasPrevious": False,
"next": None,
"page": 1,
"previous": None,
"total": 1,
"limit": 10,
},
"results": [
{
"gross_outlay_amount": 1000000.0,
"name": "NAME 5",
"obligated_amount": 10.0,
"children": [{"gross_outlay_amount": 1000000.0, "name": "NAME 5A", "obligated_amount": 10.0}],
}
],
}
assert resp.status_code == status.HTTP_200_OK
assert resp.json() == expected_result
@pytest.mark.django_db
def test_budget_function_list_pagination(client, agency_account_data):
query_params = f"?fiscal_year=2020&limit=2&page=1"
resp = client.get(url.format(code="007", query_params=query_params))
expected_result = {
"fiscal_year": 2020,
"toptier_code": "007",
"messages": [],
"page_metadata": {
"hasNext": True,
"hasPrevious": False,
"next": 2,
"page": 1,
"previous": None,
"total": 3,
"limit": 2,
},
"results": [
{
"gross_outlay_amount": 11100000.0,
"name": "NAME 1",
"obligated_amount": 111.0,
"children": [{"gross_outlay_amount": 11100000.0, "name": "NAME 1A", "obligated_amount": 111.0}],
},
{
"gross_outlay_amount": 100000.0,
"name": "NAME 6",
"obligated_amount": 100.0,
"children": [{"gross_outlay_amount": 100000.0, "name": "NAME 6A", "obligated_amount": 100.0}],
},
],
}
assert resp.status_code == status.HTTP_200_OK
assert resp.json() == expected_result
query_params = f"?fiscal_year=2020&limit=2&page=2"
resp = client.get(url.format(code="007", query_params=query_params))
expected_result = {
"fiscal_year": 2020,
"toptier_code": "007",
"messages": [],
"page_metadata": {
"hasNext": False,
"hasPrevious": True,
"next": None,
"page": 2,
"previous": 1,
"total": 3,
"limit": 2,
},
"results": [
{
"gross_outlay_amount": 1000000.0,
"name": "<NAME>",
"obligated_amount": 10.0,
"children": [{"gross_outlay_amount": 1000000.0, "name": "<NAME>", "obligated_amount": 10.0}],
},
],
}
assert resp.status_code == status.HTTP_200_OK
assert resp.json() == expected_result
|
educative/linkedlists/reverseSubList.py | monishshah18/python-cp-cheatsheet | 140 | 11160746 | from __future__ import print_function
class Node:
def __init__(self, value, next=None):
self.value = value
self.next = next
def print_list(self):
temp = self
while temp is not None:
print(temp.value, end=" ")
temp = temp.next
print()
"""
# time: 14 M
Error: omitted subNode = node
"""
def reverse_sub_list(head, p, q):
node = head
OrigPrev = None
while node and node.value != p:
OrigPrev = node
node = node.next
subListHead = node
subNode = node
prev = None
while subNode and subNode.value != q:
subNode.next, prev, subNode = prev, subNode, subNode.next
OrigPrev.next = subNode # 1->4
subListHead.next = subNode.next # 2->5
subNode.next = prev # 4->3
return head
def main():
head = Node(1)
head.next = Node(2)
head.next.next = Node(3)
head.next.next.next = Node(4)
head.next.next.next.next = Node(5)
print("Nodes of original LinkedList are: ", end='')
head.print_list()
result = reverse_sub_list(head, 2, 4)
print("Nodes of reversed LinkedList are: ", end='')
result.print_list()
main()
|
vumi/tests/test_testutils.py | seidu626/vumi | 199 | 11160750 | from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from twisted.web.client import Agent, readBody
from vumi.tests.utils import LogCatcher, MockHttpServer
from vumi import log
from vumi.tests.helpers import VumiTestCase
class TestLogCatcher(VumiTestCase):
def test_simple_catching(self):
lc = LogCatcher()
with lc:
log.info("Test")
self.assertEqual(lc.messages(), ["Test"])
def test_system_filtering(self):
lc = LogCatcher(system="^ab")
with lc:
log.info("Test 1", system="abc")
log.info("Test 2", system="def")
self.assertEqual(lc.messages(), ["Test 1"])
def test_message_filtering(self):
lc = LogCatcher(message="^Keep")
with lc:
log.info("Keep this")
log.info("Discard this")
self.assertEqual(lc.messages(), ["Keep this"])
def test_message_concatenation(self):
lc = LogCatcher()
with lc:
log.info("Part 1", "Part 2")
self.assertEqual(lc.messages(), ["Part 1 Part 2"])
class TestMockHttpServer(VumiTestCase):
def start_mock_server(self, mock_server):
"""
Start and return the given MockHttpServer with suitable cleanup.
"""
self.add_cleanup(mock_server.stop)
d = mock_server.start()
return d.addCallback(lambda _: mock_server)
@inlineCallbacks
def assert_response(self, response, code, body=None):
self.assertEqual(response.code, code)
response_body = yield readBody(response)
if body is not None:
self.assertEqual(response_body, body)
@inlineCallbacks
def test_simple_GET(self):
"""
MockHttpHelper can handle a simple HTTP GET request.
"""
requests = []
mock_server = yield self.start_mock_server(
MockHttpServer(lambda req: requests.append(req) or "hi"))
agent = Agent(reactor)
response = yield agent.request("GET", mock_server.url + "path")
# We got a valid request and returned a valid response.
[request] = requests
self.assertEqual(request.method, "GET")
self.assertEqual(request.path, "/path")
yield self.assert_response(response, 200, "hi")
@inlineCallbacks
def test_simple_HEAD(self):
"""
MockHttpHelper can handle a simple HTTP HEAD request.
"""
requests = []
mock_server = yield self.start_mock_server(
MockHttpServer(lambda req: requests.append(req) or "hi"))
agent = Agent(reactor)
response = yield agent.request("HEAD", mock_server.url + "path")
# We got a valid request and returned a valid response.
[request] = requests
self.assertEqual(request.method, "HEAD")
self.assertEqual(request.path, "/path")
yield self.assert_response(response, 200, "")
@inlineCallbacks
def test_simple_PUT(self):
"""
MockHttpHelper can handle a simple HTTP PUT request.
"""
requests = []
mock_server = yield self.start_mock_server(
MockHttpServer(lambda req: requests.append(req) or "hi"))
agent = Agent(reactor)
response = yield agent.request("PUT", mock_server.url + "path")
# We got a valid request and returned a valid response.
[request] = requests
self.assertEqual(request.method, "PUT")
self.assertEqual(request.path, "/path")
yield self.assert_response(response, 200, "hi")
@inlineCallbacks
def test_simple_POST(self):
"""
MockHttpHelper can handle a simple HTTP POST request.
"""
requests = []
mock_server = yield self.start_mock_server(
MockHttpServer(lambda req: requests.append(req) or "hi"))
agent = Agent(reactor)
response = yield agent.request("POST", mock_server.url + "path")
# We got a valid request and returned a valid response.
[request] = requests
self.assertEqual(request.method, "POST")
self.assertEqual(request.path, "/path")
yield self.assert_response(response, 200, "hi")
@inlineCallbacks
def test_default_handler(self):
"""
The default request handler puts the request in a queue.
"""
mock_server = yield self.start_mock_server(MockHttpServer())
agent = Agent(reactor)
request_d = mock_server.queue.get()
self.assertNoResult(request_d)
response = yield agent.request("GET", mock_server.url + "path")
# We got a valid request and returned a valid (error) response.
request = self.successResultOf(request_d)
self.assertEqual(request.method, "GET")
self.assertEqual(request.path, "/path")
yield self.assert_response(response, 500)
|
bindings/python/examples/composite_example.py | lucas-tortora/iota.rs | 256 | 11160754 | import os
from urllib.request import urlopen as uReq
from urllib.error import HTTPError
import iota_client
import sqlite3
def consolidation():
node_url = "https://api.thin-hornet-0.h.chrysalis-devnet.iota.cafe"
client = iota_client.Client(nodes_name_password=[[node_url]])
seed = os.getenv('MY_IOTA_SEED')
address = client.consolidate_funds(seed, 0, 0, 150)
print(f'Funds consolidated to {address}')
def create_max_dust():
node_url = "https://api.thin-hornet-0.h.chrysalis-devnet.iota.cafe"
client = iota_client.Client(nodes_name_password=[[node_url]])
seed = os.getenv('MY_IOTA_SEED')
seed_2 = os.getenv('MY_IOTA_SEED_2')
new_addresses = client.get_addresses(
seed_2, input_range_begin=0, input_range_end=1)
print(f'New addresses: {new_addresses}')
try:
dust_allowance_message = client.message(seed=seed, dust_allowance_outputs=[
{'address': new_addresses[0][0],
'amount': 10_000_000}])
message_id = dust_allowance_message['message_id']
print(f'Message id: {message_id}')
client.retry_until_included(message_id)
# Split funds to own addresses
addresses = client.get_addresses(
seed, input_range_begin=1, input_range_end=101)
outputs = []
for address in addresses:
outputs.append(
{'address': address[0],
'amount': 1_000_001})
message = client.message(seed=seed, outputs=outputs)
message_id = message['message_id']
print(
f'First transaction sent: https://explorer.iota.org/devnet/message/{message_id}')
client.retry_until_included(message_id)
# At this point we have 100 Mi on 100 addresses and we will just send it to the final address
# We use the outputs directly so we don't double spend them
initial_outputs = []
for index, output in enumerate(message['payload']['transaction']['essence']['outputs']):
# Only include 1 Mi outputs, otherwise it fails for the remainder address
if output['signature_locked_single']['amount'] == 1_000_001:
transaction_id = message['payload']['transaction']['essence']['inputs'][0]['transaction_id']
initial_outputs.append(
{'transaction_id': transaction_id.encode('ascii'), 'index': index})
first_address_old_seed = client.get_addresses(
seed, input_range_begin=0, input_range_end=1)
for i, output in enumerate(initial_outputs):
message = client.message(seed, inputs=[output], input_range_begin=1, input_range_end=101, outputs=[
{'address': new_addresses[0][0], 'amount': 1},
{'address': first_address_old_seed[0][0], 'amount': 1_000_000}])
message_id = message['message_id']
print(
f'Transaction {i} sent: https://explorer.iota.org/devnet/message/{message_id}')
client.retry_until_included(message_id)
# Send all funds back to first address
total_balance = client.get_balance(seed)
print(f'Total balance: {total_balance}')
message = client.message(seed=seed, outputs=[
{'address': first_address_old_seed[0][0],
'amount': total_balance}])
message_id = message['message_id']
print(
f'Final tx sent: https://explorer.iota.org/devnet/message/{message_id}')
client.retry_until_included(message_id)
except ValueError as e:
print(e)
print('Website to get test tokens: https://faucet.chrysalis-devnet.iota.cafe/')
def custom_inputs():
node_url = "https://api.thin-hornet-0.h.chrysalis-devnet.iota.cafe"
client = iota_client.Client(nodes_name_password=[[node_url]])
seed = os.getenv('MY_IOTA_SEED')
addresses = client.get_addresses(
seed, input_range_begin=0, input_range_end=1)
print(f'addresses: {addresses}')
outputs = client.get_address_outputs(addresses[0][0])
print(f'outputs: {outputs}')
try:
message = client.message(seed=seed, inputs=[outputs], outputs=[
{'address': 'atoi1qzt0nhsf38nh6rs4p6zs5knqp6psgha9wsv74uajqgjmwc75ugupx3y7x0r',
'amount': 1000}])
message_id = message['message_id']
print(
f'Transaction sent: https://explorer.iota.org/devnet/message/{message_id}')
except:
print(f'Please send tokens to {addresses[0][0]}')
print('Website to get test tokens: https://faucet.chrysalis-devnet.iota.cafe/')
def custom_parent():
node_url = "https://api.thin-hornet-0.h.chrysalis-devnet.iota.cafe"
client = iota_client.Client(nodes_name_password=[[node_url]])
parent = 'b5634e05a7c665d7f87330a53633f001a5d1d96b346dc98dc225c4d6c204f23b'
try:
message = client.message(parents=[parent])
message_id = message['message_id']
print(
f'Empty message sent: https://explorer.iota.org/devnet/message/{message_id}')
except:
print('Please select a valid parent message id')
def custom_payload():
node_url = "https://api.thin-hornet-0.h.chrysalis-devnet.iota.cafe"
client = iota_client.Client(nodes_name_password=[[node_url]])
indexation_key = 'My indexation payload key'
indexation_data = 'My indexation payload Data'
message = client.message(index=indexation_key, data_str=indexation_data)
print(f'The sent message: {message}')
def dust():
node_url = "https://api.thin-hornet-0.h.chrysalis-devnet.iota.cafe"
client = iota_client.Client(nodes_name_password=[[node_url]])
seed = os.getenv('MY_IOTA_SEED')
try:
message = client.message(seed=seed, dust_allowance_outputs=[
{'address': 'atoi1qpnrumvaex24dy0duulp4q07lpa00w20ze6jfd0xly422kdcjxzakzsz5kf',
'amount': 1_000_000}],
outputs=[{'address': 'atoi1qpnrumvaex24dy0duulp4q07lpa00w20ze6jfd0xly422kdcjxzakzsz5kf',
'amount': 1}])
message_id = message['message_id']
print(
f'First transaction sent: https://explorer.iota.org/devnet/message/{message_id}')
except ValueError as e:
print(e)
print('Website to get test tokens: https://faucet.chrysalis-devnet.iota.cafe/')
def get_fund():
node_url = "https://api.thin-hornet-0.h.chrysalis-devnet.iota.cafe"
client = iota_client.Client(nodes_name_password=[[node_url]])
seed = os.getenv('MY_IOTA_SEED')
addresses = client.get_addresses(
seed, account_index=0, input_range_begin=0, input_range_end=1)
print(f'Addresses: {addresses}')
address = addresses[0][0]
try:
my_url = f'https://faucet.chrysalis-devnet.iota.cafe/api?address={address}'
print(f'my_url: {my_url}')
uClient = uReq(my_url)
response = uClient.read()
print(response)
uClient.close()
except HTTPError as e:
print(e)
print('Please try it after 60 secs')
def indexation():
node_url = "https://api.thin-hornet-0.h.chrysalis-devnet.iota.cafe"
client = iota_client.Client(nodes_name_password=[[node_url]])
indexation_key = 'Hello'
indexation_data = 'Tangle'
message = client.message(index=indexation_key, data_str=indexation_data)
fetched_message_ids = client.get_message_index('Hello')
print(f'Fetched message ids: {fetched_message_ids}')
fetched_message_data = client.get_message_data(fetched_message_ids[0])
print(f'Fetched message data: {fetched_message_data}')
def message_time():
node_url = "https://api.thin-hornet-0.h.chrysalis-devnet.iota.cafe"
client = iota_client.Client(nodes_name_password=[[node_url]])
indexation_key = 'Hello'
indexation_data = 'Tangle'
message = client.message(index=indexation_key, data_str=indexation_data)
print(message)
message_id = message['message_id']
print(f'Message id: {message_id}')
client.retry_until_included(message_id)
metadata = client.get_message_metadata(message_id)
print(f'Metadata: {metadata}')
milestone_index = metadata['milestone_index']
print(f'Milestone index: {milestone_index}')
    if milestone_index is not None:
milestone = client.get_milestone(milestone_index)
print(f'Message got referenced by milestone {milestone}')
else:
print('Message is not referenced by a milestone')
def mnemonic():
node_url = "https://api.thin-hornet-0.h.chrysalis-devnet.iota.cafe"
client = iota_client.Client(nodes_name_password=[[node_url]])
mnemonic = client.generate_mnemonic()
print(f'Generated mnemonic: {mnemonic}')
seed = client.mnemonic_to_hex_seed(mnemonic)
addresses = client.get_addresses(
seed, input_range_begin=0, input_range_end=2)
print(f'List of generated public addresses: {addresses}')
def multiple_outputs():
node_url = "https://api.thin-hornet-0.h.chrysalis-devnet.iota.cafe"
client = iota_client.Client(nodes_name_password=[[node_url]])
# Seed must contain non-zero balance
seed = os.getenv('MY_IOTA_SEED')
try:
message = client.message(seed,
outputs=[{'address': 'atoi1qpnrumvaex24dy0duulp4q07lpa00w20ze6jfd0xly422kdcjxzakzsz5kf',
'amount': 3_000_000},
{'address': 'atoi1qz4sfmp605vnj6fxt0sf0cwclffw5hpxjqkf6fthyd74r9nmmu337m3lwl2',
'amount': 2_800_000},
{'address': 'atoi1qzumqjtucwglfja746vvmr7n54ep88kcu2qvaquqrnx9qs2z8f4t6d7muyq',
'amount': 3_000_000}])
message_id = message['message_id']
print(
f'Transaction sent: https://explorer.iota.org/devnet/message/{message_id}')
except ValueError as e:
print(e)
print('Website to get test tokens: https://faucet.chrysalis-devnet.iota.cafe/')
def peer():
node_url = "https://api.thin-hornet-0.h.chrysalis-devnet.iota.cafe"
client = iota_client.Client(nodes_name_password=[[node_url]])
try:
peers = client.get_peers()
print(f'Peers: {peers}')
except ValueError as e:
print(e)
print("You don't have the permission to get the peers.")
def quorum():
node_1 = 'https://api.thin-hornet-0.h.chrysalis-devnet.iota.cafe'
node_2 = 'https://api.thin-hornet-1.h.chrysalis-devnet.iota.cafe'
node_3 = 'https://api.lb-0.h.chrysalis-devnet.iota.cafe'
try:
client = iota_client.Client(nodes_name_password=[
[node_1], [node_2], [node_3]], quorum=True, quorum_size=3, quorum_threshold=66)
seed = os.getenv('MY_IOTA_SEED')
seed_balance = client.get_balance(seed)
print(f'Account balance: {seed_balance}')
except ValueError as e:
print(e)
print('Please provide enough healthy nodes.')
def search_address():
node_url = "https://api.thin-hornet-0.h.chrysalis-devnet.iota.cafe"
client = iota_client.Client(nodes_name_password=[[node_url]])
seed = os.getenv('MY_IOTA_SEED')
address = client.get_addresses(
seed, input_range_begin=9, input_range_end=10)[0][0]
print(f'Address: {address}')
info = client.get_info()
bech32_hrp = info['nodeinfo']['bech32_hrp']
searched_address = client.search_address(
seed, bech32_hrp, 0, 0, 10, address)
print(
f'Address index: {searched_address[0]}\nIs internal address: {searched_address[1]}')
def send_all():
node_url = "https://api.thin-hornet-0.h.chrysalis-devnet.iota.cafe"
client = iota_client.Client(nodes_name_password=[[node_url]])
seed = os.getenv('MY_IOTA_SEED')
seed_2 = os.getenv('MY_IOTA_SEED_2')
total_balance = client.get_balance(seed, account_index=0)
print(f'Total balance: {total_balance}')
try:
address = client.get_addresses(
seed_2, input_range_begin=0, input_range_end=1)[0][0]
print(f'Address: {address}')
        message = client.message(seed, outputs=[
{'address': address,
'amount': total_balance}])
message_id = message['message_id']
print(
f'Transaction sent: https://explorer.iota.org/devnet/message/{message_id}')
client.retry_until_included(message_id)
except ValueError as e:
print(e)
print('Website to get test tokens: https://faucet.chrysalis-devnet.iota.cafe/')
def split_all():
node_url = "https://api.thin-hornet-0.h.chrysalis-devnet.iota.cafe"
client = iota_client.Client(nodes_name_password=[[node_url]])
seed = os.getenv('MY_IOTA_SEED')
seed_2 = os.getenv('MY_IOTA_SEED_2')
total_balance = client.get_balance(seed)
print(f'Total balance: {total_balance}')
if total_balance == 0:
print('Addresses belonging to the seed should contain tokens!')
print('Website to get test tokens: https://faucet.chrysalis-devnet.iota.cafe/')
return
available = total_balance
# Get the ceiling of the input range end
input_range_end = int((total_balance+999_999)/1_000_000)
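    # Illustrative arithmetic (added note, not in the original example): a balance
    # of 2_500_000 gives input_range_end == 3; the loop below then produces outputs
    # of 1_000_000 and 1_500_000 iotas before breaking.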
addresses_from_seed_2 = client.get_addresses(
seed_2, input_range_begin=0, input_range_end=input_range_end)
outputs = []
for i in range(input_range_end):
amount = 1_000_000
# Don't add more than we have or is allowed; One less here for remaining iotas
if available == 0 or i > 125:
break
available -= amount
# Add last amount so we don't create dust
if available < amount:
amount += available
available = 0
        outputs.append({'address': addresses_from_seed_2[i][0], 'amount': amount})
message = client.message(seed, outputs=outputs)
message_id = message['message_id']
print(
f'Transaction sent: https://explorer.iota.org/devnet/message/{message_id}')
def split_outputs_single_address():
node_url = "https://api.thin-hornet-0.h.chrysalis-devnet.iota.cafe"
client = iota_client.Client(nodes_name_password=[[node_url]])
seed = os.getenv('MY_IOTA_SEED')
# Split funds to own addresses
addresses = client.get_addresses(
seed, input_range_begin=1, input_range_end=101)
outputs = []
for output in addresses:
address = output[0]
outputs.append({'address': address, 'amount': 1_000_000})
print(f'Outputs: {outputs}')
try:
message = client.message(seed, outputs=outputs)
message_id = message['message_id']
print(
            f'First transaction sent: https://explorer.iota.org/devnet/message/{message_id}')
client.retry_until_included(message_id)
# At this point we have 100 Mi on 100 addresses and we will just send it to the final address
# We use the outputs directly so we don't double spend them
initial_outputs = []
for index, output in enumerate(message['payload']['transaction']['essence']['outputs']):
if output['signature_locked_single']['amount'] == 1_000_000:
transaction_id = message['payload']['transaction']['essence']['inputs'][0]['transaction_id']
initial_outputs.append(
{'transaction_id': transaction_id.encode('ascii'), 'index': index})
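        # Note added for clarity: each entry in initial_outputs is assumed to look
        # like {'transaction_id': b'<hex transaction id>', 'index': 0}, matching the
        # inputs passed to client.message() in the loop below.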
for i, output in enumerate(initial_outputs):
            message = client.message(seed, inputs=[output], input_range_begin=1, input_range_end=101, outputs=[
{'address': 'atoi1qzt0nhsf38nh6rs4p6zs5knqp6psgha9wsv74uajqgjmwc75ugupx3y7x0r', 'amount': 1_000_000}])
message_id = message['message_id']
print(
f'Transaction {i} sent: https://explorer.iota.org/devnet/message/{message_id}')
client.retry_until_included(message_id)
except ValueError as e:
print(e)
print('Website to get test tokens: https://faucet.chrysalis-devnet.iota.cafe/')
def storage():
node_url = "https://api.thin-hornet-0.h.chrysalis-devnet.iota.cafe"
client = iota_client.Client(nodes_name_password=[[node_url]])
indexation_key = 'Hello'
indexation_data = 'Tangle'
message = client.message(index=indexation_key, data_str=indexation_data)
message_id = message['message_id']
db = 'my-storage.db'
con = sqlite3.connect(db)
cur = con.cursor()
# Create table
cur.execute('''CREATE TABLE message_ids (message_id text)''')
# Insert a row of data
cur.execute(f"INSERT INTO message_ids VALUES ('{message_id}')")
# Save (commit) the changes
con.commit()
# We can also close the connection if we are done with it.
# Just be sure any changes have been committed or they will be lost.
con.close()
# Connect to the database and get the message ids
con = sqlite3.connect(db)
cur = con.cursor()
for row in cur.execute('SELECT * FROM message_ids'):
print(f'Message ID from storage: {row[0]}')
def transaction():
node_url = "https://api.thin-hornet-0.h.chrysalis-devnet.iota.cafe"
client = iota_client.Client(nodes_name_password=[[node_url]])
seed = os.getenv('MY_IOTA_SEED')
seed_2 = os.getenv('MY_IOTA_SEED_2')
addresses_seed_2 = client.get_addresses(
seed_2, input_range_begin=0, input_range_end=1)
address = addresses_seed_2[0][0]
try:
# Send the first transaction
message = client.message(
seed, outputs=[{'address': address, 'amount': 3_000_000}])
message_id = message['message_id']
print(
f'First transaction sent: https://explorer.iota.org/devnet/message/{message_id}')
client.retry_until_included(message_id)
# Send the second transaction
addresses_seed_2 = client.get_addresses(
seed_2, input_range_begin=1, input_range_end=2)
address = addresses_seed_2[0][0]
message = client.message(
seed, outputs=[{'address': address, 'amount': 3_000_000}])
message_id = message['message_id']
print(
f'Second transaction sent: https://explorer.iota.org/devnet/message/{message_id}')
client.retry_until_included(message_id)
# Send the third transaction
addresses_seed_2 = client.get_addresses(
seed_2, input_range_begin=2, input_range_end=3)
address = addresses_seed_2[0][0]
message = client.message(
seed, outputs=[{'address': address, 'amount': 3_000_000}])
message_id = message['message_id']
print(
f'Third transaction sent: https://explorer.iota.org/devnet/message/{message_id}')
client.retry_until_included(message_id)
# Send the last transaction
outputs = []
outputs.append({'address': client.get_addresses(
seed, input_range_begin=1, input_range_end=2)[0][0], 'amount': 3_000_000})
outputs.append({'address': client.get_addresses(
seed, input_range_begin=2, input_range_end=3)[0][0], 'amount': 3_000_000})
        message = client.message(seed_2, outputs=outputs)
message_id = message['message_id']
print(
f'Last transaction sent: https://explorer.iota.org/devnet/message/{message_id}')
client.retry_until_included(message_id)
message_metadata = client.get_message_metadata(message_id)
ledger_inclusion_state = message_metadata['ledger_inclusion_state']
print(f'Ledger Inclusion State: {ledger_inclusion_state}')
except ValueError as e:
print(e)
print('Website to get test tokens: https://faucet.chrysalis-devnet.iota.cafe/')
def txspam():
node_url = "https://api.thin-hornet-0.h.chrysalis-devnet.iota.cafe"
client = iota_client.Client(nodes_name_password=[[node_url]])
seed = os.getenv('MY_IOTA_SEED')
# Split funds to own addresses
addresses = client.get_addresses(
seed, account_index=0, input_range_begin=0, input_range_end=10)
outputs = []
for output in addresses:
outputs.append({'address': output[0], 'amount': 1_000_000})
print(f'Outputs: {outputs}')
try:
message = client.message(seed, outputs=outputs)
print(message)
message_id = message['message_id']
print(
f'First transaction sent: https://explorer.iota.org/devnet/message/{message_id}')
client.retry_until_included(message_id)
# At this point we have 10 Mi on 10 addresses and we will just send it to their addresses again
# Use own outputs directly so we don't double spend them
initial_outputs = []
for index, initial_output in enumerate(message['payload']['transaction']['essence']['outputs']):
transaction_id = message['payload']['transaction']['essence']['inputs'][0]['transaction_id']
initial_outputs.append(
{'transaction_id': transaction_id.encode('ascii'), 'index': index})
for i, output in enumerate(addresses):
            message = client.message(seed, inputs=[initial_outputs[i]], outputs=[
{'address': output[0], 'amount': 1_000_000}])
message_id = message['message_id']
print(
f'Transaction sent: https://explorer.iota.org/devnet/message/{message_id}')
except ValueError as e:
print(e)
print('Website to get test tokens: https://faucet.chrysalis-devnet.iota.cafe/')
if __name__ == '__main__':
"""Please uncomment the example function to use it.
"""
consolidation()
# create_max_dust()
# custom_inputs()
# custom_parent()
# custom_payload()
# dust()
# get_fund()
# indexation()
# message_time()
# mnemonic()
# multiple_outputs()
# peer()
# quorum()
# search_address()
# send_all()
# split_all()
# split_outputs_single_address()
# storage()
# transaction()
# txspam()
|
tests/basics/set_pop.py | learnforpractice/micropython-cpp | 692 | 11160797 | <gh_stars>100-1000
s = {1}
print(s.pop())
try:
print(s.pop(), "!!!")
except KeyError:
pass
else:
print("Failed to raise KeyError")
# this tests an optimisation in mp_set_remove_first
# N must not be equal to one of the values in hash_allocation_sizes
N = 11
s = set(range(N))
while s:
print(s.pop()) # last pop() should trigger the optimisation
for i in range(N):
s.add(i) # check that we can add the numbers back to the set
print(list(s))
|
tensorflow_model_analysis/metrics/tf_metric_accumulators.py | jaymessina3/model-analysis | 1,118 | 11160810 | # Lint as: python3
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF metric accumulators."""
from __future__ import absolute_import
from __future__ import division
# Standard __future__ imports
from __future__ import print_function
from typing import Any, Callable, List, Optional, Text, Tuple, Union
import numpy as np
from tensorflow_model_analysis.metrics import metric_util
from tensorflow_model_analysis.proto import config_pb2
from tensorflow_model_analysis.utils import size_estimator
class TFMetricsAccumulator(object):
"""Accumulator for TF metrics.
Attributes:
inputs: Accumulated batch of inputs. The inputs are stored in a
multi-dimensional list. The first dimension is used to index the
associated output (for single-output models this will only have one item).
The second dimension is used to store the args used by the combiner. For
example the args might be a tf.Example if feeding a model or they might be
(y_true, y_pred, example_weight) for calling update_state directly.
Batching is done on the last dimension.
weights: Accumulated weights. The weights are stored in a multi-dimensional
list where the first dimension is used to index the associated output (for
single-output models this will only have one item). The second dimension
is used to store the accumulated weights for each metric associated with
the output dimension.
size_estimator: Batch size estimator.
desired_batch_size: Desired batch size.
"""
# We really want the batch size to be adaptive like it is in
# beam.BatchElements(), but there isn't an easy way to make it so. For now
# we will limit stored inputs to a max overall byte size.
# TODO(b/73789023): Figure out how to make this batch size dynamic.
_TOTAL_INPUT_BYTE_SIZE_THRESHOLD = 16 << 20 # 16MiB
_DEFAULT_DESIRED_BATCH_SIZE = 1000
__slots__ = ['_inputs', '_weights', '_size_estimator', '_desired_batch_size']
def __init__(self,
input_counts: List[int],
metric_counts: List[int],
size_estimator_fn: Callable[[Any], int],
desired_batch_size: Optional[int] = None):
"""Initializes accumulator using a list of metric counts per output.
Args:
input_counts: Number of inputs associated with each output index.
metric_counts: Number of metrics associated with each output index.
size_estimator_fn: Function to use for estimating the size of the inputs.
desired_batch_size: FOR TESTING ONLY.
"""
# Inputs have shape (num_outputs, num_metrics, num_accumulated_inputs)
self._inputs = []
# Weights have shape (num_outputs, num_metrics)
self._weights = [] # type: List[List[Optional[np.ndarray]]]
for input_count in input_counts:
self._inputs.append(tuple([] for _ in range(input_count)))
for output_metric_count in metric_counts:
self._weights.append([None] * output_metric_count)
self._size_estimator = size_estimator.SizeEstimator(
size_threshold=self._TOTAL_INPUT_BYTE_SIZE_THRESHOLD,
size_fn=size_estimator_fn)
if desired_batch_size and desired_batch_size > 0:
self._desired_batch_size = desired_batch_size
else:
self._desired_batch_size = self._DEFAULT_DESIRED_BATCH_SIZE
def len_inputs(self) -> int:
"""Returns length of inputs."""
return len(self._inputs[0][0])
def add_input(self, output_index: int, *args):
"""Adds new inputs to the lists of input args stored at output_index."""
for i, v in enumerate(args):
self._inputs[output_index][i].append(v)
if v is not None:
self._size_estimator.update(v)
def get_inputs(self, output_index: int) -> Any:
"""Returns input args for output at given offset."""
return self._inputs[output_index]
def clear_inputs(self):
"""Clears currently stored inputs."""
for output_index in range(len(self._inputs)):
for i in range(len(self._inputs[output_index])):
del self._inputs[output_index][i][:]
self._size_estimator.clear()
def add_weights(self, output_index: int, metric_index: int,
weights: np.ndarray):
"""Adds weights for metric at given metric_index and output_index."""
cur_weights = self._weights[output_index][metric_index]
if cur_weights is None:
self._weights[output_index][metric_index] = weights
else:
self._weights[output_index][metric_index] = np.add(cur_weights, weights)
def get_weights(self, output_index: int,
metric_index: int) -> Optional[np.ndarray]:
"""Gets currently stored weights for given metric_index and output_index."""
return self._weights[output_index][metric_index]
def should_flush(self) -> bool:
"""Returns true if size estimator indicates flush is needed."""
return (self.len_inputs() >= self._desired_batch_size or
self._size_estimator.should_flush())
def get_size_estimate(self) -> int:
"""Returns size estimator associated with accumulator."""
return self._size_estimator.get_estimate()
def _numpy_array_size_fn(array: np.ndarray) -> int:
"""Size estimator for numpy arrays."""
return array.nbytes
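def _tf_metrics_accumulator_usage_sketch():
  """Illustrative usage sketch (added for documentation only; not part of the
  original module or its public API). Shows a single-output accumulator with two
  input args per example and one metric.
  """
  acc = TFMetricsAccumulator(
      input_counts=[2],
      metric_counts=[1],
      size_estimator_fn=_numpy_array_size_fn)
  # Append one batch element per input arg for output 0.
  acc.add_input(0, np.array([1.0]), np.array([0.7]))
  labels, predictions = acc.get_inputs(0)  # tuple of two lists of arrays
  # Accumulate weights for metric 0 of output 0.
  acc.add_weights(0, 0, np.array([0.5]))
  return labels, predictions, acc.get_weights(0, 0)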
class TFCompilableMetricsAccumulator(TFMetricsAccumulator):
"""Accumulator for compilable TF metrics.
Attributes:
inputs: Accumulated batch of inputs. The inputs are stored in a
multi-dimensional list. The first dimension is used to index the
associated output (for single-output models this will only have one item).
The second dimension is used to store the args passed to update_state
      (i.e. (y_true, y_pred, example_weight)). Batching is done on the last
      dimension.
weights: Accumulated weights. The weights are stored in a multi-dimensional
list where the first dimension is used to index the associated output (for
single-output models this will only have one item). The second dimension
is used to store the accumulated weights for each metric associated with
the output dimension.
pad: True if padding needed.
last_dim: Max size of the last dimension of labels or predictions (used with
padding).
size_estimator: Batch size estimator.
desired_batch_size: Desired batch size.
"""
__slots__ = [
'_inputs', '_weights', '_pad', '_pad_to_dim', '_label_padding',
'_prediction_padding', '_size_estimator', '_desired_batch_size'
]
def __init__(self,
padding_options: Optional[config_pb2.PaddingOptions],
metric_counts: List[int],
desired_batch_size: Optional[int] = None):
"""Initializes accumulator using a list of metric counts per output."""
super(TFCompilableMetricsAccumulator, self).__init__(
# Input args of labels, predictions, example_weights for each output.
input_counts=[3] * len(metric_counts),
metric_counts=metric_counts,
size_estimator_fn=_numpy_array_size_fn,
desired_batch_size=desired_batch_size)
self._pad = False
if padding_options is not None:
def get_padding_value(oneof_name):
oneof = padding_options.WhichOneof(oneof_name)
return None if oneof is None else getattr(padding_options, oneof)
self._pad = True
self._label_padding = get_padding_value('label_padding')
self._prediction_padding = get_padding_value('prediction_padding')
self._pad_to_dim = 0
def add_input(self, output_index: int, label: np.ndarray,
prediction: np.ndarray, example_weight: np.ndarray):
"""Adds label, prediction, and example weight to output_index."""
super(TFCompilableMetricsAccumulator,
self).add_input(output_index, label, prediction, example_weight)
if self._pad:
self._pad_to_dim = max(self._pad_to_dim, label.shape[-1],
prediction.shape[-1])
def get_inputs(
self, output_index: int) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Returns labels, predictions, and weights for output at given offset."""
labels, preds, example_weights = super(TFCompilableMetricsAccumulator,
self).get_inputs(output_index)
if self._pad:
def pad_value(
name: Text, a: np.ndarray,
configured_value: Optional[Union[float, int]]) -> Union[int, float]:
if configured_value is None:
return 0 if a.dtype.kind == 'i' else .0
if isinstance(configured_value, int) and a.dtype.kind == 'i':
return configured_value
if isinstance(configured_value, float) and a.dtype.kind == 'f':
return configured_value
raise ValueError('%s padding is configured to be %s but data is %s' %
(name, type(configured_value), a.dtype))
labels = [
metric_util.pad(l, self._pad_to_dim,
pad_value('label', l, self._label_padding))
for l in labels
]
preds = [
metric_util.pad(p, self._pad_to_dim,
pad_value('prediction', p, self._prediction_padding))
for p in preds
]
return (np.array(labels), np.array(preds), np.array(example_weights))
def clear_inputs(self):
"""Clears currently stored inputs."""
super(TFCompilableMetricsAccumulator, self).clear_inputs()
self._pad_to_dim = 0
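def _tf_compilable_metrics_accumulator_usage_sketch():
  """Illustrative usage sketch (added for documentation only; not part of the
  original module or its public API). Drives the accumulator for one output with
  a single metric and no padding configured.
  """
  acc = TFCompilableMetricsAccumulator(
      padding_options=None, metric_counts=[1])
  acc.add_input(
      0,
      np.array([0.0, 1.0]),    # label
      np.array([0.2, 0.8]),    # prediction
      np.array([1.0]))         # example weight
  labels, predictions, example_weights = acc.get_inputs(0)
  # Arrays are batched on the first dimension, e.g. labels.shape == (1, 2) here.
  return labels, predictions, example_weights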
|
ais/compatibility/__init__.py | andyvan-trabus/libais | 161 | 11160815 | <reponame>andyvan-trabus/libais
import warnings
warnings.warn(
"The compatibility module is deprecated and will be removed in 1.0",
FutureWarning,
stacklevel=2
)
|
bmtk/analyzer/io_tools.py | aaberbach/bmtk | 216 | 11160818 | from six import string_types
from bmtk.utils.sonata.config import SonataConfig as ConfigDict
def load_config(config):
if isinstance(config, string_types):
return ConfigDict.from_json(config)
elif isinstance(config, dict):
return ConfigDict.from_dict(config)
else:
raise Exception('Could not convert {} (type "{}") to json.'.format(config, type(config))) |
perma_web/perma/tasks.py | fakegit/perma | 317 | 11160823 | import tempfile
import traceback
from collections import OrderedDict
from contextlib import contextmanager
from pyquery import PyQuery
from http.client import CannotSendRequest
from urllib.error import URLError
import os
import os.path
import threading
import time
from datetime import timedelta
import urllib.parse
import re
import urllib.robotparser
from urllib3.util import is_connection_dropped
import errno
import tempdir
import socket
from socket import error as socket_error
from celery import shared_task
from celery.exceptions import SoftTimeLimitExceeded
from celery.signals import task_failure
from selenium import webdriver
from selenium.common.exceptions import WebDriverException, NoSuchElementException, NoSuchFrameException, TimeoutException
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.proxy import ProxyType, Proxy
from pyvirtualdisplay import Display
import warcprox
from warcprox.controller import WarcproxController
from warcprox.warcproxy import WarcProxyHandler
from warcprox.mitmproxy import ProxyingRecordingHTTPResponse
from warcprox.mitmproxy import http_client
from warcprox.mitmproxy import MitmProxyHandler, socks, ssl
import requests
from requests.structures import CaseInsensitiveDict
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
import internetarchive
from django.core.files.storage import default_storage
from django.core.mail import mail_admins
from django.template.defaultfilters import truncatechars
from django.conf import settings
from django.utils import timezone
from django.urls import reverse
from django.http import HttpRequest
from perma.models import WeekStats, MinuteStats, Registrar, LinkUser, Link, Organization, Capture, CaptureJob, UncaughtError
from perma.email import send_self_email
from perma.exceptions import PermaPaymentsCommunicationException
from perma.utils import (run_task, url_in_allowed_ip_range,
copy_file_data, preserve_perma_warc, write_warc_records_recorded_from_web,
write_resource_record_from_asset, protocol, remove_control_characters,
user_agent_for_domain, Sec1TLSAdapter)
from perma import site_scripts
import logging
logger = logging.getLogger('celery.django')
### CONSTANTS ###
RESOURCE_LOAD_TIMEOUT = settings.RESOURCE_LOAD_TIMEOUT # seconds to wait for at least one resource to load before giving up on capture
ROBOTS_TXT_TIMEOUT = 30 # seconds to wait before giving up on robots.txt
ONLOAD_EVENT_TIMEOUT = 30 # seconds to wait before giving up on the onLoad event and proceeding as though it fired
ELEMENT_DISCOVERY_TIMEOUT = 2 # seconds before the browser gives up running a DOM request (should be instant, assuming page is loaded)
AFTER_LOAD_TIMEOUT = 25 # seconds to allow page to keep loading additional resources after onLoad event fires
SHUTDOWN_GRACE_PERIOD = settings.SHUTDOWN_GRACE_PERIOD # seconds to allow slow threads to finish before we complete the capture job
VALID_FAVICON_MIME_TYPES = {'image/png', 'image/gif', 'image/jpg', 'image/jpeg', 'image/x-icon', 'image/vnd.microsoft.icon', 'image/ico'}
BROWSER_SIZE = [1024, 800]
### ERROR REPORTING ###
@task_failure.connect()
def celery_task_failure_email(**kwargs):
"""
Celery 4.0 onward has no method to send emails on failed tasks
so this event handler is intended to replace it. It reports truly failed
tasks, such as those terminated after CELERY_TASK_TIME_LIMIT.
From https://github.com/celery/celery/issues/3389
"""
subject = u"[Django][{queue_name}@{host}] Error: Task {sender.name} ({task_id}): {exception}".format(
queue_name=u'celery',
host=socket.gethostname(),
**kwargs
)
message = u"""Task {sender.name} with id {task_id} raised exception:
{exception!r}
Task was called with args: {args} kwargs: {kwargs}.
The contents of the full traceback was:
{einfo}
""".format(
**kwargs
)
mail_admins(subject, message)
### THREAD HELPERS ###
def add_thread(thread_list, target, **kwargs):
if not isinstance(target, threading.Thread):
target = threading.Thread(target=target, **kwargs)
target.start()
thread_list.append(target)
return target
def safe_save_fields(instance, **kwargs):
"""
Update and save the given fields for a model instance.
Use update_fields so we won't step on changes to other fields made in another thread.
"""
for key, val in kwargs.items():
setattr(instance, key, val)
instance.save(update_fields=list(kwargs.keys()))
def get_url(url, thread_list, proxy_address, requested_urls, proxied_responses, user_agent):
"""
    Get a url, via proxied python requests.get(), in a way that is interruptible from other threads.
Blocks calling thread. (Recommended: only call in sub-threads.)
"""
request_thread = add_thread(thread_list, ProxiedRequestThread(proxy_address, url, requested_urls, proxied_responses, user_agent))
request_thread.join()
return request_thread.response, request_thread.response_exception
class ProxiedRequestThread(threading.Thread):
"""
Run python request.get() in a thread, loading data in chunks.
Listen for self.stop to be set, allowing requests to be halted by other threads.
While the thread is running, see `self.pending_data` for how much has been downloaded so far.
Once the thread is done, see `self.response` and `self.response_exception` for the results.
"""
def __init__(self, proxy_address, url, requested_urls, proxied_responses, user_agent, *args, **kwargs):
self.url = url
self.user_agent = user_agent
self.proxy_address = proxy_address
self.pending_data = 0
self.stop = threading.Event()
self.response = None
self.response_exception = None
self.requested_urls = requested_urls
self.proxied_responses = proxied_responses
super(ProxiedRequestThread, self).__init__(*args, **kwargs)
def run(self):
self.requested_urls.add(self.url)
if self.proxied_responses["limit_reached"]:
return
try:
with requests.Session() as s:
# Lower our standards for the required TLS security level
s.mount('https://', Sec1TLSAdapter())
request = requests.Request(
'GET',
self.url,
headers={'User-Agent': self.user_agent, **settings.CAPTURE_HEADERS}
)
self.response = s.send(
request.prepare(),
proxies={'http': 'http://' + self.proxy_address, 'https': 'http://' + self.proxy_address},
verify=False,
stream=True,
timeout=1
)
self.response._content = bytes()
for chunk in self.response.iter_content(chunk_size=8192):
self.pending_data += len(chunk)
self.response._content += chunk
if self.stop.is_set() or self.proxied_responses["limit_reached"]:
return
except requests.RequestException as e:
self.response_exception = e
finally:
if self.response:
self.response.close()
self.pending_data = 0
class HaltCaptureException(Exception):
"""
An exception we can trigger to halt capture and release
all involved resources.
"""
pass
# WARCPROX HELPERS
# monkeypatch ProxyingRecorder to grab headers of proxied response
_orig_begin = ProxyingRecordingHTTPResponse.begin
def begin(self, extra_response_headers={}):
_orig_begin(self, extra_response_headers={})
self.recorder.headers = self.msg
ProxyingRecordingHTTPResponse.begin = begin
# get a copy of warcprox's proxy function, which we can use to
# monkey-patch the function freshly on each call of run_next_capture
_real_proxy_request = WarcProxyHandler._proxy_request
# get a copy of warcprox's connection function, which we can use to
# monkey-patch the function freshly on each call of run_next_capture
_orig_connect_to_remote_server = MitmProxyHandler._connect_to_remote_server
# BROWSER HELPERS
def start_virtual_display():
display = Display(visible=0, size=BROWSER_SIZE)
display.start()
return display
def get_browser(user_agent, proxy_address, cert_path):
""" Set up a Selenium browser with given user agent, proxy and SSL cert. """
display = None
print("Using browser: %s" % settings.CAPTURE_BROWSER)
# Firefox
if settings.CAPTURE_BROWSER == 'Firefox':
display = start_virtual_display()
desired_capabilities = dict(DesiredCapabilities.FIREFOX)
proxy = Proxy({
'proxyType': ProxyType.MANUAL,
'httpProxy': proxy_address,
'ftpProxy': proxy_address,
'sslProxy': proxy_address,
})
proxy.add_to_capabilities(desired_capabilities)
profile = webdriver.FirefoxProfile()
profile.accept_untrusted_certs = True
profile.assume_untrusted_cert_issuer = True
browser = webdriver.Firefox(
capabilities=desired_capabilities,
firefox_profile=profile)
# Chrome
elif settings.CAPTURE_BROWSER == 'Chrome':
# http://blog.likewise.org/2015/01/setting-up-chromedriver-and-the-selenium-webdriver-python-bindings-on-ubuntu-14-dot-04/
# and from 2017-04-17: https://intoli.com/blog/running-selenium-with-headless-chrome/
download_dir = os.path.abspath('./downloads')
os.mkdir(download_dir)
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument(f'user-agent={user_agent}')
chrome_options.add_argument('proxy-server=%s' % proxy_address)
chrome_options.add_argument('headless')
chrome_options.add_argument('disable-gpu')
chrome_options.add_argument('no-sandbox')
chrome_options.add_argument('hide-scrollbars')
chrome_options.add_experimental_option("prefs", {"profile.default_content_settings.popups": "0",
"download.default_directory": download_dir,
"download.prompt_for_download": "false"})
if settings.DISABLE_DEV_SHM:
chrome_options.add_argument('disable-dev-shm-usage')
desired_capabilities = chrome_options.to_capabilities()
desired_capabilities["acceptSslCerts"] = True
browser = webdriver.Chrome(desired_capabilities=desired_capabilities)
else:
assert False, "Invalid value for CAPTURE_BROWSER."
browser.implicitly_wait(ELEMENT_DISCOVERY_TIMEOUT)
browser.set_page_load_timeout(ROBOTS_TXT_TIMEOUT)
return browser, display
def browser_still_running(browser):
return browser.service.process.poll() is None
def scroll_browser(browser):
"""scroll to bottom of page"""
# TODO: This doesn't scroll horizontally or scroll frames
try:
scroll_delay = browser.execute_script("""
// Scroll down the page in a series of jumps the size of the window height.
// The actual scrolling is done in a setTimeout with a 50ms delay so the browser has
// time to render at each position.
var delay=50,
height=document.body.scrollHeight,
jump=window.innerHeight,
scrollTo=function(scrollY){ window.scrollTo(0, scrollY) },
i=1;
for(;i*jump<height;i++){
setTimeout(scrollTo, i*delay, i*jump);
}
// Scroll back to top before taking screenshot.
setTimeout(scrollTo, i*delay, 0);
// Return how long all this scrolling will take.
return (i*delay)/1000;
""")
# In python, wait for javascript background scrolling to finish.
time.sleep(min(scroll_delay,1))
except (WebDriverException, TimeoutException, CannotSendRequest, URLError):
# Don't panic if we can't scroll -- we've already captured something useful anyway.
# WebDriverException: the page can't execute JS for some reason.
# URLError: the headless browser has gone away for some reason.
pass
def get_page_source(browser):
"""
Get page source.
Use JS rather than browser.page_source so we get the parsed, properly formatted DOM instead of raw user HTML.
"""
try:
return browser.execute_script("return document.documentElement.outerHTML")
except (WebDriverException, TimeoutException, CannotSendRequest):
return browser.page_source
def parse_page_source(source):
"""
Return page source as a parsed PyQuery object for querying.
PyQuery here works the same as `$(source)` in jQuery. So for example you can do `parsed_source(selector)`
with the returned value to get a list of LXML elements matching the selector.
"""
return PyQuery(source, parser='html')
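# Illustrative example (added; markup is hypothetical): given
# source = '<html><head><title>Hi</title></head><body></body></html>',
# parse_page_source(source)('head > title').text() returns 'Hi'.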
def get_dom_tree(browser):
with browser_running(browser):
return parse_page_source(get_page_source(browser))
def get_all_dom_trees(browser):
with browser_running(browser):
return run_in_frames(browser, lambda browser: [[browser.current_url, get_dom_tree(browser)]])
def run_in_frames(browser, func, output_collector=None):
# setup
browser.implicitly_wait(0)
if output_collector is None:
output_collector = []
run_in_frames_recursive(browser, func, output_collector)
# reset
browser.implicitly_wait(ELEMENT_DISCOVERY_TIMEOUT)
browser.switch_to.default_content()
return output_collector
def run_in_frames_recursive(browser, func, output_collector, frame_path=None):
DEPTH_LIMIT = 3 # deepest frame level we'll visit
FRAME_LIMIT = 20 # max total frames we'll visit
if frame_path is None:
frame_path = []
with browser_running(browser):
# slow to run, so only uncomment logging if needed for debugging:
# import hashlib
# print frame_path, browser.find_elements_by_tag_name('html')[0]._id, hashlib.sha256(browser.page_source.encode('utf8')).hexdigest(), browser.execute_script("return window.location.href")
# attempt to get iframe url, skipping the iframe if attempt fails
# (usually due to content security policy)
try:
current_url = browser.current_url
except (WebDriverException, TimeoutException):
return
# skip about:blank, about:srcdoc, and any other non-http frames
if not (current_url.startswith('http:') or current_url.startswith('https:')):
return
# run func in current frame
output_collector += func(browser)
# stop looking for subframes if we hit depth limit
if len(frame_path) > DEPTH_LIMIT:
return
# run in subframes of current frame
for i in range(FRAME_LIMIT):
# stop looking for subframes if we hit total frames limit
if len(output_collector) > FRAME_LIMIT:
return
# call self recursively in child frame i
try:
browser.switch_to.frame(i)
run_in_frames_recursive(browser, func, output_collector, frame_path + [i])
except NoSuchFrameException:
# we've run out of subframes
break
except ValueError:
# switching to frame failed for some reason (does this still apply?)
print("run_in_frames_recursive caught exception switching to iframe:")
traceback.print_exc()
# return to current frame
browser.switch_to.default_content()
try:
for frame in frame_path:
browser.switch_to.frame(frame)
except NoSuchFrameException:
# frame hierarchy changed; frame_path is invalid
print("frame hierarchy changed while running run_in_frames_recursive")
break
### UTILS ###
def repeat_while_exception(func, arglist=[], exception=Exception, timeout=10, sleep_time=.1, raise_after_timeout=True):
"""
Keep running a function until it completes without raising an exception,
or until "timeout" is reached.
Useful when retrieving page elements via Selenium.
"""
end_time = time.time() + timeout
while True:
try:
return func(*arglist)
except SoftTimeLimitExceeded:
raise
except exception:
if time.time() > end_time:
if raise_after_timeout:
raise
return
time.sleep(sleep_time)
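# Usage sketch (added for illustration; the element lookup is hypothetical):
# element = repeat_while_exception(
#     lambda: browser.find_element_by_tag_name('body'),
#     exception=NoSuchElementException,
#     timeout=5)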
def repeat_until_truthy(func, arglist=[], timeout=10, sleep_time=.1):
"""
Keep running a function until it returns a truthy value, or until
"timeout" is reached. No exception handling.
Useful when retrieving page elements via javascript run by Selenium.
"""
end_time = time.time() + timeout
result = None
while not result:
if time.time() > end_time:
break
result = func(*arglist)
time.sleep(sleep_time)
return result
def sleep_unless_seconds_passed(seconds, start_time):
delta = time.time() - start_time
if delta < seconds:
wait = seconds - delta
print("Sleeping for {}s".format(wait))
time.sleep(wait)
# CAPTURE HELPERS
def inc_progress(capture_job, inc, description):
capture_job.inc_progress(inc, description)
print("%s step %s: %s" % (capture_job.link.guid, capture_job.step_count, capture_job.step_description))
def capture_current_size(thread_list, recorded):
"""
Amount captured so far is the sum of the bytes recorded by warcprox,
and the bytes pending in our background threads.
"""
return recorded + sum(getattr(thread, 'pending_data', 0) for thread in thread_list)
class CaptureCurrentSizeThread(threading.Thread):
"""
Listen for self.stop to be set, allowing the thread to be halted by other threads.
"""
def __init__(self, thread_list, proxied_responses, *args, **kwargs):
self.stop = threading.Event()
self.thread_list = thread_list
self.proxied_responses = proxied_responses
# include 'pending data' for a consistent API with other threads on the thread_list
self.pending_data = 0
super(CaptureCurrentSizeThread, self).__init__(*args, **kwargs)
def run(self):
while True:
if self.stop.is_set():
return
if capture_current_size(self.thread_list, self.proxied_responses["size"]) > settings.MAX_ARCHIVE_FILE_SIZE:
self.proxied_responses["limit_reached"] = True
print("Size limit reached.")
return
time.sleep(.2)
def make_absolute_urls(base_url, urls):
"""collect resource urls, converted to absolute urls relative to current browser frame"""
return [urllib.parse.urljoin(base_url, url) for url in urls if url]
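# Illustrative example (added): make_absolute_urls('http://example.com/a/', ['b.png', None])
# returns ['http://example.com/a/b.png']; falsy entries are dropped.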
def parse_headers(msg):
"""
Given an http.client.HTTPMessage, returns a parsed dict
"""
headers = CaseInsensitiveDict(msg.items())
# Reset headers['x-robots-tag'], so that we can handle the
    # possibility that multiple x-robots directives might be included
# https://developers.google.com/webmasters/control-crawl-index/docs/robots_meta_tag
# e.g.
# HTTP/1.1 200 OK
# Date: Tue, 25 May 2010 21:42:43 GMT
# (...)
# X-Robots-Tag: googlebot: nofollow
# X-Robots-Tag: otherbot: noindex, nofollow
# (...)
# Join with a semi-colon, not a comma, so that multiple agents can
# be recovered. As of 12/14/16, there doesn't appear to be any spec
# describing how to do this properly (since commas don't work).
# Since parsed response headers aren't archived, this convenience is
# fine. However, it's worth keeping track of the situation.
robots_directives = []
# https://bugs.python.org/issue5053
# https://bugs.python.org/issue13425
directives = msg.get_all('x-robots-tag')
if directives:
for directive in directives:
robots_directives.append(directive.replace("\n", "").replace("\r", ""))
headers['x-robots-tag'] = ";".join(robots_directives)
return headers
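# Illustrative example (added; header values taken from the comment above): a response
# carrying both "X-Robots-Tag: googlebot: nofollow" and
# "X-Robots-Tag: otherbot: noindex, nofollow" yields
# headers['x-robots-tag'] == 'googlebot: nofollow;otherbot: noindex, nofollow'.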
### CAPTURE COMPONENTS ###
# on load
# By domain, code to run after the target_url's page onload event.
post_load_function_lookup = {
"^https?://www.forbes.com/forbes/welcome": site_scripts.forbes_post_load,
"^https?://rwi.app/iurisprudentia": site_scripts.iurisprudentia_post_load,
}
def get_post_load_function(current_url):
for regex, post_load_function in post_load_function_lookup.items():
if re.search(regex, current_url.lower()):
return post_load_function
return None
# x-robots headers
def xrobots_blacklists_perma(robots_directives):
darchive = False
if robots_directives:
for directive in robots_directives.split(";"):
parsed = directive.lower().split(":")
# respect tags that target all crawlers (no user-agent specified)
if settings.PRIVATE_LINKS_IF_GENERIC_NOARCHIVE and len(parsed) == 1:
if "noarchive" in parsed:
darchive = True
# look for perma user-agent
elif len(parsed) == 2:
if parsed[0] == "perma" and "noarchive" in parsed[1]:
darchive = True
# if the directive is poorly formed, do our best
else:
if "perma" in directive and "noarchive" in directive:
darchive = True
return darchive
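# Illustrative examples (added): 'perma: noarchive' and 'googlebot: all;perma: noarchive'
# both return True; a bare 'noarchive' returns True only when
# settings.PRIVATE_LINKS_IF_GENERIC_NOARCHIVE is enabled.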
# page metadata
def get_metadata(page_metadata, dom_tree):
"""
Retrieve html page metadata.
"""
if page_metadata.get('title'):
page_metadata['meta_tags'] = get_meta_tags(dom_tree)
else:
page_metadata.update({
'meta_tags': get_meta_tags(dom_tree),
'title': get_title(dom_tree)
})
def get_meta_tags(dom_tree):
"""
Retrieves meta tags as a dict (e.g. {"robots": "noarchive"}).
The keys of the dict are the "name" attributes of the meta tags (if
any) and the values are the corresponding "content" attributes.
Later-encountered tags overwrite earlier-encountered tags, if a
"name" attribute is duplicated in the html. Tags without name
attributes are thrown away (their "content" attribute is mapped
to the key "", the empty string).
"""
return {tag.attrib['name'].lower(): tag.attrib.get('content', '')
for tag in dom_tree('meta')
if tag.attrib.get('name')}
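# Illustrative example (added): '<meta name="Robots" content="noarchive">' becomes
# {'robots': 'noarchive'}; names are lowercased and missing content defaults to ''.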
def get_title(dom_tree):
return dom_tree('head > title').text()
def meta_tag_analysis_failed(link):
"""What to do if analysis of a link's meta tags fails"""
if settings.PRIVATE_LINKS_ON_FAILURE:
safe_save_fields(link, is_private=True, private_reason='failure')
print("Meta tag retrieval failure.")
link.tags.add('meta-tag-retrieval-failure')
# robots.txt
def robots_txt_thread(link, target_url, content_url, thread_list, proxy_address, requested_urls, proxied_responses, user_agent):
robots_txt_location = urllib.parse.urljoin(content_url, '/robots.txt')
robots_txt_response, e = get_url(robots_txt_location, thread_list, proxy_address, requested_urls, proxied_responses, user_agent)
if e or not robots_txt_response or not robots_txt_response.ok:
print("Couldn't reach robots.txt")
return
print("Robots.txt fetched.")
# We only want to respect robots.txt if Perma is specifically asked not to archive (we're not a crawler)
content = str(robots_txt_response.content, 'utf-8')
if 'Perma' in content:
# We found Perma specifically mentioned
rp = urllib.robotparser.RobotFileParser()
rp.parse([line.strip() for line in content.split('\n')])
if not rp.can_fetch('Perma', target_url):
safe_save_fields(link, is_private=True, private_reason='policy')
print("Robots.txt disallows Perma.")
# favicons
def favicon_thread(successful_favicon_urls, dom_tree, content_url, thread_list, proxy_address, requested_urls, proxied_responses, user_agent):
favicon_urls = favicon_get_urls(dom_tree, content_url)
for favicon_url in favicon_urls:
favicon = favicon_fetch(favicon_url, thread_list, proxy_address, requested_urls, proxied_responses, user_agent)
if favicon:
successful_favicon_urls.append(favicon)
if not successful_favicon_urls:
print("Couldn't get any favicons")
def favicon_get_urls(dom_tree, content_url):
"""
Retrieve favicon URLs from DOM.
"""
urls = [] # order here matters so that we prefer meta tag favicon over /favicon.ico
for el in dom_tree('link'):
if el.attrib.get('rel', '').lower() in ("shortcut icon", "icon"):
href = el.attrib.get('href')
if href:
urls.append(href)
urls.append('/favicon.ico')
urls = make_absolute_urls(content_url, urls)
urls = list(OrderedDict((url, True) for url in urls).keys()) # remove duplicates without changing list order
return urls
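# Note added for clarity: the OrderedDict pass above de-duplicates while preserving
# order, so a favicon declared via <link rel="icon"> is still preferred over /favicon.ico.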
def favicon_fetch(url, thread_list, proxy_address, requested_urls, proxied_responses, user_agent):
print("Fetching favicon from %s ..." % url)
response, e = get_url(url, thread_list, proxy_address, requested_urls, proxied_responses, user_agent)
if e or not response or not response.ok:
print("Favicon failed:", e, response)
return
# apply mime type whitelist
mime_type = response.headers.get('content-type', '').split(';')[0]
if mime_type in VALID_FAVICON_MIME_TYPES:
return (url, mime_type)
# media
def get_media_tags(dom_trees):
urls = set()
for base_url, dom_tree in dom_trees:
print("Fetching images in srcsets")
new_urls = get_srcset_image_urls(dom_tree)
if settings.ENABLE_AV_CAPTURE:
print("Fetching audio/video objects")
new_urls += get_audio_video_urls(dom_tree)
new_urls += get_object_urls(dom_tree)
urls |= set(make_absolute_urls(base_url, new_urls))
return urls
def get_srcset_image_urls(dom_tree):
"""
Return all urls listed in img/src srcset attributes.
"""
urls = []
for el in dom_tree('img[srcset], source[srcset]'):
for src in el.attrib.get('srcset', '').split(','):
src = src.strip().split()[0]
if src:
urls.append(src)
# Get src, too: Chrome (and presumably Firefox) doesn't do
# this automatically.
for el in dom_tree('img[src]'):
urls.append(el.attrib.get('src', ''))
return urls
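# Illustrative example (added): srcset="small.jpg 1x, big.jpg 2x" contributes
# ['small.jpg', 'big.jpg'], and each <img src="..."> also adds its src value.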
def get_audio_video_urls(dom_tree):
"""
Return urls listed in video/audio/embed/source tag src attributes.
"""
urls = []
for el in dom_tree('video, audio, embed, source'):
src = el.attrib.get('src', '').strip()
if src:
urls.append(src)
return urls
def get_object_urls(dom_tree):
"""
Return urls in object tag data/archive attributes, as well as object -> param[name="movie"] tag value attributes.
Urls will be relative to the object tag codebase attribute if it exists.
"""
urls = []
for el in dom_tree('object'):
codebase_url = el.attrib.get('codebase')
el_urls = [el.attrib.get('data', '')] + \
el.attrib.get('archive', '').split() + \
[param.attrib.get('value', '') for param in PyQuery(el)('param[name="movie"]')]
for url in el_urls:
url = url.strip()
if url:
if codebase_url:
url = urllib.parse.urljoin(codebase_url, url)
urls.append(url)
return urls
# screenshot
def get_screenshot(link, browser):
page_size = get_page_size(browser)
if page_pixels_in_allowed_range(page_size):
if settings.CAPTURE_BROWSER == 'Chrome':
# set window size to page size in Chrome, so we get a full-page screenshot:
browser.set_window_size(max(page_size['width'], BROWSER_SIZE[0]), max(page_size['height'], BROWSER_SIZE[1]))
return browser.get_screenshot_as_png()
else:
print("Not taking screenshot! %s" % ("Page size is %s." % (page_size,)))
safe_save_fields(link.screenshot_capture, status='failed')
def get_page_size(browser):
try:
return browser.execute_script("""
var body = document.body;
var html = document.documentElement;
var height = Math.max(
body.scrollHeight,
body.offsetHeight,
html.clientHeight,
html.scrollHeight,
html.offsetHeight
);
var width = Math.max(
body.scrollWidth,
body.offsetWidth,
html.clientWidth,
html.scrollWidth,
html.offsetWidth
);
return {'height': height, 'width': width}
""")
except Exception:
try:
root_element = browser.find_element_by_tag_name('html')
except (TimeoutException, NoSuchElementException, URLError, CannotSendRequest):
try:
root_element = browser.find_element_by_tag_name('frameset')
except (TimeoutException, NoSuchElementException, URLError, CannotSendRequest):
# NoSuchElementException: HTML structure is weird somehow.
# URLError: the headless browser has gone away for some reason.
root_element = None
if root_element:
try:
return root_element.size
except WebDriverException:
# If there is no "body" element, a WebDriverException is thrown.
# Skip the screenshot in that case: nothing to see
pass
def page_pixels_in_allowed_range(page_size):
return page_size and page_size['width'] * page_size['height'] < settings.MAX_IMAGE_SIZE
### CAPTURE COMPLETION
def teardown(link, thread_list, browser, display, warcprox_controller, warcprox_thread):
print("Shutting down browser and proxies.")
for thread in thread_list:
# wait until threads are done
if hasattr(thread, 'stop'):
thread.stop.set()
thread.join()
if browser:
if not browser_still_running(browser):
link.tags.add('browser-crashed')
browser.quit()
if display:
display.stop() # shut down virtual display
if warcprox_controller:
warcprox_controller.stop.set() # send signals to shut down warc threads
warcprox_controller.proxy.pool.shutdown(wait=False) # non-blocking
if warcprox_thread:
warcprox_thread.join() # wait until warcprox thread is done
# wait for stray MitmProxyHandler threads
shutdown_time = time.time()
while True:
if time.time() - shutdown_time > SHUTDOWN_GRACE_PERIOD:
break
threads = threading.enumerate()
print("{} active threads.".format(len(threads)))
if not any('MitmProxyHandler' in thread.name for thread in threads):
break
print("Waiting for MitmProxyHandler")
time.sleep(1)
if warcprox_controller:
warcprox_controller.warc_writer_processor.writer_pool.close_writers() # blocking
def process_metadata(metadata, link):
## Privacy Related ##
meta_tag = metadata['meta_tags'].get('perma')
if settings.PRIVATE_LINKS_IF_GENERIC_NOARCHIVE and not meta_tag:
meta_tag = metadata['meta_tags'].get('robots')
if meta_tag and 'noarchive' in meta_tag.lower():
safe_save_fields(link, is_private=True, private_reason='policy')
print("Meta found, darchiving")
## Page Description ##
description_meta_tag = metadata['meta_tags'].get('description')
if description_meta_tag:
safe_save_fields(link, submitted_description=description_meta_tag)
## Page Title
safe_save_fields(link, submitted_title=metadata['title'])
def save_warc(warcprox_controller, capture_job, link, content_type, screenshot, successful_favicon_urls):
# save a single warc, comprising all recorded recorded content and the screenshot
recorded_warc_path = os.path.join(
os.getcwd(),
warcprox_controller.options.directory,
"{}.warc.gz".format(warcprox_controller.options.warc_filename)
)
warc_size = [] # pass a mutable container to the context manager, so that it can populate it with the size of the finished warc
with open(recorded_warc_path, 'rb') as recorded_warc_records, \
preserve_perma_warc(link.guid, link.creation_timestamp, link.warc_storage_file(), warc_size) as perma_warc:
# screenshot first, per Perma custom
if screenshot:
write_resource_record_from_asset(screenshot, link.screenshot_capture.url, link.screenshot_capture.content_type, perma_warc)
# then recorded content
write_warc_records_recorded_from_web(recorded_warc_records, perma_warc)
# update the db to indicate we succeeded
safe_save_fields(
link,
warc_size=warc_size[0]
)
safe_save_fields(
link.primary_capture,
status='success',
content_type=content_type
)
if screenshot:
safe_save_fields(
link.screenshot_capture,
status='success'
)
save_favicons(link, successful_favicon_urls)
capture_job.mark_completed()
def save_favicons(link, successful_favicon_urls):
if successful_favicon_urls:
Capture(
link=link,
role='favicon',
status='success',
record_type='response',
url=successful_favicon_urls[0][0],
content_type=successful_favicon_urls[0][1].lower()
).save()
print("Saved favicons %s" % successful_favicon_urls)
def clean_up_failed_captures():
"""
Clean up any existing jobs that are marked in_progress but must have timed out by now, based on our hard timeout
setting.
"""
# use database time with a custom where clause to ensure consistent time across workers
for capture_job in CaptureJob.objects.filter(status='in_progress').select_related('link').extra(
where=["capture_start_time < now() - INTERVAL %s second" % settings.CELERY_TASK_TIME_LIMIT]
):
capture_job.mark_failed("Timed out.")
capture_job.link.captures.filter(status='pending').update(status='failed')
capture_job.link.tags.add('hard-timeout-failure')
### CONTEXT MANAGERS
@contextmanager
def warn_on_exception(message="Exception in block:", exception_type=Exception):
try:
yield
except SoftTimeLimitExceeded:
raise
except exception_type as e:
print(message, e)
@contextmanager
def browser_running(browser, onfailure=None):
if browser_still_running(browser):
yield
else:
print("Browser crashed")
if onfailure:
onfailure()
raise HaltCaptureException
### TASKS ##
@shared_task
@tempdir.run_in_tempdir()
def run_next_capture():
"""
Grab and run the next CaptureJob. This will keep calling itself until there are no jobs left.
"""
clean_up_failed_captures()
# get job to work on
capture_job = CaptureJob.get_next_job(reserve=True)
if not capture_job:
return # no jobs waiting
try:
# Start warcprox process. Warcprox is a MITM proxy server and needs to be running
# before, during and after the headless browser.
#
# Start a headless browser to capture the supplied URL. Also take a screenshot if the URL is an HTML file.
#
# This whole function runs with the local dir set to a temp dir by run_in_tempdir().
# So we can use local paths for temp files, and they'll just disappear when the function exits.
# basic setup
start_time = time.time()
link = capture_job.link
target_url = link.ascii_safe_url
browser = warcprox_controller = warcprox_thread = display = screenshot = content_type = None
have_content = have_html = False
thread_list = []
page_metadata = {}
successful_favicon_urls = []
requested_urls = set() # all URLs we have requested -- used to avoid duplicate requests
stop = False
proxy = False
capture_user_agent = user_agent_for_domain(link.url_details.netloc)
print("Using user-agent: %s" % capture_user_agent)
if settings.PROXY_CAPTURES and any(domain in link.url_details.netloc for domain in settings.DOMAINS_TO_PROXY):
proxy = True
print("Using proxy.")
# A default title is added in models.py, if an api user has not specified a title.
# Make sure not to override it during the capture process.
if link.submitted_title != link.get_default_title():
page_metadata = {
'title': link.submitted_title
}
# Get started, unless the user has deleted the capture in the meantime
inc_progress(capture_job, 0, "Starting capture")
if link.user_deleted or link.primary_capture.status != "pending":
capture_job.mark_completed('deleted')
return
capture_job.attempt += 1
capture_job.save()
# BEGIN WARCPROX SETUP
# Create a request handler class that tracks requests and responses
# via in-scope, shared mutable containers. (Patch inside capture function
# so the containers are initialized empty for every new capture.)
proxied_responses = {
"any": False,
"size": 0,
"limit_reached": False
}
proxied_pairs = []
tracker_lock = threading.Lock()
# Patch Warcprox's inner proxy function to be interruptible,
# to prevent thread leak and permit the partial capture of streamed content.
# See https://github.com/harvard-lil/perma/issues/2019
def stoppable_proxy_request(self, extra_response_headers={}):
'''
Sends the request to the remote server, then uses a ProxyingRecorder to
read the response and send it to the proxy client, while recording the
bytes in transit. Returns a tuple (request, response) where request is
the raw request bytes, and response is a ProxyingRecorder.
:param extra_response_headers: generated on warcprox._proxy_request.
It may contain extra HTTP headers such as ``Warcprox-Meta`` which
are written in the WARC record for this request.
'''
# Build request
req_str = '{} {} {}\r\n'.format(
self.command, self.path, self.request_version)
# Swallow headers that don't make sense to forward on, i.e. most
# hop-by-hop headers. http://tools.ietf.org/html/rfc2616#section-13.5.
# self.headers is an email.message.Message, which is case-insensitive
# and doesn't throw KeyError in __delitem__
for key in (
'Connection', 'Proxy-Connection', 'Keep-Alive',
'Proxy-Authenticate', 'Proxy-Authorization', 'Upgrade'):
del self.headers[key]
self.headers['Via'] = warcprox.mitmproxy.via_header_value(
self.headers.get('Via'),
self.request_version.replace('HTTP/', ''))
# Add headers to the request
# XXX in at least python3.3 str(self.headers) uses \n not \r\n :(
req_str += '\r\n'.join(
'{}: {}'.format(k,v) for (k,v) in self.headers.items())
req = req_str.encode('latin1') + b'\r\n\r\n'
# Append message body if present to the request
if 'Content-Length' in self.headers:
req += self.rfile.read(int(self.headers['Content-Length']))
prox_rec_res = None
start = time.time()
try:
self.logger.debug('sending to remote server req=%r' % req)
# Send it down the pipe!
self._remote_server_conn.sock.sendall(req)
prox_rec_res = ProxyingRecordingHTTPResponse(
self._remote_server_conn.sock, proxy_client=self.connection,
digest_algorithm=self.server.digest_algorithm,
url=self.url, method=self.command,
tmp_file_max_memory_size=self._tmp_file_max_memory_size)
prox_rec_res.begin(extra_response_headers=extra_response_headers)
buf = None
while buf != b'':
try:
buf = prox_rec_res.read(65536)
except http_client.IncompleteRead as e:
self.logger.warn('%s from %s' %(e, self.url))
buf = e.partial
if buf:
proxied_responses["size"] += len(buf)
if (self._max_resource_size and
prox_rec_res.recorder.len > self._max_resource_size):
prox_rec_res.truncated = b'length'
self._remote_server_conn.sock.shutdown(socket.SHUT_RDWR)
self._remote_server_conn.sock.close()
self.logger.info(
'truncating response because max resource size %d '
'bytes exceeded for URL %s' %
(self._max_resource_size, self.url))
break
elif ('content-length' not in self.headers and
time.time() - start > 3 * 60 * 60):
prox_rec_res.truncated = b'time'
self._remote_server_conn.sock.shutdown(socket.SHUT_RDWR)
self._remote_server_conn.sock.close()
self.logger.info(
'reached hard timeout of 3 hours fetching url '
'without content-length: %s' % self.url)
break
# begin Perma changes #
if stop:
prox_rec_res.truncated = b'length'
self._remote_server_conn.sock.shutdown(socket.SHUT_RDWR)
self._remote_server_conn.sock.close()
self.logger.info(
'truncating response because stop signal received '
'while recording %s' %
self.url)
break
# end Perma changes #
self.log_request(prox_rec_res.status, prox_rec_res.recorder.len)
# Let's close off the remote end. If remote connection is fine,
# put it back in the pool to reuse it later.
if not is_connection_dropped(self._remote_server_conn):
self._conn_pool._put_conn(self._remote_server_conn)
except Exception as e:
# A common error is to connect to the remote server successfully
# but raise a `RemoteDisconnected` exception when trying to begin
# downloading. It's caused by prox_rec_res.begin(...), which calls
# http_client._read_status(). The connection fails there.
# https://github.com/python/cpython/blob/3.7/Lib/http/client.py#L275
# Another case is when the connection is fine but the response
# status is problematic, raising `BadStatusLine`.
# https://github.com/python/cpython/blob/3.7/Lib/http/client.py#L296
# In both cases, the host is bad and we must add it to
# `bad_hostnames_ports` cache.
if isinstance(e, (http_client.RemoteDisconnected,
http_client.BadStatusLine)):
host_port = self._hostname_port_cache_key()
with self.server.bad_hostnames_ports_lock:
self.server.bad_hostnames_ports[host_port] = 502
self.logger.info('bad_hostnames_ports cache size: %d' %
len(self.server.bad_hostnames_ports))
# Close the connection only if it's still open. If it's already
# closed, an `OSError` "([Errno 107] Transport endpoint is not
# connected)" would be raised.
if not is_connection_dropped(self._remote_server_conn):
self._remote_server_conn.sock.shutdown(socket.SHUT_RDWR)
self._remote_server_conn.sock.close()
raise
finally:
if prox_rec_res:
prox_rec_res.close()
return req, prox_rec_res
warcprox.mitmproxy.MitmProxyHandler._inner_proxy_request = stoppable_proxy_request
def _proxy_request(self):
# make sure we don't capture anything in a banned IP range
if not url_in_allowed_ip_range(self.url):
return
# skip request if downloaded size exceeds MAX_ARCHIVE_FILE_SIZE.
if proxied_responses["limit_reached"]:
return
with tracker_lock:
proxied_pair = [self.url, None]
requested_urls.add(proxied_pair[0])
proxied_pairs.append(proxied_pair)
try:
response = _real_proxy_request(self)
except Exception as e:
# If warcprox can't handle a request/response for some reason,
# remove the proxied pair so that it doesn't keep trying and
# the capture process can proceed
proxied_pairs.remove(proxied_pair)
print("WarcProx exception: %s proxying %s" % (e.__class__.__name__, proxied_pair[0]))
return # swallow exception
with tracker_lock:
if response:
proxied_responses["any"] = True
proxied_pair[1] = response
else:
# in some cases (502? others?) warcprox is not returning a response
proxied_pairs.remove(proxied_pair)
WarcProxyHandler._proxy_request = _proxy_request
# patch warcprox's _connect_to_remote_server to go through the proxy whenever onion_tor_socks_proxy_host is set
def _connect_to_remote_server(self):
self._conn_pool = self.server.remote_connection_pool.connection_from_host(
host=self.hostname, port=int(self.port), scheme='http',
pool_kwargs={'maxsize': 12, 'timeout': self._socket_timeout})
remote_ip = None
self._remote_server_conn = self._conn_pool._get_conn()
if is_connection_dropped(self._remote_server_conn):
if self.onion_tor_socks_proxy_host: # Perma removed `and self.hostname.endswith('.onion')`
self.logger.info(
"using tor socks proxy at %s:%s to connect to %s",
self.onion_tor_socks_proxy_host,
self.onion_tor_socks_proxy_port or 1080, self.hostname)
self._remote_server_conn.sock = socks.socksocket()
self._remote_server_conn.sock.set_proxy(
socks.SOCKS5, addr=self.onion_tor_socks_proxy_host,
port=self.onion_tor_socks_proxy_port, rdns=True,
username="user", password=<PASSWORD>) # Perma added username and password, to force new IPs
self._remote_server_conn.sock.settimeout(self._socket_timeout)
self._remote_server_conn.sock.connect((self.hostname, int(self.port)))
else:
self._remote_server_conn.connect()
remote_ip = self._remote_server_conn.sock.getpeername()[0]
# Wrap socket if SSL is required
if self.is_connect:
try:
context = ssl.create_default_context()
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
self._remote_server_conn.sock = context.wrap_socket(
self._remote_server_conn.sock,
server_hostname=self.hostname)
except AttributeError:
try:
self._remote_server_conn.sock = ssl.wrap_socket(
self._remote_server_conn.sock)
except ssl.SSLError:
self.logger.warning(
"failed to establish ssl connection to %s; "
"python ssl library does not support SNI, "
"consider upgrading to python 2.7.9+ or 3.4+",
self.hostname)
raise
except ssl.SSLError as e:
self.logger.error(
'error connecting to %s (%s) port %s: %s',
self.hostname, remote_ip, self.port, e)
raise
return self._remote_server_conn.sock
MitmProxyHandler._connect_to_remote_server = _connect_to_remote_server
# connect warcprox to an open port
warcprox_port = 27500
for i in range(500):
try:
options = warcprox.Options(
address="127.0.0.1",
port=warcprox_port,
max_threads=settings.MAX_PROXY_THREADS,
queue_size=settings.MAX_PROXY_QUEUE_SIZE,
gzip=True,
stats_db_file="",
dedup_db_file="",
directory="./warcs", # default, included so we can retrieve from options object
warc_filename=link.guid,
cacert=os.path.join(settings.SERVICES_DIR, 'warcprox', 'perma-warcprox-ca.pem'),
onion_tor_socks_proxy=settings.PROXY_ADDRESS if proxy else None
)
warcprox_controller = WarcproxController(options)
break
except socket_error as e:
if e.errno != errno.EADDRINUSE:
raise
warcprox_port += 1
else:
raise Exception("WarcProx couldn't find an open port.")
proxy_address = "127.0.0.1:%s" % warcprox_port
# start warcprox in the background
warcprox_thread = threading.Thread(target=warcprox_controller.run_until_shutdown, name="warcprox", args=())
warcprox_thread.start()
print("WarcProx opened.")
# END WARCPROX SETUP
browser, display = get_browser(capture_user_agent, proxy_address, warcprox_controller.proxy.ca.ca_file)
browser.set_window_size(*BROWSER_SIZE)
print("Tracking capture size...")
add_thread(thread_list, CaptureCurrentSizeThread(thread_list, proxied_responses))
# fetch page in the background
inc_progress(capture_job, 1, "Fetching target URL")
page_load_thread = threading.Thread(target=browser.get, name="page_load", args=(target_url,)) # returns after onload
page_load_thread.start()
# before proceeding further, wait until warcprox records a response that isn't a forward
with browser_running(browser):
while not have_content:
if proxied_responses["any"]:
for request, response in proxied_pairs:
if response is None:
# wait for the first response to finish, so we have the best chance
# at successfully identifying the content-type of the target_url
# (in unusual circumstances, can be incorrect)
break
if response.url.endswith(b'/favicon.ico') and response.url != target_url:
continue
if not hasattr(response, 'parsed_headers'):
response.parsed_headers = parse_headers(response.response_recorder.headers)
if response.status in [301, 302, 303, 307, 308, 206]: # redirect or partial content
continue
have_content = True
content_url = str(response.url, 'utf-8')
content_type = getattr(response, 'content_type', None)
content_type = content_type.lower() if content_type else 'text/html; charset=utf-8'
robots_directives = response.parsed_headers.get('x-robots-tag')
have_html = content_type and content_type.startswith('text/html')
break
if have_content:
# we have something that's worth showing to the user;
# break out of "while" before running sleep code below
break
wait_time = time.time() - start_time
if wait_time > RESOURCE_LOAD_TIMEOUT:
raise HaltCaptureException
inc_progress(capture_job, wait_time/RESOURCE_LOAD_TIMEOUT, "Fetching target URL")
time.sleep(1)
print("Fetching robots.txt ...")
add_thread(thread_list, robots_txt_thread, args=(
link,
target_url,
content_url,
thread_list,
proxy_address,
requested_urls,
proxied_responses,
capture_user_agent
))
inc_progress(capture_job, 1, "Checking x-robots-tag directives.")
if xrobots_blacklists_perma(robots_directives):
safe_save_fields(link, is_private=True, private_reason='policy')
print("x-robots-tag found, darchiving")
if have_html:
# Get a copy of the page's metadata immediately, without
# waiting for the page's onload event (which can take a
# long time, and might even crash the browser)
print("Retrieving DOM (pre-onload)")
dom_tree = get_dom_tree(browser)
get_metadata(page_metadata, dom_tree)
# get favicon urls (saved as favicon_capture_url later)
with browser_running(browser):
print("Fetching favicons ...")
add_thread(thread_list, favicon_thread, args=(
successful_favicon_urls,
dom_tree,
content_url,
thread_list,
proxy_address,
requested_urls,
proxied_responses,
capture_user_agent
))
print("Waiting for onload event before proceeding.")
page_load_thread.join(max(0, ONLOAD_EVENT_TIMEOUT - (time.time() - start_time)))
if page_load_thread.is_alive():
print("Onload timed out")
with browser_running(browser):
try:
post_load_function = get_post_load_function(browser.current_url)
except (WebDriverException, TimeoutException, CannotSendRequest):
post_load_function = get_post_load_function(content_url)
if post_load_function:
print("Running domain's post-load function")
post_load_function(browser)
# Get a fresh copy of the page's metadata, if possible.
print("Retrieving DOM (post-onload)")
dom_tree = get_dom_tree(browser)
get_metadata(page_metadata, dom_tree)
with browser_running(browser):
inc_progress(capture_job, 0.5, "Checking for scroll-loaded assets")
repeat_while_exception(scroll_browser, arglist=[browser], raise_after_timeout=False)
inc_progress(capture_job, 1, "Fetching media")
with warn_on_exception("Error fetching media"):
dom_trees = get_all_dom_trees(browser)
media_urls = get_media_tags(dom_trees)
# grab all media urls that aren't already being grabbed,
# each in its own background thread
for media_url in media_urls - requested_urls:
add_thread(thread_list, ProxiedRequestThread(proxy_address, media_url, requested_urls, proxied_responses, capture_user_agent))
# Wait AFTER_LOAD_TIMEOUT seconds for any requests that are started shortly to finish
inc_progress(capture_job, 1, "Waiting for post-load requests")
# everything is slower via the proxy; give it time to catch up
if proxy:
time.sleep(settings.PROXY_POST_LOAD_DELAY)
unfinished_proxied_pairs = [pair for pair in proxied_pairs if not pair[1]]
load_time = time.time()
with browser_running(browser):
while unfinished_proxied_pairs and browser_still_running(browser):
if proxied_responses["limit_reached"]:
stop = True
print("Size limit reached: not waiting for additional pending requests.")
break
print("Waiting for %s pending requests" % len(unfinished_proxied_pairs))
# give up after AFTER_LOAD_TIMEOUT seconds
wait_time = time.time() - load_time
if wait_time > AFTER_LOAD_TIMEOUT:
stop = True
print("Waited %s seconds to finish post-load requests -- giving up." % AFTER_LOAD_TIMEOUT)
break
# Show progress to user
inc_progress(capture_job, wait_time/AFTER_LOAD_TIMEOUT, "Waiting for post-load requests")
# Sleep and update our list
time.sleep(.5)
unfinished_proxied_pairs = [pair for pair in unfinished_proxied_pairs if not pair[1]]
# screenshot capture of html pages (not pdf, etc.)
# (after all requests have loaded for best quality)
if have_html and browser_still_running(browser):
inc_progress(capture_job, 1, "Taking screenshot")
screenshot = get_screenshot(link, browser)
else:
safe_save_fields(link.screenshot_capture, status='failed')
except HaltCaptureException:
print("HaltCaptureException thrown")
except SoftTimeLimitExceeded:
capture_job.link.tags.add('timeout-failure')
except: # noqa
logger.exception(f"Exception while capturing job {capture_job.link_id}:")
finally:
try:
teardown(link, thread_list, browser, display, warcprox_controller, warcprox_thread)
# save page metadata
if have_html:
if page_metadata:
process_metadata(page_metadata, link)
else:
meta_tag_analysis_failed(link)
if have_content:
inc_progress(capture_job, 1, "Saving web archive file")
save_warc(warcprox_controller, capture_job, link, content_type, screenshot, successful_favicon_urls)
print("%s capture succeeded." % link.guid)
else:
print("%s capture failed." % link.guid)
except: # noqa
logger.exception(f"Exception while finishing job {capture_job.link_id}:")
finally:
capture_job.link.captures.filter(status='pending').update(status='failed')
if capture_job.status == 'in_progress':
capture_job.mark_failed('Failed during capture.')
run_task(run_next_capture.s())
@shared_task()
def update_stats():
"""
run once per minute by celerybeat. logs our minute-by-minute activity,
and also rolls our weekly stats (perma.models.WeekStats)
"""
# On the first minute of the new week, roll our weekly stats entry
now = timezone.now()
if now.weekday() == 6 and now.hour == 0 and now.minute == 0:
week_to_close = WeekStats.objects.latest('start_date')
week_to_close.end_date = now
week_to_close.save()
new_week = WeekStats(start_date=now)
new_week.save()
# We only need to keep a day of data for our visualization.
# TODO: 1560 minutes is 26 hours, which likely doesn't
# cover everyone outside of the east coast. Our vis should
# be timezone aware. Fix this.
if MinuteStats.objects.all().count() == 1560:
MinuteStats.objects.all()[0].delete()
# Add our new minute measurements
a_minute_ago = now - timedelta(seconds=60)
links_sum = Link.objects.filter(creation_timestamp__gt=a_minute_ago).count()
users_sum = LinkUser.objects.filter(date_joined__gt=a_minute_ago).count()
organizations_sum = Organization.objects.filter(date_created__gt=a_minute_ago).count()
registrars_sum = Registrar.objects.approved().filter(date_created__gt=a_minute_ago).count()
new_minute_stat = MinuteStats(links_sum=links_sum, users_sum=users_sum,
organizations_sum=organizations_sum, registrars_sum=registrars_sum)
new_minute_stat.save()
# Add our minute activity to our current weekly sum
if links_sum or users_sum or organizations_sum or registrars_sum:
current_week = WeekStats.objects.latest('start_date')
current_week.end_date = now
current_week.links_sum += links_sum
current_week.users_sum += users_sum
current_week.organizations_sum += organizations_sum
current_week.registrars_sum += registrars_sum
current_week.save()
@shared_task(acks_late=True) # use acks_late for tasks that can be safely re-run if they fail
def cache_playback_status_for_new_links():
links = Link.objects.permanent().filter(cached_can_play_back__isnull=True)
queued = 0
for link_guid in links.values_list('guid', flat=True):
cache_playback_status.delay(link_guid)
queued = queued + 1
logger.info(f"Queued {queued} links to have their playback status cached.")
@shared_task(acks_late=True) # use acks_late for tasks that can be safely re-run if they fail
def cache_playback_status(link_guid):
link = Link.objects.get(guid=link_guid)
link.cached_can_play_back = link.can_play_back()
if link.tracker.has_changed('cached_can_play_back'):
link.save(update_fields=['cached_can_play_back'])
@shared_task(acks_late=True) # use acks_late for tasks that can be safely re-run if they fail
def delete_from_internet_archive(link_guid):
if not settings.UPLOAD_TO_INTERNET_ARCHIVE:
return
link = Link.objects.get(guid=link_guid)
item = internetarchive.get_item(link.ia_identifier)
metadata_identifiers = [
f"{link.ia_identifier}_meta.sqlite",
f"{link.ia_identifier}_meta.xml",
f"{link.ia_identifier}_files.xml"
]
if not item.exists:
logger.info(f"Link {link.guid} not present in IA: skipping.")
return False
link.internet_archive_upload_status = 'deleted'
for f in item.files:
# from https://internetarchive.readthedocs.io/en/latest/api.html#deleting, Note: Some system files, such as <itemname>_meta.xml, cannot be deleted.
if f['name'] in metadata_identifiers:
logger.info(f"Link {link.guid}: skipping deletion of metadata file {f['name']}.")
else:
ia_file = item.get_file(f['name'])
try:
logger.info(f"Link {link.guid}: deleting {f['name']}.")
ia_file.delete(
verbose=True,
cascade_delete=True,
access_key=settings.INTERNET_ARCHIVE_ACCESS_KEY,
secret_key=settings.INTERNET_ARCHIVE_SECRET_KEY,
)
except Exception:
link.internet_archive_upload_status = 'deletion_incomplete'
logger.exception(f"Link {link.guid}: attempt to delete file {f['name']} from Internet Archive failed:")
metadata = {
"description": "",
"contributor": "",
"sponsor": "",
"submitted_url": "",
"perma_url": "",
"title": "Removed",
"external-identifier": "",
"imagecount": "",
}
logger.info(f"Link {link.guid}: zeroing out metadata.")
try:
item.modify_metadata(
metadata,
access_key=settings.INTERNET_ARCHIVE_ACCESS_KEY,
secret_key=settings.INTERNET_ARCHIVE_SECRET_KEY,
)
except Exception:
link.internet_archive_upload_status = 'deletion_incomplete'
logger.exception(f"Link {link.guid}: attempt to zero out metadata on Internet Archive failed:")
link.save(update_fields=['internet_archive_upload_status'])
@shared_task(acks_late=True) # use acks_late for tasks that can be safely re-run if they fail
def delete_all_from_internet_archive(guids=None, limit=None):
if not settings.UPLOAD_TO_INTERNET_ARCHIVE:
return
if guids:
links = Link.objects.filter(guid__in=guids)
else:
links = Link.objects.filter(internet_archive_upload_status__in=['deletion_required', 'deletion_incomplete'])
if limit:
links = links[:limit]
queued = 0
for link_guid in links.values_list('guid', flat=True):
delete_from_internet_archive.delay(link_guid)
queued = queued + 1
logger.info(f"Queued {queued} links for deletion from IA.")
@shared_task(acks_late=True) # use acks_late for tasks that can be safely re-run if they fail
def upload_all_to_internet_archive(limit=None):
if not settings.UPLOAD_TO_INTERNET_ARCHIVE:
return
links = Link.objects.visible_to_ia().filter(
internet_archive_upload_status__in=['not_started', 'failed', 'upload_or_reupload_required', 'deleted']
)
if limit:
links = links[:limit]
queued = 0
for link_guid in links.values_list('guid', flat=True):
upload_to_internet_archive.delay(link_guid)
queued = queued + 1
logger.info(f"Queued {queued} links for upload to IA.")
@shared_task()
def upload_to_internet_archive(link_guid):
"""
Call synchronously from the Django shell with the invocation:
>>> upload_to_internet_archive.apply(kwargs={"link_guid": 'AAAA-AAAA'})
"""
if not settings.UPLOAD_TO_INTERNET_ARCHIVE:
return
link = Link.objects.get(guid=link_guid)
if not link.can_upload_to_internet_archive():
logger.info(f"Queued Link {link_guid} no longer eligible for upload.")
return
url = remove_control_characters(link.submitted_url)
metadata = {
"collection": settings.INTERNET_ARCHIVE_COLLECTION,
"title": f"{link_guid}: {truncatechars(link.submitted_title, 50)}",
"mediatype": "web",
"description": f"Perma.cc archive of {url} created on {link.creation_timestamp}.",
"contributor": "Perma.cc",
"submitted_url": url,
"perma_url": protocol() + settings.HOST + reverse('single_permalink', args=[link.guid]),
"external-identifier": f"urn:X-perma:{link_guid}",
}
temp_warc_file = tempfile.TemporaryFile()
try:
item = internetarchive.get_item(link.ia_identifier)
if item.exists:
if not item.metadata.get('title') or item.metadata['title'] == 'Removed':
# if item already exists (but has been removed),
# ia won't update its metadata when we attempt to re-upload:
# we have to explicitly modify the metadata, then upload.
logger.info(f"Link {link_guid} previously removed from IA: updating metadata")
item.modify_metadata(
metadata,
access_key=settings.INTERNET_ARCHIVE_ACCESS_KEY,
secret_key=settings.INTERNET_ARCHIVE_SECRET_KEY,
)
else:
logger.info(f"Link {link_guid} was already uploaded to IA: skipping.")
return
# copy warc to local disk storage for upload
with default_storage.open(link.warc_storage_file()) as warc_file:
copy_file_data(warc_file, temp_warc_file)
temp_warc_file.seek(0)
logger.info(f"Uploading Link {link_guid} to IA.")
warc_name = os.path.basename(link.warc_storage_file())
response_list = internetarchive.upload(
link.ia_identifier,
{warc_name: temp_warc_file},
metadata=metadata,
access_key=settings.INTERNET_ARCHIVE_ACCESS_KEY,
secret_key=settings.INTERNET_ARCHIVE_SECRET_KEY,
retries=2,
retries_sleep=5,
verbose=True,
)
response_list[0].raise_for_status()
link.internet_archive_upload_status = 'completed'
except Exception:
logger.exception(f"Exception while uploading Link {link.guid} to IA:")
link.internet_archive_upload_status = 'failed'
finally:
temp_warc_file.close()
link.save(update_fields=['internet_archive_upload_status'])
@shared_task()
def send_js_errors():
"""
finds all uncaught JS errors recorded in the last week, sends a report if errors exist
"""
errors = UncaughtError.objects.filter(
created_at__gte=timezone.now() - timedelta(days=7),
resolved=False)
if errors:
formatted_errors = map(lambda err: err.format_for_reading(), errors)
send_self_email("Uncaught Javascript errors",
HttpRequest(),
'email/admin/js_errors.txt',
{'errors': formatted_errors})
return errors
@shared_task()
def verify_webrecorder_api_available():
"""
UptimeRobot-like helper to verify that the Webrecorder API is available.
Necessary because the api should not be exposed to the public internet.
"""
r = requests.get(
f'{settings.WR_API}.json',
timeout=10,
allow_redirects=False
)
r.raise_for_status()
assert "Webrecorder API" in r.text
@shared_task()
def sync_subscriptions_from_perma_payments():
"""
Perma only learns about changes to a customer's record in Perma
Payments when the user transacts with Perma. For admin convenience,
refresh Perma's records on demand.
"""
customers = LinkUser.objects.filter(in_trial=False)
for customer in customers:
try:
customer.get_subscription()
except PermaPaymentsCommunicationException:
# This gets logged inside get_subscription; don't duplicate logging here
pass
@shared_task(acks_late=True)
def populate_warc_size_fields(limit=None):
"""
One-time task, to populate the warc_size field for links where we missed it, the first time around.
See https://github.com/harvard-lil/perma/issues/2617 and https://github.com/harvard-lil/perma/issues/2172;
old links also often lack this metadata.
"""
links = Link.objects.filter(warc_size__isnull=True, cached_can_play_back=True)
if limit:
links = links[:limit]
queued = 0
for link_guid in links.values_list('guid', flat=True):
populate_warc_size.delay(link_guid)
queued = queued + 1
logger.info(f"Queued {queued} links for populating warc_size.")
@shared_task(acks_late=True)
def populate_warc_size(link_guid):
"""
One-time task, to populate the warc_size field for links where we missed it, the first time around.
See https://github.com/harvard-lil/perma/issues/2617 and https://github.com/harvard-lil/perma/issues/2172;
old links also often lack this metadata.
"""
link = Link.objects.get(guid=link_guid)
link.warc_size = default_storage.size(link.warc_storage_file())
link.save(update_fields=['warc_size'])
|
tests/end_to_end_tests/trainer_test.py | Deci-AI/super-gradients | 308 | 11160880 | <reponame>Deci-AI/super-gradients<filename>tests/end_to_end_tests/trainer_test.py<gh_stars>100-1000
import shutil
import unittest
import super_gradients
import torch
import os
from super_gradients import SgModel, ClassificationTestDatasetInterface
from super_gradients.training.metrics import Accuracy, Top5
class TestTrainer(unittest.TestCase):
@classmethod
def setUp(cls):
super_gradients.init_trainer()
# NAMES FOR THE EXPERIMENTS TO LATER DELETE
cls.folder_names = ['test_train', 'test_save_load', 'test_load_w', 'test_load_w2',
'test_load_w3', 'test_checkpoint_content', 'analyze']
cls.training_params = {"max_epochs": 1,
"silent_mode": True,
"lr_decay_factor": 0.1,
"initial_lr": 0.1,
"lr_updates": [4],
"lr_mode": "step",
"loss": "cross_entropy", "train_metrics_list": [Accuracy(), Top5()],
"valid_metrics_list": [Accuracy(), Top5()],
"loss_logging_items_names": ["Loss"], "metric_to_watch": "Accuracy",
"greater_metric_to_watch_is_better": True}
@classmethod
def tearDownClass(cls) -> None:
# ERASE ALL THE FOLDERS THAT WERE CREATED DURING THIS TEST
for folder in cls.folder_names:
if os.path.isdir(os.path.join('checkpoints', folder)):
shutil.rmtree(os.path.join('checkpoints', folder))
@staticmethod
def get_classification_trainer(name=''):
model = SgModel(name, model_checkpoints_location='local')
dataset_params = {"batch_size": 4}
dataset = ClassificationTestDatasetInterface(dataset_params=dataset_params)
model.connect_dataset_interface(dataset)
model.build_model("resnet18_cifar")
return model
def test_train(self):
model = self.get_classification_trainer(self.folder_names[0])
model.train(training_params=self.training_params)
def test_save_load(self):
model = self.get_classification_trainer(self.folder_names[1])
model.train(training_params=self.training_params)
model.build_model("resnet18_cifar", checkpoint_params={'load_checkpoint': True})
def test_load_only_weights_from_ckpt(self):
# Create a checkpoint with 100% accuracy
model = self.get_classification_trainer(self.folder_names[2])
params = self.training_params.copy()
params['max_epochs'] = 3
model.train(training_params=params)
# Build a model that continues the training
model = self.get_classification_trainer(self.folder_names[3])
model.build_model('resnet18_cifar', checkpoint_params={"load_checkpoint": True, "load_weights_only": False,
"source_ckpt_folder_name": self.folder_names[2]}
)
self.assertTrue(model.best_metric > -1)
self.assertTrue(model.start_epoch != 0)
# start_epoch is not initialized, adding to max_epochs
self.training_params['max_epochs'] += 3
model.train(training_params=self.training_params)
# Build a model that loads the weights and starts from scratch
model = self.get_classification_trainer(self.folder_names[4])
model.build_model('resnet18_cifar', checkpoint_params={"load_checkpoint": True, "load_weights_only": True,
"source_ckpt_folder_name": self.folder_names[2]}
)
self.assertTrue(model.best_metric == -1)
self.assertTrue(model.start_epoch == 0)
self.training_params['max_epochs'] += 3
model.train(training_params=self.training_params)
def test_checkpoint_content(self):
"""VERIFY THAT ALL CHECKPOINTS ARE SAVED AND CONTAIN ALL THE EXPECTED KEYS"""
model = self.get_classification_trainer(self.folder_names[5])
params = self.training_params.copy()
params["save_ckpt_epoch_list"] = [1]
model.train(training_params=params)
ckpt_filename = ['ckpt_best.pth', 'ckpt_latest.pth', 'ckpt_epoch_1.pth']
ckpt_paths = [os.path.join(model.checkpoints_dir_path, suf) for suf in ckpt_filename]
for ckpt_path in ckpt_paths:
ckpt = torch.load(ckpt_path)
self.assertListEqual(['net', 'acc', 'epoch', 'optimizer_state_dict', 'scaler_state_dict'],
list(ckpt.keys()))
model.save_checkpoint()
weights_only = torch.load(os.path.join(model.checkpoints_dir_path, 'ckpt_latest_weights_only.pth'))
self.assertListEqual(['net'], list(weights_only.keys()))
def test_compute_model_runtime(self):
model = self.get_classification_trainer(self.folder_names[6])
model.compute_model_runtime()
model.compute_model_runtime(batch_sizes=1, input_dims=(3, 224, 224), verbose=False)
model.compute_model_runtime(batch_sizes=[1, 2, 3], verbose=True)
# VERIFY MODEL RETURNS TO PREVIOUS TRAINING MODE
model.net.train()
model.compute_model_runtime(batch_sizes=1, verbose=False)
assert model.net.training, 'MODEL WAS SET TO eval DURING compute_model_runtime, BUT DIDN\'t RETURN TO PREVIOUS'
model.net.eval()
model.compute_model_runtime(batch_sizes=1, verbose=False)
assert not model.net.training, 'MODEL WAS SET TO eval DURING compute_model_runtime, BUT RETURNED TO TRAINING'
# THESE SHOULD HANDLE THE EXCEPTION OF CUDA OUT OF MEMORY
if torch.cuda.is_available():
model._switch_device('cuda')
model.compute_model_runtime(batch_sizes=10000, verbose=False, input_dims=(3, 224, 224))
model.compute_model_runtime(batch_sizes=[10000, 10, 50, 100, 1000, 5000], verbose=True)
def test_predict(self):
model = self.get_classification_trainer(self.folder_names[6])
inputs = torch.randn((5, 3, 32, 32))
targets = torch.randint(0, 5, (5, 1))
model.predict(inputs=inputs, targets=targets)
model.predict(inputs=inputs, targets=targets, half=True)
model.predict(inputs=inputs, targets=targets, half=False, verbose=True)
if __name__ == '__main__':
unittest.main()
|
deepmath/deephol/prune_lib.py | LaudateCorpus1/deepmath | 830 | 11160884 | <gh_stars>100-1000
"""Proof pruning library.
The purpose of this library is to optimize proofs. Currently we
minimize the number of tactic application parameters in order to generate
better training data (with a minimal number of tactic parameters).
"""
from __future__ import absolute_import
from __future__ import division
# Import Type Annotations
from __future__ import print_function
import time
import tensorflow as tf
from typing import List, Text
from deepmath.deephol.public import proof_assistant
from deepmath.deephol import deephol_pb2
from deepmath.deephol import prover_util
from deepmath.proof_assistant import proof_assistant_pb2
from deepmath.public import error
MIN_HARD_NEGATIVES = 5
MAX_HARD_NEGATIVES = 10
def _create_request(goal: proof_assistant_pb2.Theorem, tactic: Text,
params: List[proof_assistant_pb2.Theorem]
) -> proof_assistant_pb2.ApplyTacticRequest:
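# Builds a tactic string of the form "TAC [ THM <fp1> ; THM <fp2> ]", referencing each
# parameter theorem by its fingerprint, and wraps it in an ApplyTacticRequest for the goal.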
tactic = ('%s [ %s ]' % (tactic, ' ; '.join(
['THM %d' % thm.fingerprint for thm in params]))).replace('  ', ' ')
return proof_assistant_pb2.ApplyTacticRequest(
goal=prover_util.theorem_to_goal_proto(goal), tactic=tactic)
def _matches_subgoal(goal: proof_assistant_pb2.Theorem,
thm: proof_assistant_pb2.Theorem):
return (set(list(goal.hypotheses)) == set(list(thm.hypotheses)) and
goal.conclusion == thm.conclusion)
class ParameterPruning(object):
"""Class to do parameter pruning on proof nodes."""
def __init__(self,
theorem_db: proof_assistant_pb2.TheoremDatabase,
hol_wrapper=None):
if hol_wrapper and theorem_db:
tf.logging.warning(
'theorem_db provided will be ignored as hol_wrapper provided.')
self.hol_wrapper = hol_wrapper
if not self.hol_wrapper:
self.hol_wrapper = proof_assistant.ProofAssistant()
for theorem in theorem_db.theorems:
self.hol_wrapper.RegisterTheorem(
proof_assistant_pb2.RegisterTheoremRequest(theorem=theorem))
self.communication_failed = False
def prune_tactic_application(self, goal: proof_assistant_pb2.Theorem,
tapp: deephol_pb2.TacticApplication):
"""Parameter pruning for a single tactic application.
Args:
goal: Goal of the ProofNode to which the tactic application belongs.
tapp: The tactic application to be pruned.
"""
if self.communication_failed:
tf.logging.error('Communication with prover failed. Not pruning...')
return
tactic = tapp.tactic
parameters = tapp.parameters
if not parameters:
return
assert len(parameters) == 1
param = parameters[0]
if param.parameter_type != deephol_pb2.Tactic.THEOREM_LIST:
return
thms = list(param.theorems)
if not thms:
return
index = len(thms) - 1
tactic = tapp.tactic
time_spent = tapp.time_spent
false_positives = []
other_negatives = []
found_true_positive = False
while index >= 0:
thm = thms.pop(index)
request = _create_request(goal, str(tactic), thms)
start_time = time.time()
response = proof_assistant_pb2.ApplyTacticResponse()
try:
response = self.hol_wrapper.ApplyTactic(request)
elapsed_msecs = int((time.time() - start_time) * 1000.0 + 0.5)
time_spent = elapsed_msecs
except error.StatusNotOk as exception:
tf.logging.error(exception)
elapsed_msecs = int((time.time() - start_time) * 1000.0 + 0.5)
if exception.message.startswith(
'Communication') and exception.message.endswith('failed.'):
tf.logging.error('Communication with prover failed. Not pruning...')
self.communication_failed = True
return
if response.HasField('error'):
thms.insert(index, thm)
found_true_positive = True
index -= 1
continue
assert response.HasField('goals'), 'response: %s' % response
new_subgoals = list(response.goals.goals)
no_match = False
if len(new_subgoals) == len(tapp.subgoals):
for i, sg in enumerate(new_subgoals):
if not _matches_subgoal(sg, tapp.subgoals[i]):
no_match = True
break
else:
no_match = True
if no_match:
thms.insert(index, thm)
found_true_positive = True
else:
if found_true_positive:
false_positives.append(thm)
else:
other_negatives.append(thm)
time_spent = elapsed_msecs
index -= 1
del tapp.parameters[0].theorems[:]
tapp.parameters[0].theorems.extend(thms)
tapp.parameters[0].hard_negative_theorems.extend(
false_positives[:MAX_HARD_NEGATIVES])
if len(false_positives) < MIN_HARD_NEGATIVES:
other_negatives.reverse()
tapp.parameters[0].hard_negative_theorems.extend(
other_negatives[:(MIN_HARD_NEGATIVES - len(false_positives))])
tapp.time_spent = time_spent
def prune_tactic_applications(self, proof_node: deephol_pb2.ProofNode):
for proof in proof_node.proofs:
if proof.result == deephol_pb2.TacticApplication.SUCCESS:
self.prune_tactic_application(proof_node.goal, proof)
def prune_closed_tactic_applications(self, proof_node: deephol_pb2.ProofNode):
for proof in proof_node.proofs:
if proof.closed:
assert proof.result == deephol_pb2.TacticApplication.SUCCESS
self.prune_tactic_application(proof_node.goal, proof)
|
examples/model_compress/pruning/mobilenetv2_end2end/pretrain.py | dutxubo/nni | 9,680 | 11160894 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import argparse
from time import gmtime, strftime
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from tqdm import tqdm
import numpy as np
from utils import *
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def run_validation(model, valid_dataloader):
model.eval()
loss_func = nn.CrossEntropyLoss()
acc_list, loss_list = [], []
with torch.no_grad():
for i, (inputs, labels) in enumerate(tqdm(valid_dataloader)):
inputs, labels = inputs.float().to(device), labels.to(device)
preds= model(inputs)
pred_idx = preds.max(1).indices
acc = (pred_idx == labels).sum().item() / labels.size(0)
acc_list.append(acc)
loss = loss_func(preds, labels).item()
loss_list.append(loss)
valid_loss = np.array(loss_list).mean()
valid_acc = np.array(acc_list).mean()
return valid_loss, valid_acc
def run_pretrain(args):
print(args)
torch.set_num_threads(args.n_workers)
model_type = 'mobilenet_v2_torchhub'
pretrained = True # load imagenet weight
experiment_dir = 'pretrained_{}'.format(model_type) if args.experiment_dir is None else args.experiment_dir
os.mkdir(experiment_dir)
checkpoint = None
input_size = 224
n_classes = 120
log = open(experiment_dir + '/pretrain.log', 'w')
model = create_model(model_type=model_type, pretrained=pretrained, n_classes=n_classes,
input_size=input_size, checkpoint=checkpoint)
model = model.to(device)
print(model)
# count_flops(model, device=device)
train_dataset = TrainDataset('./data/stanford-dogs/Processed/train')
train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
valid_dataset = EvalDataset('./data/stanford-dogs/Processed/valid')
valid_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size, shuffle=False)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate, momentum=0.9, weight_decay=args.weight_decay)
best_valid_acc = 0.0
for epoch in range(args.n_epochs):
print('Start training epoch {}'.format(epoch))
loss_list = []
# train
model.train()
for i, (inputs, labels) in enumerate(tqdm(train_dataloader)):
optimizer.zero_grad()
inputs, labels = inputs.float().to(device), labels.to(device)
preds = model(inputs)
loss = criterion(preds, labels)
loss_list.append(loss.item())
loss.backward()
optimizer.step()
# validation
valid_loss, valid_acc = run_validation(model, valid_dataloader)
train_loss = np.array(loss_list).mean()
print('Epoch {}: train loss {:.4f}, valid loss {:.4f}, valid acc {:.4f}'.format
(epoch, train_loss, valid_loss, valid_acc))
log.write('Epoch {}: train loss {:.4f}, valid loss {:.4f}, valid acc {:.4f}\n'.format
(epoch, train_loss, valid_loss, valid_acc))
# save
if valid_acc > best_valid_acc:
best_valid_acc = valid_acc
torch.save(model.state_dict(), experiment_dir + '/checkpoint_best.pt')
log.close()
def parse_args():
parser = argparse.ArgumentParser(description='Example code for pruning MobileNetV2')
parser.add_argument('--experiment_dir', type=str, default=None,
help='directory containing the pretrained model')
parser.add_argument('--checkpoint_name', type=str, default='checkpoint_best.pt',
help='checkpoint of the pretrained model')
# finetuning parameters
parser.add_argument('--n_workers', type=int, default=16,
help='number of threads')
parser.add_argument('--n_epochs', type=int, default=180,
help='number of epochs to train the model')
parser.add_argument('--learning_rate', type=float, default=1e-4)
parser.add_argument('--weight_decay', type=float, default=0.0)
parser.add_argument('--batch_size', type=int, default=32,
help='input batch size for training and inference')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
run_pretrain(args)
|
tests/commands/test_account.py | ftupas/nile | 121 | 11160929 | <filename>tests/commands/test_account.py<gh_stars>100-1000
"""Tests for account commands."""
from unittest.mock import MagicMock, patch
import pytest
from nile.core.account import Account
KEY = "TEST_KEY"
NETWORK = "goerli"
MOCK_ADDRESS = "0x123"
MOCK_INDEX = 0
@pytest.fixture(autouse=True)
def tmp_working_dir(monkeypatch, tmp_path):
monkeypatch.chdir(tmp_path)
return tmp_path
@patch("nile.core.account.Account.deploy")
def test_account_init(mock_deploy):
mock_deploy.return_value = MOCK_ADDRESS, MOCK_INDEX
account = Account(KEY, NETWORK)
assert account.address == MOCK_ADDRESS
assert account.index == MOCK_INDEX
mock_deploy.assert_called_once()
def test_account_multiple_inits_with_same_key():
account = Account(KEY, NETWORK)
account.deploy()
account2 = Account(KEY, NETWORK)
# Check addresses don't match
assert account.address != account2.address
# Check indexing
assert account.index == 0
assert account2.index == 1
@patch("nile.core.account.deploy", return_value=(1, 2))
def test_deploy(mock_deploy):
account = Account(KEY, NETWORK)
with patch("nile.core.account.os.path.dirname") as mock_path:
test_path = "/overriding_path"
mock_path.return_value.replace.return_value = test_path
account.deploy()
mock_deploy.assert_called_with(
"Account",
[str(account.signer.public_key)],
NETWORK,
f"account-{account.index + 1}",
(f"{test_path}/artifacts", f"{test_path}/artifacts/abis"),
)
@patch("nile.core.account.deploy", return_value=(MOCK_ADDRESS, MOCK_INDEX))
@patch("nile.core.account.accounts.register")
def test_deploy_accounts_register(mock_register, mock_deploy):
account = Account(KEY, NETWORK)
mock_register.assert_called_once_with(
account.signer.public_key, MOCK_ADDRESS, MOCK_INDEX, NETWORK
)
@patch("nile.core.account.call_or_invoke")
def test_send_nonce_call(mock_call):
account = Account(KEY, NETWORK)
contract_address, _ = account.deploy()
# Instead of creating and populating a tmp .txt file, this uses the
# deployed account address (contract_address) as the target
account.send(contract_address, "method", [1, 2, 3])
# 'call_or_invoke' is called twice ('get_nonce' and '__execute__')
assert mock_call.call_count == 2
# Check 'get_nonce' call
mock_call.assert_any_call(account.address, "call", "get_nonce", [], NETWORK)
@pytest.mark.parametrize(
"callarray, calldata",
# The following callarray and calldata args test the Account's list comprehensions,
# ensuring they're converted to strings and passed correctly
[([[111]], []), ([[111, 222]], [333, 444, 555])],
)
def test_send_sign_transaction_and_execute(callarray, calldata):
account = Account(KEY, NETWORK)
contract_address, _ = account.deploy()
sig_r, sig_s = [999, 888]
return_signature = [callarray, calldata, sig_r, sig_s]
account.signer.sign_transaction = MagicMock(return_value=return_signature)
with patch("nile.core.account.call_or_invoke") as mock_call:
send_args = [contract_address, "method", [1, 2, 3]]
nonce = 4
account.send(*send_args, nonce)
# Check values are correctly passed to 'sign_transaction'
account.signer.sign_transaction.assert_called_once_with(
calls=[send_args], nonce=nonce, sender=account.address
)
# Check values are correctly passed to '__execute__'
mock_call.assert_called_with(
contract=account.address,
method="__execute__",
network=NETWORK,
params=[
str(len(callarray)),
*(str(elem) for sublist in callarray for elem in sublist),
str(len(calldata)),
*(str(param) for param in calldata),
str(nonce),
],
signature=[str(sig_r), str(sig_s)],
type="invoke",
)
|
exercises/IPython Kernel/soln/mycircle.py | kaishuocheng/jupyter | 748 | 11160939 | <filename>exercises/IPython Kernel/soln/mycircle.py
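# Demonstrates IPython's rich display protocol: when an instance is shown in a notebook,
# the frontend chooses among the _repr_*_ methods defined below (HTML, SVG, LaTeX, JS).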
class MyCircle(object):
def __init__(self, center=(0.0,0.0), radius=1.0, color='blue'):
self.center = center
self.radius = radius
self.color = color
def _repr_html_(self):
return "○ (<b>html</b>)"
def _repr_svg_(self):
return """<svg width="100px" height="100px">
<circle cx="50" cy="50" r="20" stroke="black" stroke-width="1" fill="blue"/>
</svg>"""
def _repr_latex_(self):
return r"$\bigcirc \LaTeX$"
def _repr_javascript_(self):
return "alert('I am a circle!');"
|
recipe_scrapers/settings/v12_settings.py | mathiazom/recipe-scrapers | 811 | 11160966 | <filename>recipe_scrapers/settings/v12_settings.py
# Settings that will make recipe-scrapers>=13.0.0 behave almost identically to recipe-scrapers<13.0.0
SUPPRESS_EXCEPTIONS = True
META_HTTP_EQUIV = True
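# With SUPPRESS_EXCEPTIONS enabled, scraper fields that fail return the per-field
# fallback values below instead of raising.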
ON_EXCEPTION_RETURN_VALUES = {
"title": "",
"total_time": 0,
"yields": "",
"image": "",
"ingredients": [],
"instructions": "",
"ratings": -1,
"reviews": None,
"links": [],
"language": "en",
"nutrients": {},
}
|
torchio/datasets/itk_snap/__init__.py | siahuat0727/torchio | 1,340 | 11160989 | <reponame>siahuat0727/torchio
from .itk_snap import BrainTumor, T1T2, AorticValve
__all__ = [
'BrainTumor',
'T1T2',
'AorticValve',
]
|
src/lib/dataset/datasets/youtube_vis.py | jie311/TraDeS | 475 | 11161081 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pycocotools.coco as coco
from pycocotools.cocoeval import COCOeval
import numpy as np
import json
import os
from collections import defaultdict
from ..generic_dataset import GenericDataset
class youtube_vis(GenericDataset):
num_categories = 40
default_resolution = [352, 640]
class_name = ['person','giant_panda','lizard','parrot','skateboard','sedan',
'ape','dog','snake','monkey','hand','rabbit','duck','cat','cow','fish',
'train','horse','turtle','bear','motorbike','giraffe','leopard',
'fox','deer','owl','surfboard','airplane','truck','zebra','tiger',
'elephant','snowboard','boat','shark','mouse','frog','eagle','earless_seal',
'tennis_racket']
max_objs = 50
cat_ids = {i + 1: i + 1 for i in range(num_categories)}
def __init__(self, opt, split):
self.dataset_version = opt.dataset_version
print('Using Youtube-VIS')
data_dir = os.path.join(opt.data_dir, 'youtube_vis')
if opt.dataset_version in ['train', 'val']:
ann_file = '{}.json'.format(opt.dataset_version)
img_dir = os.path.join(data_dir, '{}/JPEGImages/'.format(opt.dataset_version))
print('ann_file', ann_file)
ann_path = os.path.join(data_dir, 'annotations', ann_file)
self.images = None
# load image list and coco
super(youtube_vis, self).__init__(opt, split, ann_path, img_dir)
self.num_samples = len(self.images)
print('Loaded Youtube-VIS {} {} {} samples'.format(
self.dataset_version, split, self.num_samples))
def _to_float(self, x):
return float("{:.2f}".format(x))
def __len__(self):
return self.num_samples
def run_eval(self, results, save_dir):
print('Finished')
|
api.py | soywod/iris | 149 | 11161099 | <gh_stars>100-1000
#!/usr/bin/env python3
import json
import logging
import os
import quopri
import re
import smtplib
import subprocess
import sys
import threading
from base64 import b64decode
from email import policy
from email.header import Header, decode_header
from email.mime.text import MIMEText
from email.parser import BytesParser, BytesHeaderParser
from email.utils import formataddr, formatdate, make_msgid
from imapclient.imapclient import IMAPClient
log_filename = os.path.join(os.path.dirname(os.path.realpath(__file__)), "iris-api.log")
logging.basicConfig(filename=log_filename, format="[%(asctime)s] %(message)s", level=logging.INFO, datefmt="%Y-%m-%d %H:%M:%S")
imap_client = None
imap_host = imap_port = imap_login = imap_passwd = None
smtp_host = smtp_port = smtp_login = smtp_passwd = <PASSWORD>
no_reply_pattern = r"^.*no[\-_ t]*reply"
def get_contacts():
contacts = set()
fetch = imap_client.fetch("1:*", ["ENVELOPE"])
for [_, data] in fetch.items():
envelope = data[b"ENVELOPE"]
contacts = contacts.union(decode_contacts(envelope.to))
return list(contacts)
def get_emails(last_seq, chunk_size):
global imap_client
emails = []
if last_seq == 0:
return emails
ids = "%d:%d" % (last_seq, last_seq - chunk_size) if (last_seq > chunk_size) else "%d:%d" % (last_seq, 1)
fetch = imap_client.fetch(ids, ["FLAGS", "ENVELOPE", "INTERNALDATE", "BODY.PEEK[HEADER]", "BODYSTRUCTURE", "FLAGS"])
for [uid, data] in fetch.items():
header = BytesHeaderParser(policy=policy.default).parsebytes(data[b"BODY[HEADER]"])
envelope = data[b"ENVELOPE"]
logging.info(data[b"BODYSTRUCTURE"])
struct = data[b"BODYSTRUCTURE"][0] if isinstance(data[b"BODYSTRUCTURE"][0], list) else []
has_attachment = len([mime[0] for mime in struct if mime[0] and mime[0] not in [b"text", b"multipart"]]) > 0
subject = decode_byte(envelope.subject)
from_ = envelope.from_[0]
from_ = "@".join([decode_byte(from_.mailbox), decode_byte(from_.host)])
to = envelope.to[0]
to = "@".join([decode_byte(to.mailbox), decode_byte(to.host)])
date_ = data[b"INTERNALDATE"].strftime("%d/%m/%y, %Hh%M")
email = dict()
email["id"] = uid
email["subject"] = subject
email["from"] = from_
email["to"] = to
email["date"] = date_
email["flags"] = get_flags_str(data[b"FLAGS"], has_attachment)
email["message-id"] = envelope.message_id.decode()
email["reply-to"] = header["Reply-To"] if "Reply-To" in header else None
emails.insert(0, email)
return emails
def get_email(id, format):
global imap_client
fetch = imap_client.fetch([id], ["BODY[]"])
content = get_email_content(id, fetch.popitem()[1][b"BODY[]"])
return content[format]
def get_flags_str(flags, has_attachment):
flags_str = ""
flags_str += "N" if not b"\\Seen" in flags else " "
flags_str += "R" if b"\\Answered" in flags else " "
flags_str += "F" if b"\\Flagged" in flags else " "
flags_str += "D" if b"\\Draft" in flags else " "
flags_str += "@" if has_attachment else " "
return flags_str
def download_attachments(dir, uid, data):
attachments = []
email = BytesParser(policy=policy.default).parsebytes(data)
for part in email.walk():
if part.is_attachment():
attachment_name = part.get_filename()
attachment = open(os.path.expanduser(os.path.join(dir, attachment_name)), "wb")
attachment.write(part.get_payload(decode=True))
attachment.close()
attachments.append(attachment_name)
return attachments
def get_email_content(uid, data):
content = dict(text=None, html=None)
email = BytesParser(policy=policy.default).parsebytes(data)
for part in email.walk():
if part.is_multipart():
continue
if part.get_content_type() == "text/plain":
content["text"] = read_text(part)
continue
if part.get_content_type() == "text/html":
content["html"] = read_html(part, uid)
continue
if content["html"] and not content["text"]:
tmp = open(content["html"], "r")
content["text"] = tmp.read()
tmp.close()
return content
def read_text(part):
payload = part.get_payload(decode=True)
return payload.decode(part.get_charset() or part.get_content_charset() or "utf-8")
def read_html(part, uid):
payload = read_text(part)
preview = write_preview(payload.encode(), uid)
return preview
def write_preview(payload, uid, subtype="html"):
preview = "/tmp/preview-%d.%s" % (uid, subtype)
if not os.path.exists(preview):
tmp = open(preview, "wb")
tmp.write(payload)
tmp.close()
return preview
def decode_byte(byte):
decode_list = decode_header(byte.decode())
def _decode_byte(byte_or_str, encoding):
return byte_or_str.decode(encoding or "utf-8") if type(byte_or_str) is bytes else byte_or_str
return "".join([_decode_byte(val, encoding) for val, encoding in decode_list])
def decode_contacts(contacts):
return list(filter(None.__ne__, [decode_contact(c) for c in contacts or []]))
def decode_contact(contact):
if not contact.mailbox or not contact.host: return None
mailbox = decode_byte(contact.mailbox)
if re.match(no_reply_pattern, mailbox): return None
host = decode_byte(contact.host)
if re.match(no_reply_pattern, host): return None
return "@".join([mailbox, host]).lower()
class PreventLogout(threading.Thread):
def __init__(self):
self.event = threading.Event()
super(PreventLogout, self).__init__()
self.start()
def run(self):
global imap_client
while not self.event.wait(60):
logging.info("NOOP")
imap_client.noop()
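# Main loop: read one JSON request per line from stdin, dispatch on request["type"]
# (login, fetch-emails, fetch-email, download-attachments, select-folder, send-email,
# extract-contacts), and write a single JSON response line back to stdout.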
while True:
request_raw = sys.stdin.readline()
try: request = json.loads(request_raw.rstrip())
except: continue
logging.info("Receive: " + str({key: request[key] for key in request if key not in ["imap-passwd", "smtp-passwd"]}))
if request["type"] == "login":
try:
imap_host = request["imap-host"]
imap_port = request["imap-port"]
imap_login = request["imap-login"]
imap_passwd = request["imap-passwd"]
smtp_host = request["smtp-host"]
smtp_port = request["smtp-port"]
smtp_login = request["smtp-login"]
smtp_passwd = request["smtp-passwd"]
imap_client = IMAPClient(host=imap_host, port=imap_port)
imap_client.login(imap_login, imap_passwd)
PreventLogout()
folders = list(map(lambda folder: folder[2], imap_client.list_folders()))
response = dict(success=True, type="login", folders=folders)
except Exception as error:
response = dict(success=False, type="login", error=str(error))
elif request["type"] == "fetch-emails":
try:
emails = get_emails(request["seq"], request["chunk-size"])
response = dict(success=True, type="fetch-emails", emails=emails)
except Exception as error:
response = dict(success=False, type="fetch-emails", error=str(error))
elif request["type"] == "fetch-email":
try:
email = get_email(request["id"], request["format"])
response = dict(success=True, type="fetch-email", email=email, format=request["format"])
except Exception as error:
response = dict(success=False, type="fetch-email", error=str(error))
elif request["type"] == "download-attachments":
try:
fetch = imap_client.fetch([request["id"]], ["BODY[]"])
attachments = download_attachments(request["dir"], request["id"], fetch.popitem()[1][b"BODY[]"])
response = dict(success=True, type="download-attachments", attachments=attachments)
except Exception as error:
response = dict(success=False, type="download-attachments", error=str(error))
elif request["type"] == "select-folder":
try:
folder = request["folder"]
seq = imap_client.select_folder(folder)[b"UIDNEXT"]
emails = get_emails(seq, request["chunk-size"])
is_folder_selected = True
response = dict(success=True, type="select-folder", folder=folder, seq=seq, emails=emails)
except Exception as error:
response = dict(success=False, type="select-folder", error=str(error))
elif request["type"] == "send-email":
try:
message = MIMEText(request["message"])
for key, val in request["headers"].items(): message[key] = val
message["From"] = formataddr((request["from"]["name"], request["from"]["email"]))
message["Message-Id"] = make_msgid()
smtp = smtplib.SMTP(host=smtp_host, port=smtp_port)
smtp.starttls()
smtp.login(smtp_login, smtp_passwd)
smtp.send_message(message)
smtp.quit()
imap_client.append("Sent", message.as_string())
contacts_file = open(os.path.dirname(sys.argv[0]) + "/.contacts", "a")
contacts_file.write(request["headers"]["To"] + "\n")
contacts_file.close()
response = dict(success=True, type="send-email")
except Exception as error:
response = dict(success=False, type="send-email", error=str(error))
elif request["type"] == "extract-contacts":
try:
contacts = get_contacts()
contacts_file = open(os.path.dirname(sys.argv[0]) + "/.contacts", "w+")
for contact in contacts: contacts_file.write(contact + "\n")
contacts_file.close()
response = dict(success=True, type="extract-contacts")
except Exception as error:
response = dict(success=False, type="extract-contacts", error=str(error))
json_response = json.dumps(response)
logging.info("Send: " + str(json_response))
sys.stdout.write(json_response + "\n")
sys.stdout.flush()
|
util/clevr_test/CLEVR_eval.py | YuJiang01/n2nnmn | 299 | 11161116 | from __future__ import print_function
import argparse
import json
from collections import defaultdict
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument('--questions_file', required=True)
parser.add_argument('--answers_file', required=True)
def main(args):
# Load true answers from questions file
true_answers = []
with open(args.questions_file, 'r') as f:
questions = json.load(f)['questions']
for q in questions:
true_answers.append(q['answer'])
correct_by_q_type = defaultdict(list)
# Load predicted answers
predicted_answers = []
with open(args.answers_file, 'r') as f:
for line in f:
predicted_answers.append(line.strip())
num_true, num_pred = len(true_answers), len(predicted_answers)
assert num_true == num_pred, 'Expected %d answers but got %d' % (
num_true, num_pred)
for i, (true_answer, predicted_answer) in enumerate(zip(true_answers, predicted_answers)):
correct = 1 if true_answer == predicted_answer else 0
correct_by_q_type['Overall'].append(correct)
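# Also bucket accuracy by question type, taken as the final function of the
# ground-truth functional program for that question.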
q_type = questions[i]['program'][-1]['function']
correct_by_q_type[q_type].append(correct)
for q_type, vals in sorted(correct_by_q_type.items()):
vals = np.asarray(vals)
print(q_type, '%d / %d = %.2f' % (vals.sum(), vals.shape[0], 100.0 * vals.mean()))
if __name__ == '__main__':
args = parser.parse_args()
main(args)
|
pyscf/pbc/dft/test/test_gamma_vs_ks.py | robert-anderson/pyscf | 501 | 11161130 | <gh_stars>100-1000
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: <NAME> <<EMAIL>>
#
import unittest
import pyscf.pbc.gto as pbcgto
from pyscf.pbc import dft as pdft
from pyscf.pbc import tools as ptools
cell = pbcgto.Cell()
cell.atom = '''
C 0 0 0
C 1.685068785275 1.685068785275 1.685068785275'''
cell.a = '''
0.000000000, 3.370137571, 3.370137571
3.370137571, 0.000000000, 3.370137571
3.370137571, 3.370137571, 0.000000000
'''
cell.basis = 'gth-szv'
cell.unit = 'B'
cell.pseudo = 'gth-pade'
cell.mesh = [25]*3
cell.verbose = 0
cell.build()
class KnowValues(unittest.TestCase):
def test_gamma_vs_ks_high_cost(self):
mf = pdft.KRKS(cell)
mf.kpts = cell.make_kpts([1,1,3])
ek = mf.kernel()
scell = ptools.super_cell(cell, [1,1,3])
scell.mesh = [25,25,73]
mf = pdft.RKS(scell)
eg = mf.kernel()
self.assertAlmostEqual(ek, eg/3, 5)
if __name__ == '__main__':
print("Full Tests for gamma point vs k-points")
unittest.main()
|
pyxb/bundles/opengis/gml_3_3/ce.py | eLBati/pyxb | 123 | 11161151 | <filename>pyxb/bundles/opengis/gml_3_3/ce.py
from pyxb.bundles.opengis.gml_3_3.raw.ce import *
|
dictionary/urls/edit.py | ankitgc1/django-sozluk-master | 248 | 11161161 | from django.urls import path
from dictionary.views.edit import CommentCreate, CommentUpdate, EntryCreate, EntryUpdate
from dictionary.views.reporting import GeneralReportView
urlpatterns_edit = [
path("entry/update/<int:pk>/", EntryUpdate.as_view(), name="entry_update"),
path("entry/create/", EntryCreate.as_view(), name="entry_create"),
path("entry/<int:pk>/comment/", CommentCreate.as_view(), name="comment_create"),
path("entry/comment/edit/<int:pk>/", CommentUpdate.as_view(), name="comment_update"),
path("contact/", GeneralReportView.as_view(), name="general-report"),
]
|
fum/urls.py | jsavikko/futurice-ldap-user-manager | 111 | 11161176 | <gh_stars>100-1000
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.template import add_to_builtins
from django.contrib import admin
admin.autodiscover()
from views import IndexView
urlpatterns = patterns('',
url(r'^$', IndexView.as_view(), name='index'),
url(r'^', include('fum.common.urls')),
url(r'^users/', include('fum.users.urls')),
url(r'^groups/', include('fum.groups.urls')),
url(r'^servers/', include('fum.servers.urls')),
url(r'^projects/', include('fum.projects.urls')),
url(r'^api/', include('fum.api.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^rest-api/', include('rest_framework_docs.urls')),
url(r'^hsearch/', include('haystack.urls')),
url(r'^history/', include('djangohistory.urls')),
)
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
if settings.DEBUG:
try:
import debug_toolbar
urlpatterns += patterns('',
url(r'^__debug__/', include(debug_toolbar.urls)),
)
    except Exception as e:
print("Debug Toolbar not in use, ignored")
add_to_builtins('fum.common.templatetags.tags')
|
library/oci_instance.py | slmjy/oci-ansible-modules | 106 | 11161210 | #!/usr/bin/python
# Copyright (c) 2017, 2020, Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_instance
short_description: Launch, terminate and control the lifecycle of OCI Compute instances
description:
- This module allows the user to launch/create, terminate and perform other power actions on OCI Compute Service
instances. An instance represents a compute host. The image used to launch the instance determines its operating
system and other software. The shape specified during the launch process determines the number of CPUs and memory
allocated to the instance. For more information, see Overview of the Compute Service at
U(https://docs.us-phoenix-1.oraclecloud.com/Content/Compute/Concepts/computeoverview.htm). In experimental mode,
this module also allows attaching/detaching volumes and boot volumes to an instance.
version_added: "2.5"
options:
availability_domain:
description: The Availability Domain of the instance. Required when creating a compute instance with
I(state=present).
boot_volume_details:
description: Details for attaching/detaching a boot volume to/from an instance. I(boot_volume_details) is
mutually exclusive with I(image_id). This option is only supported in experimental mode. To use
an experimental feature, set the environment variable OCI_ANSIBLE_EXPERIMENTAL to True.
suboptions:
attachment_state:
description: Attach a boot volume to the instance I(instance_id) with I(attachment_state=present).
Detach a boot volume from the instance I(instance_id) with I(attachment_state=absent).
default: present
choices: ['present', 'absent']
boot_volume_id:
description: The OCID of the boot volume.
required: true
compartment_id:
description: The OCID of the compartment. Required when I(state=present).
extended_metadata:
description: Additional metadata key/value pairs that you provide. They serve a similar purpose and
                     functionality to fields in the I(metadata) object. They are distinguished from I(metadata)
fields in that these can be nested JSON objects (whereas 'metadata' fields are string/string maps
only).
If you don't need nested metadata values, it is strongly advised to avoid using this object and
use the Metadata object instead.
fault_domain:
description: A fault domain is a grouping of hardware and infrastructure within an availability domain. Each
availability domain contains three fault domains. Fault domains let you distribute your instances
so that they are not on the same physical hardware within a single availability domain. A hardware
failure or Compute hardware maintenance that affects one fault domain does not affect instances in
other fault domains. If you do not specify the fault domain, the system selects one for you. To
change the fault domain for an instance, terminate it and launch a new instance in the preferred
fault domain. To get a list of fault domains, use M(oci_fault_domain_facts).
metadata:
description: A hash/dictionary of custom key/value pairs that are associated with the instance. This
option is also used to provide information to cloud-init and specifying
"ssh_authorized_keys" for the default user of the instance. This hash is specified
as '{"key":"value"}' and '{"key":"value","key":"value"}'.
display_name:
description: A user-friendly name. Does not have to be unique, and it's changeable. Avoid entering confidential
information.
aliases: ['name']
image_id:
description: The OCID of the image used to boot the instance. I(image_id) is mutually exclusive with
I(boot_volume_details) and I(source_details). This option is deprecated. Use I(source_details)
with I(source_type=image) instead.
instance_id:
description: The OCID of the compute instance. Required for updating an existing compute instance
when I(state=present), for performing power actions (such as start, stop, softreset
or reset) on an instance, and for terminating an instance I(state=absent).
aliases: [ 'id' ]
ipxe_script:
description: custom iPXE script that will run when the instance boots.
preserve_boot_volume:
description: Whether to preserve the boot volume when terminating an instance with I(state=absent).
default: False
type: bool
shape:
description: The shape of the instance. Required when creating a compute instance with I(state=present).
source_details:
description: Details for creating an instance. Use this parameter to specify whether a boot volume or an image
should be used to launch a new instance.
required: true
suboptions:
source_type:
description: The source type for the instance. Use image when specifying the image OCID. Use bootVolume
when specifying the boot volume OCID.
required: true
choices: ['image', 'bootVolume']
image_id:
description: The OCID of the image used to boot the instance. Required if I(source_type) is "image".
boot_volume_size_in_gbs:
description: The size of the boot volume in GBs. Minimum value is 50 GB and maximum value is
16384 GB (16TB). Applicable only when I(source_type=image).
boot_volume_id:
description: The OCID of the boot volume used to boot the instance. Required if I(source_type) is
"bootVolume".
state:
description: The state of the instance that must be asserted to. When I(state=present), and the
compute instance doesn't exist, the instance is launched/created with the specified
details. When I(state=absent), the compute instance is terminated. When
I(state=stopped), the compute instance is powered off. When I(state=running), the
compute instance is powered on. When I(state=softreset), an ACPI shutdown is
initiated and the compute instance is powered on. When I(state=reset), the
compute instance is powered off and then powered on.
Note that I(state=softreset) and I(state=reset) states are not idempotent. Every time a play is
executed with these C(state) options, a shutdown and a power on sequence is executed against the
instance.
default: "present"
choices: ['present', 'absent', 'running', 'reset', 'softreset', 'stopped']
volume_details:
description: Details for attaching or detaching a volume to an instance with I(state=present) or
I(state=RUNNING). This option is only supported in experimental mode. To use an experimental
feature, set the environment variable OCI_ANSIBLE_EXPERIMENTAL to True.
suboptions:
attachment_state:
description: Attach a volume to the instance I(instance_id) with I(attachment_state=present). Detach a
volume from the instance I(instance_id) with I(attachment_state=absent).
default: present
choices: ['present', 'absent']
attachment_name:
description: A user-friendly name. Does not have to be unique, and it cannot be changed. Avoid entering
confidential information.
type:
description: The type of volume. The only supported value is "iscsi".
default: iscsi
choices: ['iscsi']
volume_id:
description: The OCID of the volume to be attached to or detached from the instance I(instance_id).
vnic:
description: Details for the primary VNIC that is automatically created and attached when the instance is
launched. Required when creating a compute instance with I(state=present). Updating any of these
child properties is not supported through this module.
aliases: ['create_vnic_details']
suboptions:
assign_public_ip:
description: Determines whether the VNIC should be assigned a public IP address. If
not set and the VNIC is being created in a private subnet (that is,
where I(prohibitPublicIpOnVnic = true) in the Subnet), then no public
IP address is assigned. If not set and the subnet is public
I(prohibitPublicIpOnVnic = false), then a public IP address is
assigned. If set to true and I(prohibitPublicIpOnVnic = true),
an error is returned.
Note this field will be used on initial create but will not be considered when
determining whether to match an existing resource or create a new one.
hostname_label:
description: The hostname for the VNIC's primary private IP. Used for DNS. The value
is the hostname portion of the primary private IP's fully qualified
domain name (FQDN) (for example, bminstance-1 in FQDN
bminstance-1.subnet123.vcn1.oraclevcn.com). Must be unique across all
VNICs in the subnet and comply with RFC 952 and RFC 1123.
Note this field will be used on initial create but will not be considered when
determining whether to match an existing resource or create a new one.
name:
description: A user-friendly name for the VNIC. Does not have to be unique.
Note this field will be used on initial create but will not be considered when
determining whether to match an existing resource or create a new one.
nsg_ids:
description: A list of the OCIDs of the network security groups (NSGs) to add the VNIC to.
For more information about NSGs, see NetworkSecurityGroup L(NetworkSecurityGroup,
https://docs.cloud.oracle.com/iaas/api/#/en/iaas/20160918/NetworkSecurityGroup/).
Note this field will be used on initial create but will not be considered when
determining whether to match an existing resource or create a new one.
private_ip:
description: The private IP to assign to the VNIC. Must be an available IP address
within the subnet's CIDR. If you don't specify a value, Oracle
automatically assigns a private IP address from the subnet. This is
the VNIC's primary private IP address.
Note this field will be used on initial create but will not be considered when
determining whether to match an existing resource or create a new one.
skip_source_dest_check:
description: Determines whether the source/destination check is disabled on the VNIC.
Defaults to false, which means the check is performed.
Note this field will be used on initial create but will not be considered when
determining whether to match an existing resource or create a new one.
default: false
subnet_id:
description: The OCID of the subnet to create the VNIC in.
Note this field will be used on initial create but will not be considered when
determining whether to match an existing resource or create a new one.
required: true
is_pv_encryption_in_transit_enabled:
description: Whether to enable in-transit encryption for the boot volume's paravirtualized attachment.
The default value is false.
author: "<NAME> (@sivakumart)"
extends_documentation_fragment: [ oracle, oracle_creatable_resource, oracle_wait_options, oracle_tags ]
"""
EXAMPLES = """
- name: Launch/create an instance using an image, with custom metadata and a private IP assignment
oci_instance:
name: myinstance1
availability_domain: "BnQb:PHX-AD-1"
compartment_id: "ocid1.compartment.oc1..xxxxxEXAMPLExxxxx...vm62xq"
shape: "BM.Standard1.36"
metadata:
foo: bar
baz: quux
source_details:
source_type: image
image_id: ocid1.image.oc1.phx.xxxxxEXAMPLExxxxx
volume_details:
attachment_state: present
volume_id: ocid1.volume.oc1.phx.xxxxxEXAMPLExxxxx
vnic:
hostname_label: "myinstance1"
private_ip: "10.0.0.5"
subnet_id: "ocid1.subnet.oc1.phx.xxxxxEXAMPLExxxxx...5iddusmpqpaoa"
- name: Launch/create an instance using a boot volume, a private IP assignment and attach a volume, and a specific
fault domain
oci_instance:
name: myinstance2
availability_domain: "BnQb:PHX-AD-1"
fault_domain: "FAULT-DOMAIN-2"
source_details:
source_type: bootVolume
boot_volume_id: ocid1.bootvolume.oc1.iad.xxxxxEXAMPLExxxxx
compartment_id: "ocid1.compartment.oc1..xxxxxEXAMPLExxxxx...vm62xq"
shape: "BM.Standard1.36"
volume_details:
attachment_state: present
volume_id: ocid1.volume.oc1.phx.xxxxxEXAMPLExxxxx
vnic:
hostname_label: "myinstance2"
private_ip: "10.0.0.6"
subnet_id: "ocid1.subnet.oc1.phx.xxxxxEXAMPLExxxxx...5iddusmpqpaoa"
- name: Launch/create an instance using an image with custom boot volume size
oci_instance:
name: myinstance1
availability_domain: "BnQb:PHX-AD-1"
compartment_id: "ocid1.compartment.oc1..xxxxxEXAMPLExxxxx...vm62xq"
shape: "BM.Standard1.36"
source_details:
source_type: image
image_id: ocid1.image.oc1.phx.xxxxxEXAMPLExxxxx
boot_volume_size_in_gbs: 100
vnic:
hostname_label: "myinstance1"
subnet_id: "ocid1.subnet.oc1.phx.xxxxxEXAMPLExxxxx...5iddusmpqpaoa"
- name: Update an instance's name
oci_instance:
name: myinstance1-new-name
id: "ocid1.instance.oc1.phx.xxxxxEXAMPLExxxxx...lxiggdq"
- name: Detach a volume from an instance
oci_instance:
id: "ocid1.instance.oc1.phx.xxxxxEXAMPLExxxxx...lxiggdq"
volume_details:
attachment_state: absent
volume_id: ocid1.volume.oc1.phx.xxxxxEXAMPLExxxxx
- name: Stop an instance
oci_instance:
id: "ocid1.instance.oc1.phx.xxxxxEXAMPLExxxxx...lxiggdq"
state: "stopped"
- name: Stop an instance and detach boot volume
oci_instance:
id: "ocid1.instance.oc1.phx.xxxxxEXAMPLExxxxx...lxiggdq"
state: "stopped"
boot_volume_details:
boot_volume_id: ocid1.bootvolume.oc1.iad.xxxxxEXAMPLExxxxx
attachment_state: absent
- name: Attach a boot volume & Start an instance
oci_instance:
id: "ocid1.instance.oc1.phx.xxxxxEXAMPLExxxxx...lxiggdq"
state: "running"
boot_volume_details:
boot_volume_id: ocid1.bootvolume.oc1.iad.xxxxxEXAMPLExxxxx
- name: Reset an instance
oci_instance:
id: "ocid1.instance.oc1.phx.xxxxxEXAMPLExxxxx...lxiggdq"
state: "reset"
- name: Terminate/delete an instance
oci_instance:
id: "ocid1.instance.oc1.phx.xxxxxEXAMPLExxxxx...lxiggdq"
state: "absent"
- name: Terminate/delete an instance and preserve boot volume
oci_instance:
id: "ocid1.instance.oc1.phx.xxxxxEXAMPLExxxxx...lxiggdq"
state: "absent"
preserve_boot_volume: yes
"""
RETURN = """
instance:
description: Details of the OCI compute instance launched, updated or terminated as a result of the current operation
returned: On successful operation (create, update and terminate) on a single Compute instance
type: complex
contains:
availability_domain:
description: The Availability Domain the instance is running in.
returned: always
type: string
sample: BnQb:PHX-AD-1
boot_volume_attachment:
description: Information of the boot volume attachment.
returned: In experimental mode.
type: dict
contains:
availability_domain:
description: The Availability Domain of the instance.
returned: always
type: string
sample: BnQb:PHX-AD-1
boot_volume_id:
description: The OCID of the boot volume.
returned: always
type: string
sample: ocid1.bootvolume.oc1.iad.xxxxxEXAMPLExxxxx
compartment_id:
description: The OCID of the compartment.
returned: always
type: string
sample: ocid1.compartment.oc1..xxxxxEXAMPLExxxxx
display_name:
description: A user-friendly name. Does not have to be unique, and it cannot be changed.
returned: always
type: string
sample: My boot volume attachment
id:
description: The OCID of the boot volume attachment.
returned: always
type: string
sample: ocid1.instance.oc1.iad.xxxxxEXAMPLExxxxx
instance_id:
description: The OCID of the instance the boot volume is attached to.
returned: always
type: string
sample: ocid1.instance.oc1.phx.xxxxxEXAMPLExxxxx
lifecycle_state:
description: The current state of the boot volume attachment.
returned: always
type: string
sample: ATTACHED
time_created:
description: The date and time the boot volume was created, in the format defined by RFC3339.
returned: always
type: string
sample: 2016-08-25T21:10:29.600Z
compartment_id:
description: The OCID of the compartment that contains the instance.
returned: always
type: string
sample: ocid1.compartment.oc1..xxxxxEXAMPLExxxxx....62xq
display_name:
description: A user-friendly name for the instance
returned: always
type: string
sample: ansible-instance-968
extended_metadata:
description: Additional key-value pairs associated with the instance
returned: always
type: dict(str, str)
sample: {'foo': 'bar'}
fault_domain:
description: The name of the fault domain the instance is running in. A fault domain is a grouping of
hardware and infrastructure within an availability domain. Each availability domain contains
three fault domains. Fault domains let you distribute your instances so that they are not on
the same physical hardware within a single availability domain. A hardware failure or Compute
hardware maintenance that affects one fault domain does not affect instances in other fault
domains. If you do not specify the fault domain, the system selects one for you. To change the
fault domain for an instance, terminate it and launch a new instance in the preferred fault
domain.
returned: always
type: string
sample: "FAULT-DOMAIN-1"
id:
description: The OCID of the instance.
returned: always
type: string
sample: ocid1.instance.oc1.phx.xxxxxEXAMPLExxxxx
image_id:
description: The OCID of the image that the instance is based on
returned: always
type: string
sample: ocid1.image.oc1.iad.xxxxxEXAMPLExxxxx
ipxe_script:
description: A custom iPXE script that will run when the instance boots
returned: always
type: string
sample: null
lifecycle_state:
description: The current state of the instance.
returned: always
type: string
sample: TERMINATED
metadata:
description: Custom metadata that was associated with the instance
returned: always
type: dict(str, str)
sample: {"foo": "bar"}
region:
description: The region that contains the Availability Domain the instance is running in.
returned: always
type: string
sample: phx
shape:
description: The shape of the instance. The shape determines the number of CPUs and the amount of memory
allocated to the instance.
returned: always
type: string
sample: BM.Standard1.36
time_created:
description: The date and time the instance was created, in the format defined by RFC3339
returned: always
type: string
sample: 2017-11-20T04:52:54.541000+00:00
volume_attachments:
description: List of information about volume attachments
returned: In experimental mode.
type: complex
contains:
attachment_type:
description: The type of volume attachment.
returned: always
type: string
sample: iscsi
availability_domain:
description: The Availability Domain of an instance.
returned: always
type: string
sample: BnQb:PHX-AD-1
chap_secret:
description: The Challenge-Handshake-Authentication-Protocol (CHAP) secret valid for the associated CHAP
user name. (Also called the "CHAP password".)
returned: always
type: string
sample: d6866c0d-298b-48ba-95af-309b4faux45e
chap_username:
description: The volume's system-generated Challenge-Handshake-Authentication-Protocol (CHAP) user name.
returned: always
type: string
sample: ocid1.volume.oc1.phx.xxxxxEXAMPLExxxxx
compartment_id:
description: The OCID of the compartment.
returned: always
type: string
sample: ocid1.compartment.oc1..xxxxxEXAMPLExxxxx
display_name:
description: A user-friendly name. Does not have to be unique, and it cannot be changed.
returned: always
type: string
sample: My volume attachment
id:
description: The OCID of the volume attachment.
returned: always
type: string
sample: ocid1.volumeattachment.oc1.phx.xxxxxEXAMPLExxxxx
instance_id:
description: The OCID of the instance the volume is attached to.
returned: always
type: string
sample: ocid1.instance.oc1.phx.xxxxxEXAMPLExxxxx
ipv4:
description: The volume's iSCSI IP address.
returned: always
type: string
sample: 169.254.0.2
iqn:
description: The target volume's iSCSI Qualified Name in the format defined by RFC 3720.
returned: always
type: string
sample: iqn.2015-12.us.oracle.com:456b0391-17b8-4122-bbf1-f85fc0bb97d9
lifecycle_state:
description: The current state of the volume attachment.
returned: always
type: string
sample: ATTACHED
port:
description: The volume's iSCSI port.
returned: always
type: int
sample: 3260
time_created:
description: The date and time the volume was created, in the format defined by RFC3339.
returned: always
type: string
sample: 2016-08-25T21:10:29.600Z
volume_id:
description: The OCID of the volume.
returned: always
type: string
sample: ocid1.volume.oc1.phx.xxxxxEXAMPLExxxxx
launch_options:
description:
- ""
returned: on success
type: complex
contains:
boot_volume_type:
description:
- "Emulation type for volume.
* `ISCSI` - ISCSI attached block storage device. This is the default for Boot Volumes and Remote Block
Storage volumes on Oracle provided images.
* `SCSI` - Emulated SCSI disk.
* `IDE` - Emulated IDE disk.
* `VFIO` - Direct attached Virtual Function storage. This is the default option for Local data
volumes on Oracle provided images.
* `PARAVIRTUALIZED` - Paravirtualized disk."
returned: on success
type: string
sample: ISCSI
firmware:
description:
- "Firmware used to boot VM. Select the option that matches your operating system.
* `BIOS` - Boot VM using BIOS style firmware. This is compatible with both 32 bit and 64 bit operating
systems that boot using MBR style bootloaders.
* `UEFI_64` - Boot VM using UEFI style firmware compatible with 64 bit operating systems. This is the
default for Oracle provided images."
returned: on success
type: string
sample: BIOS
network_type:
description:
- "Emulation type for the physical network interface card (NIC).
* `E1000` - Emulated Gigabit ethernet controller. Compatible with Linux e1000 network driver.
* `VFIO` - Direct attached Virtual Function network controller. This is the networking type
when you launch an instance using hardware-assisted (SR-IOV) networking.
* `PARAVIRTUALIZED` - VM instances launch with paravirtualized devices using virtio drivers."
returned: on success
type: string
sample: E1000
remote_data_volume_type:
description:
- "Emulation type for volume.
* `ISCSI` - ISCSI attached block storage device. This is the default for Boot Volumes and Remote Block
Storage volumes on Oracle provided images.
* `SCSI` - Emulated SCSI disk.
* `IDE` - Emulated IDE disk.
* `VFIO` - Direct attached Virtual Function storage. This is the default option for Local data
volumes on Oracle provided images.
* `PARAVIRTUALIZED` - Paravirtualized disk."
returned: on success
type: string
sample: ISCSI
is_pv_encryption_in_transit_enabled:
description:
- Whether to enable in-transit encryption for the boot volume's paravirtualized attachment. The default value is false.
returned: on success
type: bool
sample: true
is_consistent_volume_naming_enabled:
description:
- Whether to enable consistent volume naming feature. Defaults to false.
returned: on success
type: bool
sample: true
sample: [{"availability_domain": "BnQb:PHX-AD-1",
"boot_volume_attachment": {
"availability_domain": "IwGV:US-ASHBURN-AD-1",
"boot_volume_id": "ocid1.bootvolume.oc1.iad.xxxxxEXAMPLExxxxx",
"compartment_id": "ocid1.compartment.oc1..xxxxxEXAMPLExxxxx",
"display_name": "Remote boot attachment for instance",
"id": "ocid1.instance.oc1.iad.xxxxxEXAMPLExxxxx",
"instance_id": "ocid1.instance.oc1.iad.xxxxxEXAMPLExxxxx",
"lifecycle_state": "ATTACHED",
"time_created": "2018-01-15T07:23:10.838000+00:00"
},
"compartment_id": "ocid1.compartment.oc1..xxxxxEXAMPLExxxxx...vm62xq",
"display_name": "ansible-test-968",
"extended_metadata": {},
"fault_domain": "FAULT-DOMAIN-1",
"id": "ocid1.instance.oc1.phx.xxxxxEXAMPLExxxxx....lxiggdq",
"image_id": "ocid1.image.oc1.phx.xxxxxEXAMPLExxxxx....7klnoa",
"ipxe_script": null,
"lifecycle_state": "RUNNING",
"metadata": {"baz": "quux", "foo": "bar"},
"region": "phx",
"shape": "BM.Standard1.36",
"time_created": "2017-11-14T16:09:07.557000+00:00",
"volume_attachments": [{
"attachment_type": "iscsi",
"availability_domain": "BnQb:PHX-AD-1",
"chap_secret": null,
"chap_username": null,
"compartment_id": "ocid1.compartment.oc1..xxxxxEXAMPLExxxxx",
"display_name": "ansible_volume_attachment",
"id": "ocid1.volumeattachment.oc1.phx.xxxxxEXAMPLExxxxx",
"instance_id": "ocid1.instance.oc1.phx.xxxxxEXAMPLExxxxx",
"ipv4": "169.254.2.2",
"iqn": "iqn.2015-12.com.oracleiaas:472a085d-41a9-4c18-ae7d-dea5b296dad3",
"lifecycle_state": "ATTACHED",
"port": 3260,
"time_created": "2017-11-23T11:17:50.139000+00:00",
"volume_id": "ocid1.volume.oc1.phx.xxxxxEXAMPLExxxxx"
}]
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.oracle import oci_utils, oci_compute_utils
from ansible.module_utils.oracle.oci_utils import check_mode
from ansible.module_utils import six
try:
import oci
from oci.core.compute_client import ComputeClient
from oci.core.virtual_network_client import VirtualNetworkClient
from oci.core.models import AttachBootVolumeDetails
from oci.core.models import AttachVolumeDetails
from oci.core.models import LaunchInstanceDetails
from oci.core.models import UpdateInstanceDetails
from oci.core.models import CreateVnicDetails
from oci.core.models import InstanceSourceViaBootVolumeDetails
from oci.core.models import InstanceSourceViaImageDetails
from oci.util import to_dict
from oci.exceptions import ServiceError, MaximumWaitTimeExceeded
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
RESOURCE_NAME = "instance"
def detach_volume(compute_client, module, volume_attachment_id):
result = dict()
result["changed"] = False
try:
volume_attachment = oci_utils.call_with_backoff(
compute_client.get_volume_attachment,
volume_attachment_id=volume_attachment_id,
).data
if volume_attachment.lifecycle_state in ["DETACHING", "DETACHED"]:
result["changed"] = False
result["volume_attachment"] = to_dict(volume_attachment)
else:
oci_utils.call_with_backoff(
compute_client.detach_volume, volume_attachment_id=volume_attachment_id
)
response = oci_utils.call_with_backoff(
compute_client.get_volume_attachment,
volume_attachment_id=volume_attachment_id,
)
result["volume_attachment"] = to_dict(
oci.wait_until(
compute_client, response, "lifecycle_state", "DETACHED"
).data
)
result["changed"] = True
except MaximumWaitTimeExceeded as ex:
module.fail_json(msg=str(ex))
except ServiceError as ex:
module.fail_json(msg=ex.message)
return result
def get_attach_volume_details(instance_id, volume_id, type, attachment_name=None):
attach_volume_details = AttachVolumeDetails()
attach_volume_details.display_name = attachment_name
attach_volume_details.instance_id = instance_id
attach_volume_details.type = type
attach_volume_details.volume_id = volume_id
return attach_volume_details
def attach_volume(compute_client, module, attach_volume_details):
result = dict()
result["changed"] = False
try:
response = oci_utils.call_with_backoff(
compute_client.attach_volume, attach_volume_details=attach_volume_details
)
response = oci_utils.call_with_backoff(
compute_client.get_volume_attachment, volume_attachment_id=response.data.id
)
result["volume_attachment"] = to_dict(
oci.wait_until(compute_client, response, "lifecycle_state", "ATTACHED").data
)
result["changed"] = True
return result
except ServiceError as ex:
module.fail_json(msg=ex.message)
except MaximumWaitTimeExceeded as ex:
module.fail_json(msg=str(ex))
def terminate_instance(compute_client, id, module):
return oci_utils.delete_and_wait(
resource_type=RESOURCE_NAME,
client=compute_client,
get_fn=compute_client.get_instance,
kwargs_get={"instance_id": id},
delete_fn=compute_client.terminate_instance,
kwargs_delete={
"instance_id": id,
"preserve_boot_volume": module.params["preserve_boot_volume"],
},
module=module,
)
def update_instance(compute_client, instance, module):
result = dict()
changed = False
try:
uid = UpdateInstanceDetails()
if not oci_utils.are_attrs_equal(
current_resource=instance,
module=module,
attributes=uid.attribute_map.keys(),
):
# Update-able attributes are unequal, let us update the resource
uid = oci_utils.update_model_with_user_options(
curr_model=instance, update_model=uid, module=module
)
response = oci_utils.call_with_backoff(
compute_client.update_instance,
instance_id=instance.id,
update_instance_details=uid,
)
changed = True
# retain instances for backward compat
# result["instances"] = [to_dict(response.data)]
result["instance"] = to_dict(response.data)
else:
# No change needed, return the current instance
# retain instances for backward compat
# result["instances"] = [to_dict(instance)]
result["instance"] = to_dict(instance)
except ServiceError as ex:
module.fail_json(msg=ex.message)
result["changed"] = changed
return result
def power_action_on_instance(compute_client, id, desired_state, module):
result = {}
changed = False
# The power action to execute on a compute instance to reach the desired 'state'
state_action_map = {
"stopped": "STOP",
"running": "START",
"reset": "RESET",
"softreset": "SOFTRESET",
}
# The desired lifecycle state for the compute instance to reach the user specified 'state'
desired_lifecycle_states = {
"stopped": "STOPPED",
"running": "RUNNING",
"reset": "RUNNING",
"softreset": "RUNNING",
}
try:
response = oci_utils.call_with_backoff(
compute_client.get_instance, instance_id=id
)
curr_state = response.data.lifecycle_state
change_required = False
# We need to perform a power action if the current state doesn't match the desired state
if curr_state != desired_lifecycle_states[desired_state]:
change_required = True
# Resets also require a change
if desired_state in ["softreset", "reset"]:
change_required = True
if change_required:
changed = True
oci_utils.call_with_backoff(
compute_client.instance_action,
instance_id=id,
action=state_action_map[desired_state],
)
response = oci_utils.call_with_backoff(
compute_client.get_instance, instance_id=id
)
# for now the power actions on instances do not go through common utilities for wait.
if module.params.get("wait", None):
debug(
"waiting for lifecycle_state to reach {0}".format(
desired_lifecycle_states[desired_state]
)
)
oci.wait_until(
compute_client,
response,
"lifecycle_state",
desired_lifecycle_states[desired_state],
max_wait_seconds=module.params.get(
"wait_timeout", oci_utils.MAX_WAIT_TIMEOUT_IN_SECONDS
),
)
response = oci_utils.call_with_backoff(
compute_client.get_instance, instance_id=id
)
else:
debug(
"Not waiting for power action request {0} as 'wait' is false.".format(
desired_state
)
)
# retain instances for backward compat
# result["instances"] = [to_dict(response.data)]
result["instance"] = to_dict(response.data)
except ServiceError as ex:
module.fail_json(msg=ex.message)
except MaximumWaitTimeExceeded as ex:
module.fail_json(msg=str(ex))
result["changed"] = changed
return result
def launch_instance(compute_client, module):
lid = get_launch_instance_details(module)
cvd = get_vnic_details(module)
lid.create_vnic_details = cvd
debug("Provisioning " + str(lid))
result = oci_utils.create_and_wait(
resource_type=RESOURCE_NAME,
client=compute_client,
create_fn=compute_client.launch_instance,
kwargs_create={"launch_instance_details": lid},
get_fn=compute_client.get_instance,
get_param="instance_id",
module=module,
)
return result
def get_vnic_details(module):
vnic_details = module.params.get("vnic", None)
if not vnic_details:
        # Primary VNIC details (especially subnet_id) are required
module.fail_json(
msg="state is present and instance_id is not specified, but create_vnic_details is not "
"specified."
)
cvd = CreateVnicDetails()
cvd.display_name = vnic_details.get("name", None)
cvd.assign_public_ip = vnic_details.get("assign_public_ip", None)
cvd.hostname_label = vnic_details.get("hostname_label", None)
cvd.private_ip = vnic_details.get("private_ip", None)
cvd.skip_source_dest_check = vnic_details.get("skip_source_dest_check", None)
cvd.subnet_id = vnic_details["subnet_id"]
cvd.nsg_ids = vnic_details.get("nsg_ids", None)
return cvd
def get_source_details_from_module(module):
# An instance's source can either be specified by top-level options "image_id" or via "source_details"
if "source_details" in module.params and module.params["source_details"]:
source_details = module.params["source_details"]
source_type = source_details.get("source_type")
if source_type == "image":
image_id = source_details.get("image_id")
boot_volume_size_in_gbs = source_details.get("boot_volume_size_in_gbs")
if not image_id:
module.fail_json(
msg="state is present, source_details' type is specified as image, but image_id is not"
"specified"
)
return _create_instance_source_via_image(
image_id, boot_volume_size_in_gbs=boot_volume_size_in_gbs
)
if source_type == "bootVolume":
boot_volume_id = source_details.get("boot_volume_id")
if not boot_volume_id:
module.fail_json(
msg="state is present, source_details' type is specified as bootVolume, but "
"boot_volume_id is not specified"
)
return _create_instance_source_via_boot_volume(boot_volume_id)
module.fail_json(
msg="value of source_type must be one of: 'bootVolume', 'image'"
)
elif "image_id" in module.params and module.params["image_id"]:
return _create_instance_source_via_image(module.params["image_id"])
return None
def get_launch_instance_details(module):
lid = LaunchInstanceDetails()
lid.display_name = module.params["name"]
lid.availability_domain = module.params["availability_domain"]
lid.compartment_id = module.params["compartment_id"]
# 'fault_domain' requires OCI Python SDK 2.0.1
fault_domain = module.params["fault_domain"]
if fault_domain is not None:
if "fault_domain" in lid.attribute_map:
lid.fault_domain = fault_domain
else:
module.fail_json(
msg="OCI Python SDK 2.0.1 or above is required to support `fault_domain`. The local SDK"
"version is {0}".format(oci.__version__)
)
lid.extended_metadata = module.params["extended_metadata"]
lid.metadata = module.params["metadata"]
lid.ipxe_script = module.params["ipxe_script"]
lid.shape = module.params["shape"]
oci_utils.add_tags_to_model_from_module(lid, module)
lid.source_details = get_source_details_from_module(module)
if module.params.get("is_pv_encryption_in_transit_enabled") is not None:
lid.is_pv_encryption_in_transit_enabled = module.params[
"is_pv_encryption_in_transit_enabled"
]
return lid
def _create_instance_source_via_image(image_id, boot_volume_size_in_gbs=None):
instance_source_details = InstanceSourceViaImageDetails()
instance_source_details.image_id = image_id
if boot_volume_size_in_gbs:
instance_source_details.boot_volume_size_in_gbs = boot_volume_size_in_gbs
return instance_source_details
def _create_instance_source_via_boot_volume(boot_volume_id):
instance_source_details = InstanceSourceViaBootVolumeDetails()
instance_source_details.boot_volume_id = boot_volume_id
return instance_source_details
def debug(s):
get_logger().debug(s)
def handle_volume_attachment(compute_client, module, volume_id, instance_id):
result = dict()
volume_details = module.params["volume_details"]
compartment_id = module.params["compartment_id"]
if instance_id is None:
instance_id = module.params["instance_id"]
if compartment_id is None:
compartment_id = compute_client.get_instance(instance_id).data.compartment_id
try:
# Check if volume_id is already attached to instance_id
volume_attachments = to_dict(
compute_client.list_volume_attachments(
compartment_id, instance_id=instance_id, volume_id=volume_id
).data
)
except ServiceError as ex:
module.fail_json(msg=ex.message)
# Case when volume_id is already ATTACHED or is ATTACHING to instance_id
for volume_attachment in volume_attachments:
if volume_attachment["lifecycle_state"] in ["ATTACHING", "ATTACHED"]:
result["changed"] = False
return result
key_list = ["attachment_name", "type"]
param_map = dict(
(k, v)
for (k, v) in six.iteritems(volume_details)
if k in key_list and v is not None
)
attach_volume_details = get_attach_volume_details(
instance_id=instance_id, volume_id=volume_id, **param_map
)
return attach_volume(compute_client, module, attach_volume_details)
def handle_volume_detachment(compute_client, module, volume_id):
result = dict()
compartment_id = module.params["compartment_id"]
instance_id = module.params["instance_id"]
if compartment_id is None:
compartment_id = compute_client.get_instance(instance_id).data.compartment_id
try:
# Get the volume attachment with the instance_id & volume_id
volume_attachments = to_dict(
compute_client.list_volume_attachments(
compartment_id, instance_id=instance_id, volume_id=volume_id
).data
)
except ServiceError as ex:
module.fail_json(msg=ex.message)
# Volume attachment with volume_id & instance_id does not exist
if not volume_attachments:
result["changed"] = False
return result
for volume_attachment in volume_attachments:
if volume_attachment["lifecycle_state"] in ["ATTACHED"]:
volume_attachment_id = volume_attachment["id"]
return detach_volume(compute_client, module, volume_attachment_id)
# Case when all volume attachments for instance_id & volume_id are in non-ATTACHED state
result["changed"] = False
return result
def combine_result(result, attachment_result, boot_volume_attachment_result):
combined_result = result
if attachment_result is None:
attachment_result = {}
if boot_volume_attachment_result is None:
boot_volume_attachment_result = {}
combined_result["changed"] = any(
[
result["changed"],
attachment_result.get("changed", False),
boot_volume_attachment_result.get("changed", False),
]
)
return combined_result
@check_mode
def handle_volume_details(compute_client, module, instance_id=None):
attachment_result = dict(changed=False)
volume_details = module.params["volume_details"]
if volume_details:
if "attachment_state" in volume_details:
attachment_state = volume_details["attachment_state"]
else:
attachment_state = "present"
# Check if volume_id is specified
if "volume_id" in volume_details and volume_details["volume_id"] is not None:
volume_id = volume_details["volume_id"]
if attachment_state == "present":
attachment_result = handle_volume_attachment(
compute_client, module, volume_id, instance_id
)
elif attachment_state == "absent":
attachment_result = handle_volume_detachment(
compute_client, module, volume_id
)
else:
module.fail_json(msg="Invalid attachment_state under volume_details")
else:
attachment_result["changed"] = False
return attachment_result
@check_mode
def add_volume_attachment_info(module, compute_client, result):
if "instance" in result:
try:
instance = result["instance"]
vol_attachments = oci_compute_utils.get_volume_attachments(
compute_client, instance
)
result["instance"]["volume_attachments"] = vol_attachments
except ServiceError as ex:
module.fail_json(msg=ex.message)
# Boot volume attachment attach and detach actions do not have separate "wait" related options. They share the
# module's options for wait and wait timeout.
def attach_boot_volume(compute_client, module, attach_boot_volume_details):
return oci_utils.create_and_wait(
resource_type="boot_volume_attachment",
client=compute_client,
create_fn=compute_client.attach_boot_volume,
kwargs_create={"attach_boot_volume_details": attach_boot_volume_details},
get_fn=compute_client.get_boot_volume_attachment,
get_param="boot_volume_attachment_id",
module=module,
)
def add_primary_ip_info(module, compute_client, network_client, result):
if "instance" in result:
try:
instance = result["instance"]
primary_public_ip, primary_private_ip = oci_compute_utils.get_primary_ips(
compute_client, network_client, instance
)
instance["primary_public_ip"] = primary_public_ip
instance["primary_private_ip"] = primary_private_ip
except ServiceError as ex:
instance["primary_public_ip"] = None
instance["primary_private_ip"] = None
module.fail_json(msg=ex.message)
def get_attach_boot_volume_details(instance_id, boot_volume_id, attachment_name=None):
attach_boot_volume_details = AttachBootVolumeDetails()
attach_boot_volume_details.display_name = attachment_name
attach_boot_volume_details.instance_id = instance_id
attach_boot_volume_details.boot_volume_id = boot_volume_id
return attach_boot_volume_details
def handle_boot_volume_attachment(compute_client, module, boot_volume_id, instance_id):
result = dict()
compartment_id = module.params["compartment_id"]
ad = module.params["availability_domain"]
if instance_id is None:
instance_id = module.params["instance_id"]
try:
if compartment_id is None:
compartment_id = compute_client.get_instance(
instance_id
).data.compartment_id
if ad is None:
ad = compute_client.get_instance(instance_id).data.availability_domain
# Check if boot_volume_id is already attached to instance_id
boot_volume_attachments = to_dict(
compute_client.list_boot_volume_attachments(
ad,
compartment_id,
instance_id=instance_id,
boot_volume_id=boot_volume_id,
).data
)
except ServiceError as ex:
module.fail_json(msg=ex.message)
# Case when boot_volume_id is already ATTACHED or is ATTACHING to instance_id
for boot_volume_attachment in boot_volume_attachments:
if boot_volume_attachment["lifecycle_state"] in ["ATTACHING", "ATTACHED"]:
result["changed"] = False
return result
attach_boot_volume_details = get_attach_boot_volume_details(
instance_id=instance_id, boot_volume_id=boot_volume_id
)
return attach_boot_volume(compute_client, module, attach_boot_volume_details)
def detach_boot_volume(compute_client, module, boot_volume_attachment_id):
return oci_utils.delete_and_wait(
resource_type="boot_volume_attachment",
client=compute_client,
get_fn=compute_client.get_boot_volume_attachment,
kwargs_get={"boot_volume_attachment_id": boot_volume_attachment_id},
delete_fn=compute_client.detach_boot_volume,
kwargs_delete={"boot_volume_attachment_id": boot_volume_attachment_id},
module=module,
)
def handle_boot_volume_detachment(compute_client, module, boot_volume_id):
result = dict()
compartment_id = module.params["compartment_id"]
instance_id = module.params["instance_id"]
ad = module.params["availability_domain"]
try:
if compartment_id is None:
compartment_id = compute_client.get_instance(
instance_id
).data.compartment_id
if ad is None:
ad = compute_client.get_instance(instance_id).data.availability_domain
# Get the boot volume attachment with the instance_id & volume_id
boot_volume_attachments = to_dict(
compute_client.list_boot_volume_attachments(
ad,
compartment_id,
instance_id=instance_id,
boot_volume_id=boot_volume_id,
).data
)
except ServiceError as ex:
module.fail_json(msg=ex.message)
# Boot volume attachment with volume_id & instance_id does not exist
if not boot_volume_attachments:
result["changed"] = False
return result
for boot_volume_attachment in boot_volume_attachments:
if boot_volume_attachment["lifecycle_state"] == "ATTACHED":
boot_volume_attachment_id = boot_volume_attachment["id"]
return detach_boot_volume(compute_client, module, boot_volume_attachment_id)
# Case when boot volume attachment for instance_id & volume_id is in non-ATTACHED state
result["changed"] = False
return result
@check_mode
def handle_boot_volume_details(compute_client, module, instance_id=None):
attachment_result = dict(changed=False)
boot_volume_details = module.params["boot_volume_details"]
if boot_volume_details:
if "attachment_state" in boot_volume_details:
attachment_state = boot_volume_details["attachment_state"]
else:
attachment_state = "present"
# Check if boot_volume_id is specified
if (
"boot_volume_id" in boot_volume_details
and boot_volume_details["boot_volume_id"] is not None
):
boot_volume_id = boot_volume_details["boot_volume_id"]
if attachment_state == "present":
attachment_result = handle_boot_volume_attachment(
compute_client, module, boot_volume_id, instance_id
)
elif attachment_state == "absent":
attachment_result = handle_boot_volume_detachment(
compute_client, module, boot_volume_id
)
else:
module.fail_json(
msg="Invalid attachment_state under boot_volume_details"
)
return attachment_result
@check_mode
def add_boot_volume_attachment_info(module, compute_client, result):
if "instance" in result:
try:
instance = result["instance"]
boot_vol_attachment = oci_compute_utils.get_boot_volume_attachment(
compute_client, instance
)
result["instance"]["boot_volume_attachment"] = boot_vol_attachment
except ServiceError as ex:
module.fail_json(msg=ex.message)
def _get_default_source_details(module):
"""
    Return the user-specified image_id value as the default for source_details.
The GET model of the Resource API returns `image_id` in the `source_details` section of the Resource. So,
we construct an equivalent source_details for a user-specified "image_id" and set as the default value for
the "source_details" object, so that an existing resource with the same state matches.
"""
if "source_details" in module.params and module.params["source_details"]:
return module.params["source_details"]
elif module.params.get("image_id"):
image_id = module.params["image_id"]
return {"source_type": "image", "image_id": image_id}
return None
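# Illustrative example for the helper above (hypothetical OCID, for clarity only):
# with image_id="ocid1.image.oc1..xxxxxEXAMPLExxxxx" and no source_details, it
# returns {"source_type": "image", "image_id": "ocid1.image.oc1..xxxxxEXAMPLExxxxx"},
# mirroring the source_details shape that the GET model of the resource reports.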
def _get_default_image_id(module):
"""
    Return the image_id if it was specified through "source_details", otherwise None.
"""
if "source_details" in module.params and module.params["source_details"]:
source_details = module.params["source_details"]
source_type = source_details.get("source_type")
if not source_type:
if "source_type" not in source_details:
module.fail_json(
msg="source_type required and must be one of: 'bootVolume', 'image'"
)
if source_type == "image":
return source_details["image_id"]
return None
def set_logger(my_logger):
global logger
logger = my_logger
def get_logger():
return logger
def _get_exclude_attributes(module):
# display_name is generated by OCI if unspecified, so always exclude it during matching
exclude_attributes = {"display_name": True}
if "source_details" in module.params and module.params["source_details"]:
source_details = module.params["source_details"]
if "source_type" not in source_details:
module.fail_json(
msg="source_type required and must be one of: 'bootVolume', 'image'"
)
if source_details["source_type"] == "bootVolume":
# if an Instance is being created by a boot volume id, ignore the "image_id" attribute of the existing
# resources during matching
exclude_attributes.update({"image_id": True})
return exclude_attributes
def create_one_instance(compute_client, module):
# is_pv_encryption_in_transit_enabled is a top level param on LaunchInstanceDetails but it gets returned
# inside Instance.LaunchOptions so we need to propagate the value so that the existing resource matching
# logic works properly
default_is_pv_encryption_in_transit_enabled = (
module.params.get("is_pv_encryption_in_transit_enabled")
if module.params.get("is_pv_encryption_in_transit_enabled") is not None
else False
)
result = oci_utils.check_and_create_resource(
resource_type="instance",
create_fn=launch_instance,
kwargs_create={"compute_client": compute_client, "module": module},
list_fn=compute_client.list_instances,
kwargs_list={"compartment_id": module.params["compartment_id"]},
module=module,
model=LaunchInstanceDetails(),
exclude_attributes=_get_exclude_attributes(module),
default_attribute_values={
"ipxe_script": None,
"extended_metadata": {},
"metadata": {},
"launch_options": {
"is_pv_encryption_in_transit_enabled": default_is_pv_encryption_in_transit_enabled
},
# during matching, if an existing
# resource has the same values as the
# current user request, consider it as
# a match.
"source_details": _get_default_source_details(module),
"image_id": _get_default_image_id(module),
},
)
# Handle volume details when an instance is launched
vol_attachment_result = {}
if result["changed"]:
vol_attachment_result = handle_volume_details(
compute_client, module, instance_id=result["instance"]["id"]
)
return result, vol_attachment_result
def _generate_name_for_instance(name_prefix, suffix):
# If the 'display_name' is specified as a printf like string, use the user-specified format,
# else the standard format to generate the name is <name_prefix>-<suffix>
try:
return name_prefix % suffix
except TypeError:
if name_prefix:
return name_prefix + "-" + str(suffix)
return None
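# Illustrative behaviour of the helper above (hypothetical inputs):
#   ("web-%d", 3)  -> "web-3"   (printf-like prefix is honoured)
#   ("web", 3)     -> "web-3"   (falls back to "<name_prefix>-<suffix>")
#   (None, 3)      -> None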
def main():
my_logger = oci_utils.get_logger("oci_instance")
set_logger(my_logger)
module_args = oci_utils.get_taggable_arg_spec(
supports_create=True, supports_wait=True
)
module_args.update(
dict(
availability_domain=dict(type="str", required=False),
boot_volume_details=dict(type="dict", required=False),
compartment_id=dict(type="str", required=False),
extended_metadata=dict(type="dict", required=False),
fault_domain=dict(type="str", required=False),
instance_id=dict(type="str", required=False, aliases=["id"]),
image_id=dict(type="str", required=False),
ipxe_script=dict(type="str", required=False),
metadata=dict(type="dict", required=False),
name=dict(type="str", required=False, aliases=["display_name"]),
preserve_boot_volume=dict(type="bool", required=False, default=False),
shape=dict(type="str", required=False),
state=dict(
type="str",
required=False,
default="present",
choices=[
"present",
"absent",
"running",
"stopped",
"reset",
"softreset",
],
),
volume_details=dict(type="dict", required=False),
source_details=dict(type="dict", required=False),
vnic=dict(type="dict", aliases=["create_vnic_details"]),
is_pv_encryption_in_transit_enabled=dict(type="bool"),
)
)
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=False,
required_if=[["state", "absent", ["instance_id"]]],
mutually_exclusive=[
["boot_volume_details", "image_id"],
["vnic", "instance_id"],
["source_details", "image_id"],
],
)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
compute_client = oci_utils.create_service_client(module, ComputeClient)
network_client = oci_utils.create_service_client(module, VirtualNetworkClient)
state = module.params["state"]
result = dict(changed=False)
vol_attachment_result = dict(changed=False)
boot_volume_attachment_result = dict(changed=False)
id = module.params["instance_id"]
try:
if id is not None:
inst = None
# Attempt to get the instance
try:
inst = oci_utils.call_with_backoff(
compute_client.get_instance, instance_id=id
).data
except ServiceError as se:
                module.fail_json(msg=se.message)
if state == "absent":
if inst is not None:
terminate_result = terminate_instance(compute_client, id, module)
result["changed"] = terminate_result["changed"]
# result["instances"] = [terminate_result["instance"]]
result["instance"] = terminate_result["instance"]
else:
pass # instance is already deleted.
elif state == "present":
result = update_instance(compute_client, inst, module)
# Handle volume details after update-instance operation
vol_attachment_result = handle_volume_details(compute_client, module)
# Handle boot volume details after update-instance operation
boot_volume_attachment_result = handle_boot_volume_details(
compute_client, module
)
else:
# One of the power actions needs to be applied
# If a boot volume is to be attached to an instance & the instance should be in RUNNING state,
# the attachment should be done before the power_action_on_instance.
if state == "running":
boot_volume_attachment_result = handle_boot_volume_details(
compute_client, module
)
# perform power actions on instance
result = power_action_on_instance(compute_client, id, state, module)
# Handle volume details after power action on instance
vol_attachment_result = handle_volume_details(compute_client, module)
# If a boot volume is to be detached from an instance & the instance should be in STOPPED state,
# the detachment should be done after the power_action_on_instance.
if state == "stopped":
boot_volume_attachment_result = handle_boot_volume_details(
compute_client, module
)
else:
debug("check and create instance")
create_result, vol_attachment_result = create_one_instance(
compute_client, module
)
result["changed"] = create_result["changed"]
# result["instances"] = [create_result["instance"]]
result["instance"] = create_result["instance"]
result = combine_result(
result, vol_attachment_result, boot_volume_attachment_result
)
add_volume_attachment_info(module, compute_client, result)
add_boot_volume_attachment_info(module, compute_client, result)
add_primary_ip_info(module, compute_client, network_client, result)
result["instances"] = [result["instance"]]
module.exit_json(**result)
except ServiceError as se:
module.fail_json(msg=se.message)
if __name__ == "__main__":
main()
|
tests/test_tensor_api.py | tao-harald/geoopt | 438 | 11161234 | import geoopt
import torch
import pytest
def test_allow_empty_parameter_compat():
p = geoopt.ManifoldParameter()
assert p.shape == (0,)
def test_compare_manifolds():
m1 = geoopt.Euclidean()
m2 = geoopt.Euclidean(ndim=1)
tensor = geoopt.ManifoldTensor(10, manifold=m1)
with pytest.raises(ValueError) as e:
_ = geoopt.ManifoldParameter(tensor, manifold=m2)
assert e.match("Manifolds do not match")
def test_manifold_parameter_attribute():
p = geoopt.ManifoldParameter()
assert hasattr(p, "manifold")
def test_manifold_attribute():
p = geoopt.ManifoldTensor()
assert hasattr(p, "manifold")
def test_no_type_promotion():
p = geoopt.Sphere().random(10)
t = p.manifold.proju(p, torch.randn(10))
assert not isinstance(t, type(p))
|
kansha/card_addons/gallery/view.py | AnomalistDesignLLC/kansha | 161 | 11161244 | # --
# Copyright (c) 2012-2014 Net-ng.
# All rights reserved.
#
# This software is licensed under the BSD License, as described in
# the file LICENSE.txt, which you should have received as part of
# this distribution.
# --
from nagare.i18n import _
from nagare import presentation, ajax, security, component
from .comp import Gallery, Asset, AssetCropper
def render_image(self, h, comp, size, randomize=False, **kw):
metadata = self.assets_manager.get_metadata(self.filename)
src = self.assets_manager.get_image_url(self.filename, size)
if randomize:
src += '?r=' + h.generate_id()
return h.img(title=metadata['filename'], alt=metadata['filename'],
src=src, **kw)
def render_file(self, h, comp, size, **kw):
kw['class'] += ' file_icon'
metadata = self.assets_manager.get_metadata(self.filename)
res = [h.img(title=metadata['filename'], alt=metadata['filename'],
src="img/file-icon.jpg", **kw)]
if size == 'medium':
res.append(h.span(metadata['filename']))
return res
CONTENT_TYPES = {'image/png': render_image,
'image/jpeg': render_image,
'image/pjpeg': render_image,
'image/gif': render_image}
@presentation.render_for(Gallery)
def render(self, h, comp, *args):
self.load_assets()
with h.div(id='gal' + self.comp_id):
with h.div(class_='nbItems'):
h << comp.render(h, model='badge')
with h.div(id="card-gallery"):
h << comp.render(h, self.model)
return h.root
@presentation.render_for(Gallery, 'view')
def render_Gallery_view(self, h, comp, model):
model = 'edit' if security.has_permissions('edit', self) else 'anonymous'
for asset in self.assets:
h << asset.render(h, model)
return h.root
@presentation.render_for(Gallery, 'crop')
def render_Gallery_crop(self, h, comp, model):
return self.cropper.on_answer(self.action)
@presentation.render_for(Gallery, 'cover')
def render_cover(self, h, comp, model):
cover = self.get_cover()
if cover:
h << h.p(component.Component(cover, model='cover'), class_='cover')
return h.root
@presentation.render_for(Gallery, model='badge')
def render_gallery_badge(self, h, *args):
"""Gallery badge for the card"""
num_assets = self.num_assets
if num_assets:
with h.span(class_='badge'):
h << h.span(h.i(class_='icon-file-empty'), ' ', num_assets, class_='label')
return h.root
@presentation.render_for(Gallery, "action")
def render_download(self, h, comp, *args):
if security.has_permissions('edit', self):
submit_id = h.generate_id("attach_submit")
input_id = h.generate_id("attach_input")
h << h.label((h.i(class_='icon-file-empty'),
_("Add file")), class_='btn', for_=input_id)
with h.form:
h << h.script(
u'''
function valueChanged(e) {
if (YAHOO.kansha.app.checkFileSize(this, %(max_size)s)) {
YAHOO.util.Dom.get(%(submit_id)s).click();
YAHOO.kansha.app.showWaiter();
} else {
alert(%(error)s);
}
}
YAHOO.util.Event.onDOMReady(function() {
YAHOO.util.Event.on(%(input_id)s, 'change', valueChanged);
});''' %
{
'max_size': ajax.py2js(self.assets_manager.max_size),
'input_id': ajax.py2js(input_id),
'submit_id': ajax.py2js(submit_id),
'error': ajax.py2js(
_(u'Max file size exceeded')
).decode('UTF-8')
}
)
submit_action = ajax.Update(
render=lambda r: r.div(comp.render(r, model=None), r.script('YAHOO.kansha.app.hideWaiter()')),
component_to_update='gal' + self.comp_id,
)
h << h.input(id=input_id, class_='hidden', type="file", name="file", multiple="multiple", maxlength="100",).action(self.add_assets)
h << h.input(class_='hidden', id=submit_id, type="submit").action(submit_action)
return h.root
@presentation.render_for(Asset)
@presentation.render_for(Asset, model='thumb')
@presentation.render_for(Asset, model='medium')
@presentation.render_for(Asset, model='cover')
def render_asset(self, h, comp, model, *args):
res = []
metadata = self.assets_manager.get_metadata(self.filename)
kw = {'randomize': True} if model == 'cover' else {}
kw['class'] = model
if self.is_cover:
res.append(h.span(class_='is_cover'))
meth = CONTENT_TYPES.get(metadata['content-type'], render_file)
res.append(meth(self, h, comp, model, **kw))
return res
@presentation.render_for(Asset, model='edit')
def render_Asset_thumb(self, h, comp, model, *args):
with h.div(class_='asset'):
action = h.a.action(lambda: comp.answer(('delete', self))).get('onclick')
onclick = _(u'Are you sure you want to delete this file?')
onclick = u'if (confirm("%s")) { %s }' % (onclick, action)
with h.a(class_='delete', title=_(u'Delete'), href='#', onclick=onclick):
h << h.i(class_='icon-cross')
if self.is_image():
with h.a(class_='cover', title=_(u'Configure cover')).action(lambda: comp.answer(('configure_cover', self))):
if self.is_cover:
h << {'style': 'visibility: visible'}
h << h.i(class_='icon-checkmark')
with h.a(href=self.assets_manager.get_image_url(self.filename), target='_blank'):
h << comp.render(h, 'thumb')
return h.root
@presentation.render_for(Asset, model="anonymous")
def render_asset_anonymous(self, h, comp, model, *args):
with h.div(class_='asset'):
with h.a(href=self.assets_manager.get_image_url(self.filename), target='_blank'):
h << comp.render(h, model="thumb")
return h.root
@presentation.render_for(AssetCropper)
def render_gallery_cropper(self, h, comp, *args):
h << h.p(_('Use the controls below to create the cover of your card.'))
form_id = h.generate_id()
img_id = h.generate_id()
with h.form:
for crop_name in 'crop_left', 'crop_top', 'crop_width', 'crop_height':
h << h.input(type='hidden', id=form_id + '_' + crop_name).action(getattr(self, crop_name))
h << h.p(render_image(self.asset, h, comp, 'medium', id=img_id))
h << h.script(
"YAHOO.util.Event.onContentReady(%s,"
"function(){setTimeout(function(){YAHOO.kansha.app.initCrop(%s, %s, %s, %s)}, 500)})" % (
ajax.py2js(img_id),
ajax.py2js(img_id),
ajax.py2js(form_id),
ajax.py2js(self.crop_width()),
ajax.py2js(self.crop_height())
)
)
with h.div(class_='buttons'):
h << h.button(_('Create cover'), class_='btn btn-primary').action(self.commit, comp)
if self.asset.is_cover:
h << ' '
h << h.button(_('Remove cover'), class_='btn delete').action(self.remove_cover, comp)
h << ' '
h << h.button(_('Cancel'), class_='btn').action(self.cancel, comp)
return h.root
|
jsonrpcserver/result.py | bcb/jsonrpcserver | 144 | 11161265 | """Result data types - the results of calling a method.
Results are the JSON-RPC response objects
(https://www.jsonrpc.org/specification#response_object), minus the "jsonrpc" and "id"
parts - the library takes care of these parts for you.
The public functions are Success, Error and InvalidParams.
"""
from typing import Any, NamedTuple
from oslash.either import Either, Left, Right # type: ignore
from .codes import ERROR_INVALID_PARAMS, ERROR_METHOD_NOT_FOUND, ERROR_INTERNAL_ERROR
from .sentinels import NODATA
class SuccessResult(NamedTuple):
result: Any = None
def __repr__(self) -> str:
return f"SuccessResult({self.result!r})"
class ErrorResult(NamedTuple):
code: int
message: str
data: Any = NODATA # The spec says this value may be omitted
def __repr__(self) -> str:
return f"ErrorResult(code={self.code!r}, message={self.message!r}, data={self.data!r})"
# Union of the two valid result types
Result = Either[SuccessResult, ErrorResult]
# Helpers
def MethodNotFoundResult(data: Any) -> ErrorResult:
return ErrorResult(ERROR_METHOD_NOT_FOUND, "Method not found", data)
def InternalErrorResult(data: Any) -> ErrorResult:
return ErrorResult(ERROR_INTERNAL_ERROR, "Internal error", data)
def InvalidParamsResult(data: Any = NODATA) -> ErrorResult:
return ErrorResult(ERROR_INVALID_PARAMS, "Invalid params", data)
# Helpers (the public functions)
def Success(*args: Any, **kwargs: Any) -> Either[ErrorResult, SuccessResult]:
return Right(SuccessResult(*args, **kwargs))
def Error(*args: Any, **kwargs: Any) -> Either[ErrorResult, SuccessResult]:
return Left(ErrorResult(*args, **kwargs))
def InvalidParams(*args: Any, **kwargs: Any) -> Either[ErrorResult, SuccessResult]:
"""InvalidParams is a shortcut to save you from having to pass the Invalid Params
JSON-RPC code to Error.
"""
return Left(InvalidParamsResult(*args, **kwargs))
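# A minimal usage sketch (an assumption, not part of the library module): a
# hypothetical method returning the Result helpers defined above.
if __name__ == "__main__":
    def divide(a: float, b: float) -> Result:
        if b == 0:
            return InvalidParams("cannot divide by zero")
        return Success(a / b)
    print(divide(6, 3))  # a Right-wrapped SuccessResult
    print(divide(6, 0))  # a Left-wrapped ErrorResult carrying the invalid-params code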
|
tkinter/entry-bind-key-release-to-select-text/example-1.py | whitmans-max/python-examples | 140 | 11161287 | #!/usr/bin/env python3
'''
Because the selection is removed right after the `<Control-a>` keys are released,
I use `after()` to run the selection 50 ms later.
It selects all of the text (which moves the cursor to the beginning)
and then moves the cursor to the end.
'''
import tkinter as tk
def callback(event):
print('e.get():', e.get())
# or more universal
print('event.widget.get():', event.widget.get())
# select text after 50ms
root.after(50, select_all, event.widget)
def select_all(widget):
# select text
widget.select_range(0, 'end')
# move cursor to the end
widget.icursor('end')
root = tk.Tk()
e = tk.Entry(root)
e.pack()
e.bind('<Control-a>', callback)
root.mainloop()
|
aries_cloudagent/config/tests/test_default_context.py | kuraakhilesh8230/aries-cloudagent-python | 247 | 11161293 | <gh_stars>100-1000
from tempfile import NamedTemporaryFile
from asynctest import TestCase as AsyncTestCase
from ...cache.base import BaseCache
from ...core.profile import ProfileManager
from ...core.protocol_registry import ProtocolRegistry
from ...transport.wire_format import BaseWireFormat
from ..default_context import DefaultContextBuilder
from ..injection_context import InjectionContext
class TestDefaultContext(AsyncTestCase):
async def test_build_context(self):
"""Test context init."""
builder = DefaultContextBuilder()
result = await builder.build_context()
assert isinstance(result, InjectionContext)
for cls in (
BaseCache,
BaseWireFormat,
ProfileManager,
ProtocolRegistry,
):
assert isinstance(result.inject(cls), cls)
builder = DefaultContextBuilder(
settings={
"timing.enabled": True,
"timing.log.file": NamedTemporaryFile().name,
"multitenant.admin_enabled": True,
}
)
result = await builder.build_context()
assert isinstance(result, InjectionContext)
|
jython/travelingsalesman.py | theturpinator/randomized-optimization-ABAGAIL | 235 | 11161323 | <filename>jython/travelingsalesman.py
# traveling salesman algorithm implementation in jython
# This also prints the index of the points of the shortest route.
# To make a plot of the route, write the points at these indexes
# to a file and plot them in your favorite tool.
import sys
import os
import time
import java.io.FileReader as FileReader
import java.io.File as File
import java.lang.String as String
import java.lang.StringBuffer as StringBuffer
import java.lang.Boolean as Boolean
import java.util.Random as Random
import dist.DiscreteDependencyTree as DiscreteDependencyTree
import dist.DiscreteUniformDistribution as DiscreteUniformDistribution
import dist.Distribution as Distribution
import dist.DiscretePermutationDistribution as DiscretePermutationDistribution
import opt.DiscreteChangeOneNeighbor as DiscreteChangeOneNeighbor
import opt.EvaluationFunction as EvaluationFunction
import opt.GenericHillClimbingProblem as GenericHillClimbingProblem
import opt.HillClimbingProblem as HillClimbingProblem
import opt.NeighborFunction as NeighborFunction
import opt.RandomizedHillClimbing as RandomizedHillClimbing
import opt.SimulatedAnnealing as SimulatedAnnealing
import opt.example.FourPeaksEvaluationFunction as FourPeaksEvaluationFunction
import opt.ga.CrossoverFunction as CrossoverFunction
import opt.ga.SingleCrossOver as SingleCrossOver
import opt.ga.DiscreteChangeOneMutation as DiscreteChangeOneMutation
import opt.ga.GenericGeneticAlgorithmProblem as GenericGeneticAlgorithmProblem
import opt.ga.GeneticAlgorithmProblem as GeneticAlgorithmProblem
import opt.ga.MutationFunction as MutationFunction
import opt.ga.StandardGeneticAlgorithm as StandardGeneticAlgorithm
import opt.ga.UniformCrossOver as UniformCrossOver
import opt.prob.GenericProbabilisticOptimizationProblem as GenericProbabilisticOptimizationProblem
import opt.prob.MIMIC as MIMIC
import opt.prob.ProbabilisticOptimizationProblem as ProbabilisticOptimizationProblem
import shared.FixedIterationTrainer as FixedIterationTrainer
import opt.example.TravelingSalesmanEvaluationFunction as TravelingSalesmanEvaluationFunction
import opt.example.TravelingSalesmanRouteEvaluationFunction as TravelingSalesmanRouteEvaluationFunction
import opt.SwapNeighbor as SwapNeighbor
import opt.ga.SwapMutation as SwapMutation
import opt.example.TravelingSalesmanCrossOver as TravelingSalesmanCrossOver
import opt.example.TravelingSalesmanSortEvaluationFunction as TravelingSalesmanSortEvaluationFunction
import shared.Instance as Instance
import util.ABAGAILArrays as ABAGAILArrays
from array import array
"""
Commandline parameter(s):
none
"""
# set N value. This is the number of points
N = 50
random = Random()
points = [[0 for x in xrange(2)] for x in xrange(N)]
for i in range(0, len(points)):
points[i][0] = random.nextDouble()
points[i][1] = random.nextDouble()
ef = TravelingSalesmanRouteEvaluationFunction(points)
odd = DiscretePermutationDistribution(N)
nf = SwapNeighbor()
mf = SwapMutation()
cf = TravelingSalesmanCrossOver(ef)
hcp = GenericHillClimbingProblem(ef, odd, nf)
gap = GenericGeneticAlgorithmProblem(ef, odd, mf, cf)
rhc = RandomizedHillClimbing(hcp)
fit = FixedIterationTrainer(rhc, 200000)
fit.train()
print "RHC Inverse of Distance: " + str(ef.value(rhc.getOptimal()))
print "Route:"
path = []
for x in range(0,N):
path.append(rhc.getOptimal().getDiscrete(x))
print path
sa = SimulatedAnnealing(1E12, .999, hcp)
fit = FixedIterationTrainer(sa, 200000)
fit.train()
print "SA Inverse of Distance: " + str(ef.value(sa.getOptimal()))
print "Route:"
path = []
for x in range(0,N):
path.append(sa.getOptimal().getDiscrete(x))
print path
ga = StandardGeneticAlgorithm(2000, 1500, 250, gap)
fit = FixedIterationTrainer(ga, 1000)
fit.train()
print "GA Inverse of Distance: " + str(ef.value(ga.getOptimal()))
print "Route:"
path = []
for x in range(0,N):
path.append(ga.getOptimal().getDiscrete(x))
print path
# for mimic we use a sort encoding
ef = TravelingSalesmanSortEvaluationFunction(points);
fill = [N] * N
ranges = array('i', fill)
odd = DiscreteUniformDistribution(ranges);
df = DiscreteDependencyTree(.1, ranges);
pop = GenericProbabilisticOptimizationProblem(ef, odd, df);
mimic = MIMIC(500, 100, pop)
fit = FixedIterationTrainer(mimic, 1000)
fit.train()
print "MIMIC Inverse of Distance: " + str(ef.value(mimic.getOptimal()))
print "Route:"
path = []
optimal = mimic.getOptimal()
fill = [0] * optimal.size()
ddata = array('d', fill)
for i in range(0,len(ddata)):
ddata[i] = optimal.getContinuous(i)
order = ABAGAILArrays.indices(optimal.size())
ABAGAILArrays.quicksort(ddata, order)
print order
|
src/controller/python/chip/logging/__init__.py | summercms/connectedhomeip | 3,495 | 11161332 | #
# Copyright (c) 2021 Project CHIP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from chip.logging.library_handle import _GetLoggingLibraryHandle
from chip.logging.types import LogRedirectCallback_t
import logging
# Defines match support/logging/Constants.h (LogCategory enum)
ERROR_CATEGORY_NONE = 0
ERROR_CATEGORY_ERROR = 1
ERROR_CATEGORY_PROGRESS = 2
ERROR_CATEGORY_DETAIL = 3
@LogRedirectCallback_t
def _RedirectToPythonLogging(category, module, message):
module = module.decode('utf-8')
message = message.decode('utf-8')
logger = logging.getLogger('chip.%s' % module)
if category == ERROR_CATEGORY_ERROR:
logger.error("%s", message)
elif category == ERROR_CATEGORY_PROGRESS:
logger.info("%s", message)
elif category == ERROR_CATEGORY_DETAIL:
logger.debug("%s", message)
else:
# All logs are expected to have some reasonable category. This treats
        # unknown/None as critical.
logging.critical("%s", message)
def RedirectToPythonLogging():
"""Redireects CHIP logging to python logging module."""
handle = _GetLoggingLibraryHandle()
handle.pychip_logging_set_callback(_RedirectToPythonLogging)
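# A minimal usage sketch (an assumption, not part of the upstream API surface):
# route CHIP's native log output through the standard logging module. Running it
# requires the CHIP controller native library to be built and importable.
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    RedirectToPythonLogging()
    logging.getLogger("chip.example").info("CHIP logs now flow through python logging")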
|
fds/run.py | CyberFlameGO/fds | 322 | 11161342 | <filename>fds/run.py
import enum
from shutil import which
import requests
from pathlib import Path
import sys
from fds.domain.commands import Commands
from fds.logger import Logger
from fds.services.dvc_service import DVCService
from fds.services.fds_service import FdsService
from fds.services.types import InnerService
from fds.services.git_service import GitService
from fds.services.pretty_print import PrettyPrint
from fds.utils import execute_command, rerun_in_new_shell_and_exit, get_confirm_from_user
from .version import __version__
class HooksRunner(object):
class ExitCodes(enum.IntFlag):
OK = 0
DVC_INSTALL_FAILED = 2 ** 0
GIT_INSTALL_FAILED = 2 ** 1
        FDS_UPDATE_FAILED = 2 ** 2
GIT_INITIALIZE_FAILED = 2 ** 3
DVC_INITIALIZE_FAILED = 2 ** 4
def __init__(
self,
service: FdsService,
printer: PrettyPrint,
logger: Logger,
):
self.service = service
self.printer = printer
self.logger = logger
self.update_check = [
(self.ExitCodes.FDS_UPDATE_FAILED.value, self._ensure_fds_updated)
]
self.pre_git_dvc_hooks = [
(self.ExitCodes.DVC_INSTALL_FAILED.value, self._ensure_dvc_installed),
(self.ExitCodes.GIT_INSTALL_FAILED.value, self._ensure_git_installed),
]
self.pre_execute_hooks = [
(self.ExitCodes.GIT_INITIALIZE_FAILED.value, self._ensure_git_initialized),
(self.ExitCodes.DVC_INITIALIZE_FAILED.value, self._ensure_dvc_initialized),
]
def _ensure_dvc_installed(self):
if which("dvc") is not None:
return 0
ret_code = 1
self.printer.error("dvc executable is not installed or found")
answer = get_confirm_from_user('Should we install dvc [https://dvc.org/] for you right now?\n' +
' Will install using `pip3 install dvc==2.3.0`', False)
if answer:
execute_command(["pip3 install 'dvc==2.3.0'"], shell=True, capture_output=False)
ret_code = 0
else:
# Provide instructions
self.printer.warn("You can install dvc manually from https://dvc.org/doc/install")
return ret_code
def _ensure_git_installed(self):
if which("git") is None:
self.printer.error("git executable is not found, please install git from https://git-scm.com/downloads")
return sys.exit(-1)
return 0
def _ensure_fds_updated(self):
r = requests.get("https://pypi.python.org/pypi/fastds/json")
data = r.json()
latest_version = data["info"]["version"]
if latest_version == __version__:
return 0
answer = get_confirm_from_user(f"You are using fds version {__version__}, however version {latest_version}"
f" is available.Should we upgrade using `pip3 install fastds --upgrade`", True)
if not answer:
return 0
print("\nUpgrading package.\n")
execute_command(["pip3 install fastds --upgrade"], shell=True, capture_output=False)
print("\nfds upgraded. Running command...\n")
rerun_in_new_shell_and_exit()
return 0
def __ensure_initialized(
self,
service_name: str,
service: InnerService,
raise_on_reject=True,
):
path = Path(service.repo_path).resolve()
if service.is_initialized():
return 0
self.printer.error(f"{service_name} has not been initialized in `{path}`")
answer = get_confirm_from_user(f'Should we initialize {service_name} for you right now?\n' +
f' Will initialize in `{path}`', False)
if answer:
service.init()
return 0
# Provide instructions
self.printer.warn(
f"You can initialize {service_name} manually by running `{service_name} init` in `{path}`"
)
if raise_on_reject:
sys.exit(-1)
return 1
def _ensure_git_initialized(self):
return self.__ensure_initialized("git", self.service.git_service)
def _ensure_dvc_initialized(self):
return self.__ensure_initialized("dvc", self.service.dvc_service)
def run(self, hooks):
# Check if dvc is installed
ret_code = 0
for exit_code, hook in hooks:
self.logger.debug(f"Running {hook.__qualname__}")
failed = hook()
if failed:
ret_code |= exit_code
return ret_code
class Run(object):
def __init__(self, arguments: dict):
self.logger = Logger.get_logger("fds.Run")
self.arguments = arguments
self.service = FdsService(GitService(), DVCService())
self.printer = PrettyPrint()
self.hooks_runner = HooksRunner(
self.service,
self.printer,
self.logger,
)
def execute(self):
arguments = self.arguments
self.logger.debug(f"arguments passed: {arguments}")
# No need to run any hooks
if arguments.get(Commands.VERSION.value):
self.service.version()
# Do version check after showing the version
self.hooks_runner.run(self.hooks_runner.update_check)
return 0
hook_ret_code = self.hooks_runner.run(self.hooks_runner.update_check)
if hook_ret_code != 0:
return hook_ret_code
# Run pre execute hooks pre git and dvc init hooks
hook_ret_code = self.hooks_runner.run(self.hooks_runner.pre_git_dvc_hooks)
if hook_ret_code != 0:
return hook_ret_code
if arguments["command"] == Commands.INIT.value:
# Run init command stuff
self.service.init()
return 0
elif arguments["command"] == Commands.CLONE.value:
# Run clone command stuff
self.service.clone(arguments["url"], arguments["folder_name"][0], arguments["dvc_remote"])
return 0
# Run pre execute hooks After git and dvc are initialized
hook_ret_code = self.hooks_runner.run(self.hooks_runner.pre_execute_hooks)
if hook_ret_code != 0:
return hook_ret_code
if arguments["command"] == Commands.STATUS.value:
# Run status command stuff
self.service.status()
return 0
elif arguments["command"] == Commands.ADD.value:
# Run add command stuff
self.service.add(arguments["add_command"])
return 0
elif arguments["command"] == Commands.COMMIT.value:
if len(arguments.get("message", [])) == 1:
message = arguments["message"][0]
elif len(arguments.get("m", [])) == 1:
message = arguments["m"][0]
else:
raise Exception("Enter a valid commit message")
# Run commit command stuff
self.service.commit(message, arguments['yes'])
return 0
elif arguments["command"] == Commands.PUSH.value:
# Run push command stuff
self.service.push(arguments["git_remote"], arguments["dvc_remote"], arguments["branch"])
return 0
elif arguments["command"] == Commands.SAVE.value:
# Run save command stuff
self.service.save(arguments["message"], arguments["git_remote"], arguments["dvc_remote"])
return 0
else:
raise Exception("Invalid operation")
|
src/GridCal/Gui/GIS/gis_dialogue.py | mzy2240/GridCal | 284 | 11161373 | import sys
import os
from PySide2.QtWidgets import *
from PySide2.QtWebEngineWidgets import QWebEngineView as QWebView, QWebEnginePage as QWebPage
import folium
from shutil import copyfile
from GridCal.Gui.GIS.gui import *
from GridCal.Engine.IO.file_system import get_create_gridcal_folder
class GISWindow(QMainWindow):
def __init__(self, external_file_path=''):
"""
Constructor
:param external_file_path: path to the file to open
"""
QMainWindow.__init__(self)
self.ui = Ui_GisWindow()
self.ui.setupUi(self)
self.setWindowTitle('GridCal - GIS')
# create web browser for the map
self.web_layout = QtWidgets.QVBoxLayout(self.ui.webFrame)
self.webView = QWebView()
self.web_layout.addWidget(self.webView)
self.ui.webFrame.setContentsMargins(0, 0, 0, 0)
self.web_layout.setContentsMargins(0, 0, 0, 0)
if os.path.exists(external_file_path):
self.file_path = external_file_path
else:
self.file_path = self.generate_blank_map_html(lon_avg=40.430, lat_avg=3.56)
self.webView.setUrl(QtCore.QUrl.fromLocalFile(self.file_path))
# # action linking
self.ui.actionSave_map.triggered.connect(self.save)
def closeEvent(self, event):
"""
Remove the file on close
:param event:
"""
if os.path.exists(self.file_path):
os.remove(self.file_path)
def save(self):
"""
Save a copy of the displayed map
:return:
"""
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
file, filter = QFileDialog.getSaveFileName(self, "Save map", '',
filter="html (*.html)",
options=options)
if file != '':
if not file.endswith('.html'):
file += '.html'
copyfile(self.file_path, file)
def msg(self, text, title="Warning"):
"""
Message box
:param text: Text to display
:param title: Name of the window
"""
msg = QMessageBox()
msg.setIcon(QMessageBox.Information)
msg.setText(text)
# msg.setInformativeText("This is additional information")
msg.setWindowTitle(title)
# msg.setDetailedText("The details are as follows:")
msg.setStandardButtons(QMessageBox.Ok)
retval = msg.exec_()
@staticmethod
def generate_blank_map_html(lon_avg, lat_avg):
"""
Generate a blank HTML map file
:param lon_avg: center longitude
:param lat_avg: center latitude
:return: file name
"""
my_map = folium.Map(location=[lon_avg, lat_avg], zoom_start=5)
gc_path = get_create_gridcal_folder()
path = os.path.join(gc_path, 'map.html')
my_map.save(path)
return path
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
window = GISWindow()
window.resize(1.61 * 700.0, 600.0) # golden ratio
window.show()
sys.exit(app.exec_())
|
code/load_strength_data.py | Apsu/engram | 103 | 11161391 | import numpy as np  # used for the array math below
# NOTE: print_matrix_info, heatmap, min_strength_factor and print_output are
# assumed to be defined elsewhere in the repository before this script runs.
# Normalize by the highest peak force (middle finger):
middle_force = 2.36
index_force = 2.26
ring_force = 2.02
little_force = 1.84
middle_norm = 1.0
index_norm = index_force / middle_force
ring_norm = ring_force / middle_force
little_norm = little_force / middle_force
print('index/middle: {0}'.format(index_norm))
print('ring/middle: {0}'.format(ring_norm))
print('little/middle: {0}'.format(little_norm))
# Relative left/right hand strength (assume equal):
lf = 1.0
rf = 1.0
strengths24 = np.array((
lf * little_norm, lf * ring_norm, lf * middle_norm, lf * index_norm,
lf * little_norm, lf * ring_norm, lf * middle_norm, lf * index_norm,
lf * little_norm, lf * ring_norm, lf * middle_norm, lf * index_norm,
rf * index_norm, rf * middle_norm, rf * ring_norm, rf * little_norm,
rf * index_norm, rf * middle_norm, rf * ring_norm, rf * little_norm,
rf * index_norm, rf * middle_norm, rf * ring_norm, rf * little_norm))
# Create a finger-pair position strength matrix by adding pairs of strength values:
Strength24x24 = np.zeros((24, 24))
for i in range(24):
Strength24x24[i,:] = strengths24
Strength24x24 = (Strength24x24 + Strength24x24.transpose())
# Normalize matrix with min-max scaling to a range with maximum = 1:
#newMin = strength_factor
newMin = min_strength_factor # np.min(Strength24x24) / np.max(Strength24x24)
newMax = 1.0
Strength24x24 = newMin + (Strength24x24 - np.min(Strength24x24)) * (newMax - newMin) / (np.max(Strength24x24) - np.min(Strength24x24))
# Print:
print_matrix_info(matrix_data=Strength24x24, matrix_label="Strength24x24", nkeys=24, nlines=10)
heatmap(data=Strength24x24, title="Strength24x24", xlabel="Key 1", ylabel="Key 2", print_output=print_output)
penalty = 1.0 # Penalty for lateral (index, little) finger placement (1 = no penalty)
strengths32 = np.array((lf * little_norm, lf * ring_norm, lf * middle_norm, lf * index_norm,
lf * little_norm, lf * ring_norm, lf * middle_norm, lf * index_norm,
lf * little_norm, lf * ring_norm, lf * middle_norm, lf * index_norm,
rf * index_norm, rf * middle_norm, rf * ring_norm, rf * little_norm,
rf * index_norm, rf * middle_norm, rf * ring_norm, rf * little_norm,
rf * index_norm, rf * middle_norm, rf * ring_norm, rf * little_norm,
lf * index_norm * penalty, lf * index_norm * penalty, lf * index_norm * penalty,
rf * index_norm * penalty, rf * index_norm * penalty, rf * index_norm * penalty,
rf * little_norm * penalty, rf * little_norm * penalty))
# Create a finger-pair position strength matrix by adding pairs of strength values:
Strength32x32 = np.zeros((32, 32))
for i in range(32):
Strength32x32[i,:] = strengths32
Strength32x32 = (Strength32x32 + Strength32x32.transpose())
# Normalize matrix with min-max scaling to a range with maximum = 1:
newMin = np.min(Strength32x32) / np.max(Strength32x32)
newMax = 1.0
Strength32x32 = newMin + (Strength32x32 - np.min(Strength32x32)) * (newMax - newMin) / (np.max(Strength32x32) - np.min(Strength32x32))
# Print:
print_matrix_info(matrix_data=Strength32x32, matrix_label="Strength32x32", nkeys=32, nlines=10)
heatmap(data=Strength32x32, title="Strength32x32", xlabel="Key 1", ylabel="Key 2", print_output=print_output) |
wsltools/utils/compat.py | Symbo1/wsltools | 412 | 11161399 | # -*- coding: utf-8 -*-
import sys
PY2 = sys.version_info.major == 2
if PY2:
xrange = xrange
text_type = unicode
string_types = (str, unicode)
from urllib import unquote, urlencode
from urllib2 import urlopen, Request
from urlparse import urlparse, parse_qsl, urlunparse
else:
xrange = range
text_type = str
string_types = (str,)
from urllib.request import urlopen, Request
from urllib.parse import urlparse, parse_qsl, unquote, urlencode, urlunparse
def bytes_decode(content):
if isinstance(content, string_types):
pass
elif isinstance(content, bytes):
content = bytes.decode(content)
else:
content = str(content)
return content
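# A minimal usage sketch (not part of the original module): the shims above give
# identical URL parsing and byte decoding on Python 2 and Python 3.
if __name__ == '__main__':
    parsed = urlparse('https://example.com/path?a=1&b=2')
    print(parsed.netloc, dict(parse_qsl(parsed.query)))
    print(bytes_decode(b'payload'), bytes_decode(u'payload'))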
|
metrics/log_collectors/simple_log_collector/src/tail_to_tds.py | adrian555/FfDL | 680 | 11161403 | <filename>metrics/log_collectors/simple_log_collector/src/tail_to_tds.py
#!/usr/bin/env python
#
# Copyright 2017-2018 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import argparse
import logging
from log_collectors.training_data_service_client import match_log_file
from log_collectors.training_data_service_client import push_log_line
from log_collectors.training_data_service_client import scan_log_dirs
def main():
logging.basicConfig(format='%(filename)s %(funcName)s %(lineno)d: %(message)s', level=logging.INFO)
log_directory = os.environ["LOG_DIR"]
# log_file = log_directory + "/latest-log"
parser = argparse.ArgumentParser()
parser.add_argument('--log_dir', type=str, default=log_directory,
help='Log directory')
FLAGS, unparsed = parser.parse_known_args()
scan_log_dirs.LogScanner(should_connect=True).scan(
log_dir=FLAGS.log_dir,
is_log=match_log_file.is_log_file,
push_function=push_log_line.push)
if __name__ == '__main__':
main()
|
sarenka/backend/api_searcher/migrations/0005_auto_20210105_0315.py | adolabsnet/sarenka | 380 | 11161427 | # Generated by Django 3.1.4 on 2021-01-05 02:15
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api_searcher', '0004_auto_20201220_1200'),
]
operations = [
migrations.RemoveField(
model_name='cvemodel',
name='cwe',
),
migrations.RemoveField(
model_name='technicalimpactmodel',
name='cwe',
),
migrations.DeleteModel(
name='CausedByModel',
),
migrations.DeleteModel(
name='CVEModel',
),
migrations.DeleteModel(
name='CWEModel',
),
migrations.DeleteModel(
name='TechnicalImpactModel',
),
]
|
plugins/measurer/util_algo.py | Kitware/VAIME | 127 | 11161432 | <reponame>Kitware/VAIME
"""
Functional implementations of common algorithms
"""
import numpy as np
import scipy.optimize
def minimum_weight_assignment(cost):
"""
Finds optimal assignment between two disjoint sets of items
Args:
cost (ndarray): cost[i, j] is the cost between items i and j
CommandLine:
xdoctest viame.processes.camtrawl.util_algo minimum_weight_assignment
Example:
>>> # Rows are detections in img1, cols are detections in img2
>>> from viame.processes.camtrawl.util_algo import *
>>> cost = np.array([
>>> [9, 2, 1, 9],
>>> [4, 1, 5, 5],
>>> [9, 9, 2, 4],
>>> ])
>>> assign1 = minimum_weight_assignment(cost)
>>> print('assign1 = {!r}'.format(assign1))
assign1 = [(0, 2), (1, 1), (2, 3)]
>>> assign2 = minimum_weight_assignment(cost.T)
>>> print('assign2 = {!r}'.format(assign2))
assign2 = [(1, 1), (2, 0), (3, 2)]
"""
n1, n2 = cost.shape
n = max(n1, n2)
# Embed the [n1 x n2] matrix in a padded (with inf) [n x n] matrix
cost_matrix = np.full((n, n), fill_value=np.inf)
cost_matrix[0:n1, 0:n2] = cost
# Find an effective infinite value for infeasible assignments
is_infeasible = np.isinf(cost_matrix)
is_positive = cost_matrix > 0
feasible_vals = cost_matrix[~(is_infeasible & is_positive)]
large_val = (n + feasible_vals.sum()) * 2
# replace infinite values with effective infinite values
cost_matrix[is_infeasible] = large_val
# Solve munkres problem for minimum weight assignment
indexes = list(zip(*scipy.optimize.linear_sum_assignment(cost_matrix)))
# Return only the feasible assignments
assignment = [(i, j) for (i, j) in indexes
if cost_matrix[i, j] < large_val]
return assignment
|
sendgrid/helpers/mail/spam_url.py | modernwarfareuplink/sendgrid-python | 1,268 | 11161438 | class SpamUrl(object):
"""An Inbound Parse URL that you would like a copy of your email
along with the spam report to be sent to."""
def __init__(self, spam_url=None):
"""Create a SpamUrl object
:param spam_url: An Inbound Parse URL that you would like a copy of
your email along with the spam report to be sent to.
:type spam_url: string, optional
"""
self._spam_url = None
if spam_url is not None:
self.spam_url = spam_url
@property
def spam_url(self):
"""An Inbound Parse URL that you would like a copy of your email
along with the spam report to be sent to.
:rtype: string
"""
return self._spam_url
@spam_url.setter
def spam_url(self, value):
"""An Inbound Parse URL that you would like a copy of your email
along with the spam report to be sent to.
:param value: An Inbound Parse URL that you would like a copy of your
email along with the spam report to be sent to.
:type value: string
"""
self._spam_url = value
def get(self):
"""
Get a JSON-ready representation of this SpamUrl.
:returns: This SpamUrl, ready for use in a request body.
:rtype: string
"""
return self.spam_url
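# A minimal usage sketch (an assumption about typical use, not part of the helper):
if __name__ == "__main__":
    spam_url = SpamUrl(spam_url="https://example.com/inbound-spam")
    print(spam_url.get())  # https://example.com/inbound-spam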
|
Visualizations/model_fooling.py | arafin-lab/model_inversion_experiments | 101 | 11161472 | <gh_stars>100-1000
from graphviz import Digraph
# MODEL FOOLING
def fooling_attack():
fool_attack = Digraph('Fooling Attacks',graph_attr={'size':'8.5,11.5'},comment='Taxonomy of Secure Deep Learning', )
fool_attack #doctest: +ELLIPSIS
# NODES:
fool_attack.node('Fooling Attacks', r'{<f0> Fooling Attacks |<f1> '+
r'https://arxiv.org/abs/1804.00097'+
r'\n\n}', shape='record')
fool_attack.node('L-BFGS', r'{<f0> L-BFGS |<f1> '+
r'https://arxiv.org/abs/1312.6199'+
r'\n\n}', shape='record')
fool_attack.node('FGSM', r'{<f0> Fast Gradient Sign \n Method (FGSM) |<f1> '+
r'https://arxiv.org/abs/1412.6572'+
r'\n\n}', shape='record')
fool_attack.node('Black Box', r'{<f0> Black Box/\n Transferable Attacks |<f1> '+
r'https://arxiv.org/abs/1611.02770'+
r'\n\n}' , shape = 'record')
fool_attack.node('BIM', r'{<f0> Basic Iterative Method(BIM)\n/ Iterative FGSM(I-FGSM) |<f1> '+
r'https://arxiv.org/abs/1607.02533'+
r'\n\n}', shape='record')
fool_attack.node('PGD', r'{<f0> Projected Gradient \n Descent |<f1> '+
r'https://arxiv.org/abs/1706.06083'+
r'\n\n}', shape='record')
fool_attack.node('ATN', r'{<f0> Adversarial Transformation \n Networks/ GANs |<f1> '+
r'https://arxiv.org/abs/1703.09387'+
r'\n\n}', shape='record')
# EDGES:
fool_attack.edge('Fooling Attacks', 'Black Box')
fool_attack.edge('Fooling Attacks', 'White Box')
fool_attack.edge('White Box', 'L-BFGS')
fool_attack.edge('White Box', 'FGSM')
fool_attack.edge('White Box', 'BIM')
fool_attack.edge('White Box', 'PGD')
fool_attack.edge('White Box', 'ATN')
return fool_attack
#
def fooling_defense():
fool_defense = Digraph('Fooling Defense',comment='Taxonomy of Secure Deep Learning', )
fool_defense.edge('Fooling Defenses', 'Gradient Masking')
fool_defense.edge('Gradient Masking', 'Complex Nonlinearities')
fool_defense.edge('Fooling Defenses', 'Adversarial Training')
fool_defense.edge('Fooling Defenses', 'Preprocessing')
fool_defense.edge('Complex Nonlinearities', 'RBF Neural Networks')
fool_defense.edge('Complex Nonlinearities', 'SVM Layers')
fool_defense.edge('Adversarial Training', 'Data Augmentation')
fool_defense.edge('Preprocessing', 'Noise Removal')
return fool_defense
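# A minimal usage sketch (an assumption, not part of the original script): emit the
# DOT source and render the taxonomies (rendering needs the Graphviz binaries on PATH).
if __name__ == '__main__':
    print(fooling_attack().source)
    fooling_defense().render('fooling_defenses', cleanup=True)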
|
zeus/migrations/f7c8c10f5aea_rethink_authors.py | conrad-kronos/zeus | 221 | 11161477 | """rethink_authors
Revision ID: <KEY>
Revises: <PASSWORD>
Create Date: 2017-07-14 13:20:01.610676
"""
import zeus
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = "<PASSWORD>"
branch_labels = ()
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index("ix_build_author_id", table_name="build")
op.drop_constraint("build_author_id_fkey", "build", type_="foreignkey")
op.drop_column("build", "author_id")
op.add_column(
"source", sa.Column("author_id", zeus.db.types.guid.GUID(), nullable=True)
)
op.create_index(op.f("ix_source_author_id"), "source", ["author_id"], unique=False)
op.create_foreign_key(None, "source", "author", ["author_id"], ["id"])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, "source", type_="foreignkey")
op.drop_index(op.f("ix_source_author_id"), table_name="source")
op.drop_column("source", "author_id")
op.add_column(
"build",
sa.Column("author_id", postgresql.UUID(), autoincrement=False, nullable=True),
)
op.create_foreign_key(
"build_author_id_fkey", "build", "author", ["author_id"], ["id"]
)
op.create_index("ix_build_author_id", "build", ["author_id"], unique=False)
# ### end Alembic commands ###
|
tensorflow_probability/python/internal/test_util_scipy.py | m5l14i11/probability | 3,670 | 11161481 | <reponame>m5l14i11/probability<gh_stars>1000+
# Copyright 2021 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities for testing TFP code that depend on scipy."""
import numpy as np
import scipy.optimize as optimize
import scipy.stats as stats
__all__ = [
'binomial_confidence_interval',
]
# TODO(axch): Test this independently of its use in
# distributions/internal/correlation_matrix_volumes_lib
def binomial_confidence_interval(successes, trials, error_rate):
"""Computes a confidence interval on the true p of a binomial.
Assumes:
- The given `successes` count outcomes of an iid Bernoulli trial
with unknown probability p, that was repeated `trials` times.
Guarantees:
- The probability (over the randomness of drawing the given sample)
that the true p is outside the returned interval is no more than
the given `error_rate`.
Args:
successes: Python or numpy `int` number of successes.
trials: Python or numpy `int` number of trials.
error_rate: Python `float` admissible rate of mistakes.
Returns:
low_p: Lower bound of confidence interval.
high_p: Upper bound of confidence interval.
Raises:
ValueError: If scipy is not available.
"""
def p_small_enough(p):
log_prob = stats.binom.logcdf(successes, trials, p)
return log_prob - np.log(error_rate / 2.)
def p_big_enough(p):
log_prob = stats.binom.logsf(successes, trials, p)
return log_prob - np.log(error_rate / 2.)
if successes < trials:
high_p = optimize.brentq(
p_small_enough, successes / float(trials), 1., rtol=1e-9)
else:
high_p = 1.
if successes > 0:
low_p = optimize.brentq(
p_big_enough, 0., successes / float(trials), rtol=1e-9)
else:
low_p = 0.
return low_p, high_p
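# A minimal usage sketch (not part of the original module): a 95% confidence
# interval on the underlying success probability after 7 successes in 10 trials.
if __name__ == '__main__':
  low_p, high_p = binomial_confidence_interval(successes=7, trials=10, error_rate=0.05)
  print('95% CI for p: [{:.3f}, {:.3f}]'.format(low_p, high_p))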
|
social/backends/flickr.py | raccoongang/python-social-auth | 1,987 | 11161485 | <reponame>raccoongang/python-social-auth<filename>social/backends/flickr.py
from social_core.backends.flickr import FlickrOAuth
|
indy_node/test/request_handlers/rich_schema/test_rich_schema_handler.py | Rob-S/indy-node | 627 | 11161486 | import pytest
from indy_common.constants import ENDORSER
from indy_node.server.request_handlers.domain_req_handlers.rich_schema.rich_schema_handler import RichSchemaHandler
from indy_node.test.request_handlers.helper import add_to_idr
from indy_node.test.request_handlers.rich_schema.helper import rich_schema_request
from plenum.common.constants import TRUSTEE
@pytest.fixture()
def rich_schema_handler(db_manager, write_auth_req_validator):
return RichSchemaHandler(db_manager, write_auth_req_validator)
@pytest.fixture()
def rich_schema_req(rich_schema_handler):
req = rich_schema_request()
add_to_idr(rich_schema_handler.database_manager.idr_cache, req.identifier, TRUSTEE)
add_to_idr(rich_schema_handler.database_manager.idr_cache, req.endorser, ENDORSER)
return req
def test_schema_dynamic_validation_passes(rich_schema_handler, rich_schema_req):
rich_schema_handler.dynamic_validation(rich_schema_req, 0)
|
gcloud/tests/apigw/views/test_get_user_project_detail.py | wkma/bk-sops | 881 | 11161493 | <filename>gcloud/tests/apigw/views/test_get_user_project_detail.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from pipeline.utils.collections import FancyDict
from gcloud.tests.mock import * # noqa
from gcloud.tests.mock_settings import * # noqa
from gcloud import err_code
from .utils import APITest
TEST_PROJECT_ID = "1"
TEST_PROJECT_ID_2 = "2"
TEST_PROJECT_NAME = "name"
TEST_BIZ_CC_ID = "2"
TEST_BIZ_NAME = "biz_name"
TEST_BIZ_DEVELOPERS = "TEST_BIZ_DEVELOPERS"
TEST_BIZ_MAINTAINER = "TEST_BIZ_MAINTAINER"
TEST_BIZ_TESTER = "TEST_BIZ_TESTER"
TEST_BIZ_PRODUCTOR = "TEST_BIZ_PRODUCTOR"
class GetUserProjectDetailAPITest(APITest):
def url(self):
return "/apigw/get_user_project_detail/{project_id}/"
@patch(
PROJECT_GET,
MagicMock(
return_value=MockProject(
project_id=TEST_PROJECT_ID, name=TEST_PROJECT_NAME, bk_biz_id=TEST_BIZ_CC_ID, from_cmdb=True,
)
),
)
@patch(
APIGW_GET_USER_PROJECT_DETAIL_GET_BUSINESS_DETAIL, MagicMock(side_effect=Exception()),
)
def test_get_user_project_detail__get_business_detail_raise(self):
response = self.client.get(path=self.url().format(project_id=TEST_PROJECT_ID))
data = json.loads(response.content)
self.assertFalse(data["result"])
self.assertTrue("message" in data)
self.assertEqual(data["code"], err_code.UNKNOWN_ERROR.code)
@patch(
PROJECT_GET,
MagicMock(
return_value=MockProject(
project_id=TEST_PROJECT_ID_2, name=TEST_PROJECT_NAME, bk_biz_id=TEST_BIZ_CC_ID, from_cmdb=True,
)
),
)
@patch(
APIGW_GET_USER_PROJECT_DETAIL_GET_BUSINESS_DETAIL,
MagicMock(
return_value=FancyDict(
bk_biz_id=TEST_BIZ_CC_ID,
bk_biz_name=TEST_BIZ_NAME,
bk_biz_developer=TEST_BIZ_DEVELOPERS,
bk_biz_maintainer=TEST_BIZ_MAINTAINER,
bk_biz_tester=TEST_BIZ_TESTER,
bk_biz_productor=TEST_BIZ_PRODUCTOR,
)
),
)
def test_get_user_project_detail__success(self):
response = self.client.get(path=self.url().format(project_id=TEST_PROJECT_ID_2))
data = json.loads(response.content)
self.assertTrue(data["result"])
self.assertEqual(data["code"], err_code.SUCCESS.code)
self.assertEqual(
data["data"],
{
"project_id": TEST_PROJECT_ID_2,
"project_name": TEST_PROJECT_NAME,
"from_cmdb": True,
"bk_biz_id": TEST_BIZ_CC_ID,
"bk_biz_name": TEST_BIZ_NAME,
"bk_biz_developer": TEST_BIZ_DEVELOPERS,
"bk_biz_maintainer": TEST_BIZ_MAINTAINER,
"bk_biz_tester": TEST_BIZ_TESTER,
"bk_biz_productor": TEST_BIZ_PRODUCTOR,
},
)
|
py/kubeflow/kubeflow/cd/notebook_servers/notebook_server_jupyter_pytorch_full_runner.py | zhyon404/kubeflow | 9,272 | 11161518 | # This file is only intended for development purposes
from kubeflow.kubeflow.cd import base_runner
base_runner.main(component_name="notebook_servers.notebook_server_jupyter_pytorch_full",
workflow_name="nb-j-pt-f-build")
|
bookwyrm/tests/models/test_group.py | mouse-reeve/fedireads | 270 | 11161533 | <filename>bookwyrm/tests/models/test_group.py
""" testing models """
from unittest.mock import patch
from django.test import TestCase
from bookwyrm import models
@patch("bookwyrm.models.activitypub_mixin.broadcast_task.apply_async")
class Group(TestCase):
"""some activitypub oddness ahead"""
def setUp(self):
"""Set up for tests"""
with patch("bookwyrm.suggested_users.rerank_suggestions_task.delay"), patch(
"bookwyrm.activitystreams.populate_stream_task.delay"
), patch("bookwyrm.lists_stream.populate_lists_task.delay"):
self.owner_user = models.User.objects.create_user(
"mouse", "<EMAIL>", "mouseword", local=True, localname="mouse"
)
self.rat = models.User.objects.create_user(
"rat", "<EMAIL>", "ratword", local=True, localname="rat"
)
self.badger = models.User.objects.create_user(
"badger",
"<EMAIL>",
"badgerword",
local=True,
localname="badger",
)
self.capybara = models.User.objects.create_user(
"capybara",
"<EMAIL>",
"capybaraword",
local=True,
localname="capybara",
)
self.public_group = models.Group.objects.create(
name="Public Group",
description="Initial description",
user=self.owner_user,
privacy="public",
)
self.private_group = models.Group.objects.create(
name="Private Group",
description="Top secret",
user=self.owner_user,
privacy="direct",
)
self.followers_only_group = models.Group.objects.create(
name="Followers Group",
description="No strangers",
user=self.owner_user,
privacy="followers",
)
models.GroupMember.objects.create(group=self.private_group, user=self.badger)
models.GroupMember.objects.create(
group=self.followers_only_group, user=self.badger
)
models.GroupMember.objects.create(group=self.public_group, user=self.capybara)
def test_group_members_can_see_private_groups(self, _):
"""direct privacy group should not be excluded from group listings for group
members viewing"""
rat_groups = models.Group.privacy_filter(self.rat).all()
badger_groups = models.Group.privacy_filter(self.badger).all()
self.assertFalse(self.private_group in rat_groups)
self.assertTrue(self.private_group in badger_groups)
def test_group_members_can_see_followers_only_lists(self, _):
"""follower-only group booklists should not be excluded from group booklist
        listing for group members who do not follow the list owner"""
with patch("bookwyrm.models.activitypub_mixin.broadcast_task.apply_async"):
followers_list = models.List.objects.create(
name="Followers List",
curation="group",
privacy="followers",
group=self.public_group,
user=self.owner_user,
)
rat_lists = models.List.privacy_filter(self.rat).all()
badger_lists = models.List.privacy_filter(self.badger).all()
capybara_lists = models.List.privacy_filter(self.capybara).all()
self.assertFalse(followers_list in rat_lists)
self.assertFalse(followers_list in badger_lists)
self.assertTrue(followers_list in capybara_lists)
def test_group_members_can_see_private_lists(self, _):
"""private group booklists should not be excluded from group booklist listing
for group members"""
with patch("bookwyrm.models.activitypub_mixin.broadcast_task.apply_async"):
private_list = models.List.objects.create(
name="Private List",
privacy="direct",
curation="group",
group=self.public_group,
user=self.owner_user,
)
rat_lists = models.List.privacy_filter(self.rat).all()
badger_lists = models.List.privacy_filter(self.badger).all()
capybara_lists = models.List.privacy_filter(self.capybara).all()
self.assertFalse(private_list in rat_lists)
self.assertFalse(private_list in badger_lists)
self.assertTrue(private_list in capybara_lists)
|
homeassistant/components/fortios/__init__.py | domwillcode/home-assistant | 30,023 | 11161536 | """Fortinet FortiOS components."""
|
starfish/core/morphology/Filter/reduce.py | haoxusci/starfish | 164 | 11161575 | <gh_stars>100-1000
from typing import Callable, Optional, Tuple, Union
import numpy as np
from starfish.core.morphology.binary_mask import BinaryMaskCollection
from starfish.core.types import FunctionSource, FunctionSourceBundle
from ._base import FilterAlgorithm
class Reduce(FilterAlgorithm):
"""
    Reduce takes the masks from one ``BinaryMaskCollection`` and reduces them down to a single mask by
applying a specified function. That mask is then returned as a new ``BinaryMaskCollection``.
An initial value is used to start the reduction process. The first call to the function will be
called with ``initial`` and ``M0`` and produce ``R0``. The second call to the function will be
called with ``R0`` and ``M1`` and produce ``R1``.
Parameters
----------
func : Union[str, FunctionSourceBundle]
Function to reduce the tiles in the input.
If this value is a string, then the python package is :py:attr:`FunctionSource.np`.
If this value is a ``FunctionSourceBundle``, then the python package and module name is
obtained from the bundle.
initial : Union[np.ndarray, Callable[[Tuple[int, ...]], np.ndarray]]
An initial array that is the same shape as an uncropped mask, or a callable that accepts the
shape of an uncropped mask as its parameter and produces an initial array.
Examples
--------
Applying a logical 'AND' across all the masks in a collection.
>>> from starfish.core.morphology.binary_mask.test import factories
>>> from starfish.morphology import Filter
>>> from starfish.types import FunctionSource
>>> import numpy as np
>>> from skimage.morphology import disk
>>> binary_mask_collection = factories.binary_mask_collection_2d()
>>> initial_mask_producer = lambda shape: np.ones(shape=shape)
>>> ander = Filter.Reduce(FunctionSource.np("logical_and"), initial_mask_producer)
    >>> anded = ander.run(binary_mask_collection)
See Also
--------
starfish.core.types.Axes
"""
def __init__(
self,
func: Union[str, FunctionSourceBundle],
initial: Union[np.ndarray, Callable[[Tuple[int, ...]], np.ndarray]],
*func_args,
**func_kwargs,
) -> None:
if isinstance(func, str):
self._func = FunctionSource.np(func)
elif isinstance(func, FunctionSourceBundle):
self._func = func
self._initial = initial
self._func_args = func_args
self._func_kwargs = func_kwargs
def run(
self,
binary_mask_collection: BinaryMaskCollection,
n_processes: Optional[int] = None,
*args,
**kwargs
) -> BinaryMaskCollection:
"""Map from input to output by applying a specified function to the input.
Parameters
----------
binary_mask_collection : BinaryMaskCollection
BinaryMaskCollection to be filtered.
n_processes : Optional[int]
The number of processes to use for apply. If None, uses the output of os.cpu_count()
(default = None).
Returns
-------
BinaryMaskCollection
Return the results of filter as a new BinaryMaskCollection.
"""
# Apply the reducing function
return binary_mask_collection._reduce(
self._func.resolve(),
self._initial,
*self._func_args,
**self._func_kwargs)
|
tests/testapp3/apps.py | thebjorn/django_dramatiq | 229 | 11161584 | <reponame>thebjorn/django_dramatiq<gh_stars>100-1000
from django.apps import AppConfig
class Testapp3Config(AppConfig):
name = "tests.testapp3"
|
tools/mo/openvino/tools/mo/front/kaldi/extractors/mul_ext.py | ryanloney/openvino-1 | 1,127 | 11161594 | # Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.tools.mo.ops.elementwise import Mul
from openvino.tools.mo.front.extractor import FrontExtractorOp
class MulFrontExtractor(FrontExtractorOp):
op = 'Mul'
enabled = True
@classmethod
def extract(cls, node):
Mul.update_node_stat(node, {})
return cls.enabled
|
tests/test_helpers/test_search/base.py | ejfitzgerald/agents-aea | 126 | 11161622 | <reponame>ejfitzgerald/agents-aea
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the tests for the search helper module."""
from aea.helpers.search.models import Location
def test_location_init():
"""Test the initialization of the location model"""
latitude = 51.507351
longitude = -0.127758
loc = Location(latitude, longitude)
latitude_2 = 48.856613
longitude_2 = 2.352222
loc2 = Location(latitude_2, longitude_2)
assert loc != loc2, "Locations should not be the same."
assert loc.distance(loc2) > 0.0, "Locations should be positive."
|
L1Trigger/CSCTrackFinder/test/csctfAnaData_cfg.py | ckamtsikis/cmssw | 852 | 11161639 | import FWCore.ParameterSet.Config as cms
import sys
print("Starting CSCTF Data Analyzer")
process = cms.Process("CSCTFEFF")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport.reportEvery = 1000
#*****************************************************************************************************************************
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(10000) )
#*****************************************************************************************************************************
fileOutName = "AnaDataHists.root"
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring()
)
process.source.fileNames.extend([
#High PU Run
"/store/data/Run2011B/L1MuHPF/RAW/v1/000/178/208/FEAA124F-7EF3-E011-9208-003048673374.root",
"/store/data/Run2011B/L1MuHPF/RAW/v1/000/178/208/FE03EB11-5DF3-E011-B717-003048F117EC.root",
"/store/data/Run2011B/L1MuHPF/RAW/v1/000/178/208/FAB7E40D-76F3-E011-8566-BCAEC53296FF.root",
"/store/data/Run2011B/L1MuHPF/RAW/v1/000/178/208/FA1E6192-81F3-E011-B876-BCAEC53296FF.root",
"/store/data/Run2011B/L1MuHPF/RAW/v1/000/178/208/FA085011-7FF3-E011-9DE3-003048D2BC4C.root",
"/store/data/Run2011B/L1MuHPF/RAW/v1/000/178/208/F8A8B8E4-86F3-E011-BC5A-BCAEC53296F7.root",
"/store/data/Run2011B/L1MuHPF/RAW/v1/000/178/208/F8A32D30-80F3-E011-9CCE-001D09F24489.root",
"/store/data/Run2011B/L1MuHPF/RAW/v1/000/178/208/F86797FF-7DF3-E011-AC73-003048F024FE.root",
"/store/data/Run2011B/L1MuHPF/RAW/v1/000/178/208/F6C69A11-7FF3-E011-A256-001D09F24EAC.root",
"/store/data/Run2011B/L1MuHPF/RAW/v1/000/178/208/F62BF6A4-7BF3-E011-8A47-E0CB4E4408E3.root",
"/store/data/Run2011B/L1MuHPF/RAW/v1/000/178/208/F491E355-60F3-E011-9860-0030486780E6.root"
])
# Event Setup
##############
process.load("Configuration.StandardSequences.Services_cff")
process.load("Configuration.StandardSequences.MagneticField_cff")
process.load("Configuration.StandardSequences.Geometry_cff")
process.load("EventFilter.CSCTFRawToDigi.csctfunpacker_cfi")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.GlobalTag.globaltag ='GR_R_43_V3::All'
# L1 Emulator
#############PtEffStatsFilename
process.load("Configuration.StandardSequences.SimL1Emulator_cff")
#prints out Alex's Firmware debugging output
#process.simCsctfTrackDigis.SectorProcessor.isCoreVerbose = True
#process.simCsctfTrackDigis.SectorProcessor.initializeFromPSet = True
#Configurable options when PSet is True
#process.simCsctfTrackDigis.SectorProcessor.mindetap = cms.uint32(4)
#process.simCsctfTrackDigis.SectorProcessor.mindphip = cms.uint32(128)
#process.simCsctfTrackDigis.SectorProcessor.straightp = cms.uint32(60)
#process.simCsctfTrackDigis.SectorProcessor.curvedp = cms.uint32(200)
#process.simCsctfTrackDigis.SectorProcessor.firmwareSP = cms.uint32(20110204)
#process.simCsctfTrackDigis.SectorProcessor.EtaWindows = cms.vuint32(4,4,4,4,4,4,4)
# CSCTFEfficiency Analyzer
# defualt values
process.cscTFEfficiency = cms.EDAnalyzer('CSCTFEfficiency',
inputTag = cms.untracked.InputTag("csctfunpacker"),
MinPtSim = cms.untracked.double(2.0),
MaxPtSim = cms.untracked.double(500.0),
MinEtaSim = cms.untracked.double(0.9),
MaxEtaSim = cms.untracked.double(2.4),
MinPtTF = cms.untracked.double(-1),
MinQualityTF = cms.untracked.double(2),
GhostLoseParam = cms.untracked.string("Q"),
InputData = cms.untracked.bool(False),
MinMatchR = cms.untracked.double(0.5),
MinPtHist = cms.untracked.double(-0.5),
MaxPtHist = cms.untracked.double(140.5),
BinsPtHist = cms.untracked.double(70),
SaveHistImages = cms.untracked.bool(False),
SingleMuSample = cms.untracked.bool(False),
NoRefTracks = cms.untracked.bool(True),
StatsFilename = cms.untracked.string("/dev/null"),
PtEffStatsFilename = cms.untracked.string("/dev/null"),
type_of_data = cms.untracked.int32(0)
)
# Data Type Key
#------------------------------------------------------------
# Num | Name | track source | Mode info?
#------------------------------------------------------------
# 0 | L1CSCTrack | CSCs | yes
# 1 | L1MuRegionalCand | CSCs | no
# 2 | L1MuGMTExtendedCand | GMT | no
#process.cscTFEfficiency.type_of_data = 0
#=Controls the cut values for TF track selection
#process.cscTFEfficiency.MinQualityTF = 2
#=Use False to run Simulated Data or True for Real Data
#process.cscTFEfficiency.InputData = True
#=Controls the maximum R value for matching
#process.cscTFEfficiency.MaxMatchR = 0.5
#=Controls minimum value on x-axis of PT Hist
#process.cscTFEfficiency.MinPtHist = -0.5
#=Controls maximum value on x-axis of PT Hist
#process.cscTFEfficiency.MaxPtHist = 20.5
#=Controls the number of bins used to create the PT Hist
#process.cscTFEfficiency.BinsPtHist = 21
#=Controls the name of the statistics file output
#process.cscTFEfficiency.StatsFilename = statName
#=Controls output of validation histogram images:
#process.cscTFEfficiency.SaveHistImages = False
#=Controls Ghost Validation Counting (Default False):
#process.cscTFEfficiency.SingleMuSample = True
#=Controls ghost selection method. Use quality "Q" or match value "R" as metric
#=Best candidate is considered real track, others considered ghosts.
#=Default Q
#process.cscTFEfficiency.GhostLoseParam = "R"
#=Controls the name of the output file for the Pt Efficiency Stats
#process.cscTFEfficiency.PtEffStatsFilename = PtEffStatsName
process.TFileService = cms.Service("TFileService",
fileName = cms.string(
fileOutName
))
process.FEVT = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string("testAnaData.root"),
outputCommands = cms.untracked.vstring(
"keep *"
)
)
process.p = cms.Path(process.csctfunpacker*process.cscTFEfficiency)
#to create testEfff.root
#process.outpath = cms.EndPath(process.FEVT)
|
autolens/point/point_dataset.py | Jammy2211/AutoLens | 114 | 11161652 | import json
import numpy as np
import os
from os import path
from typing import List, Tuple, Dict, Optional, Union
import autoarray as aa
class PointDataset:
def __init__(
self,
name: str,
positions: Union[aa.Grid2DIrregular, List[List], List[Tuple]],
positions_noise_map: Union[aa.ValuesIrregular, List[float]],
fluxes: Optional[Union[aa.ValuesIrregular, List[float]]] = None,
fluxes_noise_map: Optional[Union[aa.ValuesIrregular, List[float]]] = None,
):
"""
        A collection of the data components that can be used for point-source model-fitting, for example fitting the
        observed positions of a strongly lensed quasar or supernova, or in strong lens cluster modeling, where
        there may be many tens or hundreds of individual source galaxies, each of which is modeled as a point source.
The name of the dataset is required for point-source model-fitting, as it pairs a point-source dataset with
its corresponding point-source in the model-fit. For example, if a dataset has the name `source_1`, it will
be paired with the `Point` model-component which has the name `source_1`. If a dataset component is not
successfully paired with a model-component, an error is raised.
Parameters
----------
name
The name of the point source dataset which is paired to a `Point` in the `Model`.
positions
The image-plane (y,x) positions of the point-source.
positions_noise_map
The noise-value of every (y,x) position, which is typically the pixel-scale of the data.
fluxes
The image-plane flux of each observed point-source of light.
fluxes_noise_map
The noise-value of every observed flux.
"""
self.name = name
if not isinstance(positions, aa.Grid2DIrregular):
positions = aa.Grid2DIrregular(grid=positions)
self.positions = positions
if not isinstance(positions_noise_map, aa.ValuesIrregular):
positions_noise_map = aa.ValuesIrregular(values=positions_noise_map)
self.positions_noise_map = positions_noise_map
if fluxes is not None:
if not isinstance(fluxes, aa.ValuesIrregular):
fluxes = aa.ValuesIrregular(values=fluxes)
self.fluxes = fluxes
if fluxes_noise_map is not None:
if not isinstance(fluxes_noise_map, aa.ValuesIrregular):
fluxes_noise_map = aa.ValuesIrregular(values=fluxes_noise_map)
self.fluxes_noise_map = fluxes_noise_map
@property
def dict(self) -> dict:
"""
A dictionary representation of this instance.
Arrays are represented as lists or lists of lists.
"""
return {
"name": self.name,
"positions": list(map(list, np.round(self.positions, 4))),
"positions_noise_map": list(self.positions_noise_map),
"fluxes": list(np.round(self.fluxes, 4))
if self.fluxes is not None
else None,
"fluxes_noise_map": list(self.fluxes_noise_map)
if self.fluxes_noise_map is not None
else None,
}
@classmethod
def from_dict(cls, dict_: dict) -> "PointDataset":
"""
Create a point source dataset from a dictionary representation.
Parameters
----------
dict_
A dictionary. Arrays are represented as lists or lists of lists.
Returns
-------
An instance
"""
return cls(
name=dict_["name"],
positions=aa.Grid2DIrregular(dict_["positions"]),
positions_noise_map=aa.ValuesIrregular(dict_["positions_noise_map"]),
fluxes=aa.ValuesIrregular(dict_["fluxes"])
if dict_["fluxes"] is not None
else None,
fluxes_noise_map=aa.ValuesIrregular(dict_["fluxes_noise_map"])
if dict_["fluxes_noise_map"] is not None
else None,
)
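# Illustrative usage of `PointDataset` (a sketch, not part of the original module; the
# dataset name and values below are made up):
#
#   point_dataset = PointDataset(
#       name="point_0",                        # must match the name of the `Point` model component
#       positions=[(1.0, -1.0), (1.0, 1.0)],   # converted to aa.Grid2DIrregular internally
#       positions_noise_map=[0.05, 0.05],      # typically the pixel scale of the data
#       fluxes=[2.5, 2.3],                     # optional
#       fluxes_noise_map=[0.1, 0.1],           # optional
#   )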
class PointDict(dict):
def __init__(self, point_dataset_list: List[PointDataset]):
"""
A dictionary containing the entire point-source dataset, which could be many instances of
the `PointDataset` object.
This dictionary uses the `name` of the `PointDataset` to act as the key of every entry of the dictionary,
        making it straightforward to access the attributes based on the dataset name.
Parameters
----------
point_dataset_list
A list of all point-source datasets that are to be added to the point-source dictionary.
Returns
-------
Dict[PointDataset]
A dictionary where the keys are the `name` entries of each `PointDataset` and the values are
            the corresponding instances of the `PointDataset` class.
"""
super().__init__()
for point_dataset in point_dataset_list:
self[point_dataset.name] = point_dataset
@property
def positions_list(self):
return [point_dataset.positions for keys, point_dataset in self.items()]
@property
def dicts(self) -> List[dict]:
"""
A list of dictionaries representing this collection of point source
datasets.
"""
return [dataset.dict for dataset in self.values()]
@classmethod
    def from_dicts(cls, dicts: List[dict]) -> "PointDict":
"""
Create an instance from a list of dictionaries.
Parameters
----------
dicts
Dictionaries, each representing one point source dataset.
Returns
-------
A collection of point source datasets.
"""
return cls(map(PointDataset.from_dict, dicts))
@classmethod
def from_json(cls, file_path):
with open(file_path) as infile:
dicts = json.load(infile)
return cls.from_dicts(dicts=dicts)
def output_to_json(self, file_path, overwrite=False):
file_dir = os.path.split(file_path)[0]
if not path.exists(file_dir):
os.makedirs(file_dir)
if overwrite and path.exists(file_path):
os.remove(file_path)
elif not overwrite and path.exists(file_path):
raise FileExistsError(
"The file ",
file_path,
" already exists. Set overwrite=True to overwrite this" "file",
)
with open(file_path, "w+") as f:
json.dump(self.dicts, f, indent=4)
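# Illustrative round-trip through `PointDict` (a sketch, not part of the original module;
# the file path is hypothetical):
#
#   point_dict = PointDict(point_dataset_list=[point_dataset])
#   point_dict.output_to_json(file_path="/tmp/point_dict.json", overwrite=True)
#   point_dict = PointDict.from_json(file_path="/tmp/point_dict.json")
#   positions = point_dict["point_0"].positions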
|
Packs/ReversingLabs_Titanium_Cloud/Integrations/ReversingLabsTitaniumCloudv2/ReversingLabsTitaniumCloudv2.py | diCagri/content | 799 | 11161654 | <reponame>diCagri/content<filename>Packs/ReversingLabs_Titanium_Cloud/Integrations/ReversingLabsTitaniumCloudv2/ReversingLabsTitaniumCloudv2.py
from typing import Union
import demistomock as demisto
from CommonServerPython import *
from ReversingLabs.SDK.ticloud import FileReputation, AVScanners, FileAnalysis, RHA1FunctionalSimilarity, \
RHA1Analytics, URIStatistics, URIIndex, AdvancedSearch, ExpressionSearch, FileDownload, FileUpload, \
URLThreatIntelligence, AnalyzeURL, DynamicAnalysis, CertificateAnalytics
VERSION = "v2.0.0"
USER_AGENT = f"ReversingLabs XSOAR TitaniumCloud {VERSION}"
TICLOUD_URL = demisto.params().get("base")
USERNAME = demisto.params().get("credentials", {}).get("identifier")
PASSWORD = demisto.params().get("credentials", {}).get("password")
RELIABILITY = demisto.params().get("reliability", "C - Fairly reliable")
def classification_to_score(classification):
score_dict = {
"UNKNOWN": 0,
"KNOWN": 1,
"SUSPICIOUS": 2,
"MALICIOUS": 3
}
return score_dict.get(classification, 0)
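# Illustrative mapping (comment only, not part of the original integration); the scores
# correspond to the Cortex XSOAR DBot verdicts:
#   classification_to_score("KNOWN")      -> 1  (Good)
#   classification_to_score("SUSPICIOUS") -> 2  (Suspicious)
#   classification_to_score("MALICIOUS")  -> 3  (Bad)
#   anything else, e.g. "UNKNOWN"         -> 0  (Unknown)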
def test_module_command():
mwp = FileReputation(
host=TICLOUD_URL,
username=USERNAME,
password=PASSWORD
)
try:
_ = mwp.get_file_reputation(hash_input="6a95d3d00267c9fd80bd42122738e726")
except Exception as e:
return_error(str(e))
result = "ok"
return_results(result)
def file_reputation_command():
mwp = FileReputation(
host=TICLOUD_URL,
username=USERNAME,
password=PASSWORD
)
hash_value = demisto.getArg("hash")
try:
response = mwp.get_file_reputation(hash_input=hash_value)
except Exception as e:
return_error(str(e))
response_json = response.json()
results = file_reputation_output(response_json=response_json, hash_value=hash_value)
return_results(results)
def file_reputation_output(response_json, hash_value):
malware_presence = response_json.get("rl", {}).get("malware_presence")
if not malware_presence:
return_error("There is no malware_presence object in the response JSON.")
classification = malware_presence.get("status")
reason = malware_presence.get("reason")
threat_name = malware_presence.get("threat_name")
md5 = malware_presence.get("md5")
sha1 = malware_presence.get("sha1")
sha256 = malware_presence.get("sha256")
markdown = f"""## ReversingLabs File Reputation for hash {hash_value}\n **Classification**: {classification}
**Classification reason**: {reason}
**First seen**: {malware_presence.get("first_seen")}
**Last seen**: {malware_presence.get("last_seen")}
**AV scanner hits / total number of scanners**: {malware_presence.get("scanner_match")} / {malware_presence.get(
"scanner_count")}
**AV scanner hit percentage**: {malware_presence.get("scanner_percent")}%
**MD5 hash**: {md5}
**SHA-1 hash**: {sha1}
**SHA-256 hash**: {sha256}"""
if classification.upper() in ("MALICIOUS", "SUSPICIOUS"):
markdown = f"""{markdown}
**Threat name**: {threat_name}
**Threat level**: {malware_presence.get("threat_level")}
"""
elif classification.upper() == "KNOWN":
markdown = f"""{markdown}
**Trust factor**: {malware_presence.get("trust_factor")}
"""
else:
markdown = f"""## ReversingLabs File Reputation for hash {hash_value}\n **Classification**: {classification}
**No references were found for this hash.**
"""
d_bot_score = classification_to_score(classification)
dbot_score = Common.DBotScore(
indicator=sha1,
indicator_type=DBotScoreType.FILE,
integration_name='ReversingLabs TitaniumCloud v2',
score=d_bot_score,
malicious_description=f"{reason} - {threat_name}",
reliability=RELIABILITY
)
indicator = Common.File(
md5=md5,
sha1=sha1,
sha256=sha256,
dbot_score=dbot_score
)
results = CommandResults(
outputs_prefix='ReversingLabs',
outputs={'file_reputation': response_json},
readable_output=markdown,
indicator=indicator
)
return results
def av_scanners_command():
xref = AVScanners(
host=TICLOUD_URL,
username=USERNAME,
password=PASSWORD
)
hash_value = demisto.getArg("hash")
try:
response = xref.get_scan_results(hash_input=hash_value)
except Exception as e:
return_error(str(e))
response_json = response.json()
results = av_scanners_output(response_json=response_json, hash_value=hash_value)
return_results(results)
def av_scanners_output(response_json, hash_value):
sample = response_json.get("rl", {}).get("sample")
if not sample:
return_error("There is no sample object in the response JSON.")
md5 = sample.get("md5")
sha1 = sample.get("sha1")
sha256 = sample.get("sha256")
markdown = f"""## ReversingLabs AV Scan results for hash {hash_value}\n **First scanned on**: {sample.get(
"first_scanned_on")}
**First seen on**: {sample.get("first_seen_on")}
**Last scanned on**: {sample.get("last_scanned_on")}
**Last seen on**: {sample.get("last_seen_on")}
**Sample size**: {sample.get("sample_size")} bytes
**Sample type**: {sample.get("sample_type")}
**MD5 hash**: {md5}
**SHA-1 hash**: {sha1}
**SHA-256 hash**: {sha256}
**SHA-512 hash**: {sample.get("sha512")}
**SHA-384 hash**: {sample.get("sha384")}
**RIPEMD-160 hash**: {sample.get("ripemd160")}
"""
xref_list = sample.get("xref")
if xref_list and len(xref_list) > 0:
latest_xref = xref_list[0]
xref_results = latest_xref.get("results")
if len(xref_results) > 0:
markdown = f"""{markdown}**Scanner count**: {latest_xref.get("scanner_count")}
**Scanner match**: {latest_xref.get("scanner_match")}
"""
results_table = tableToMarkdown("Latest scan results", xref_results)
markdown = f"{markdown}\n{results_table}"
dbot_score = Common.DBotScore(
indicator=hash_value,
indicator_type=DBotScoreType.FILE,
integration_name='ReversingLabs TitaniumCloud v2',
score=0,
)
indicator = Common.File(
md5=md5,
sha1=sha1,
sha256=sha256,
dbot_score=dbot_score
)
results = CommandResults(
outputs_prefix='ReversingLabs',
outputs={'av_scanners': response_json},
readable_output=markdown,
indicator=indicator
)
return results
def file_analysis_command():
rldata = FileAnalysis(
host=TICLOUD_URL,
username=USERNAME,
password=PASSWORD
)
hash_value = demisto.getArg("hash")
try:
response = rldata.get_analysis_results(hash_input=hash_value)
except Exception as e:
return_error(str(e))
response_json = response.json()
results = file_analysis_output(response_json=response_json, hash_value=hash_value)
file_results = fileResult(
f'File Analysis report file for hash {hash_value}',
json.dumps(response_json, indent=4),
file_type=EntryType.ENTRY_INFO_FILE
)
return_results([results, file_results])
def file_analysis_output(response_json, hash_value):
sample = response_json.get("rl", {}).get("sample")
if not sample:
return_error("There is no sample object in the response JSON.")
md5 = sample.get("md5")
sha1 = sample.get("sha1")
sha256 = sample.get("sha256")
entries = sample.get("analysis").get("entries")
if len(entries) == 0:
return_error("The entries list is empty")
tc_report = entries[0].get("tc_report")
file_type = tc_report.get("info").get("file").get("file_type")
file_subtype = tc_report.get("info").get("file").get("file_subtype")
rldata_xref = sample.get("xref")
markdown = f"""## ReversingLabs File Analysis results for hash {hash_value}\n **File type**: {file_type}
**File subtype**: {file_subtype}
**Sample type**: {rldata_xref.get("sample_type")}
**Sample size**: {sample.get("sample_size")} bytes
**Extended description**: {tc_report.get("story")}
**First seen**: {rldata_xref.get("first_seen")}
**Last seen**: {rldata_xref.get("last_seen")}
**MD5 hash**: {sample.get("md5")}
**SHA-1 hash**: {sample.get("sha1")}
**SHA-256 hash**: {sample.get("sha256")}
**SHA-384 hash**: {sample.get("sha384")}
**SHA-512 hash**: {sample.get("sha512")}
**SSDEEP hash**: {sample.get("ssdeep")}
**RIPEMD-160 hash**: {sample.get("ripemd160")}
"""
dbot_score = Common.DBotScore(
indicator=hash_value,
indicator_type=DBotScoreType.FILE,
integration_name='ReversingLabs TitaniumCloud v2',
score=0,
)
indicator = Common.File(
md5=md5,
sha1=sha1,
sha256=sha256,
dbot_score=dbot_score
)
results = CommandResults(
outputs_prefix='ReversingLabs',
outputs={'file_analysis': response_json},
readable_output=markdown,
indicator=indicator
)
return results
def functional_similarity_command():
similarity = RHA1FunctionalSimilarity(
host=TICLOUD_URL,
username=USERNAME,
password=PASSWORD
)
hash_value = demisto.getArg("hash")
limit = demisto.getArg("result_limit")
try:
sha1_list = similarity.get_similar_hashes_aggregated(hash_input=hash_value, max_results=int(limit))
except Exception as e:
return_error(str(e))
results = CommandResults(
outputs_prefix='ReversingLabs',
outputs={'functional_similarity': sha1_list},
readable_output="Full report is returned in a downloadable file"
)
file_results = fileResult(
f'RHA1 Functional Similarity report file for hash {hash_value}',
json.dumps(sha1_list, indent=4),
file_type=EntryType.ENTRY_INFO_FILE
)
return_results([results, file_results])
def rha1_analytics_command():
rha_analytics = RHA1Analytics(
host=TICLOUD_URL,
username=USERNAME,
password=PASSWORD
)
hash_value = demisto.getArg("hash")
try:
response = rha_analytics.get_rha1_analytics(hash_input=hash_value)
except Exception as e:
return_error(str(e))
response_json = response.json()
results = rha1_analytics_output(response_json=response_json, hash_value=hash_value)
return_results(results)
def rha1_analytics_output(response_json, hash_value):
rha1_counters = response_json.get("rl", {}).get("rha1_counters")
if not rha1_counters:
return_error("There is no rha1_counters object in the response JSON.")
md5 = demisto.get(rha1_counters, "sample_metadata.md5")
sha1 = hash_value
sha256 = demisto.get(rha1_counters, "sample_metadata.sha256")
sample_counters = rha1_counters.get("sample_counters")
sample_metadata = rha1_counters.get("sample_metadata")
classification = sample_metadata.get("classification")
threat_name = sample_metadata.get("threat_name")
markdown = f"""## ReversingLabs RHA1 Analytics results for hash {sha1}\n ### Sample counters\n **KNOWN**: {
sample_counters.get("known")}
**MALICIOUS**: {sample_counters.get("malicious")}
**SUSPICIOUS**: {sample_counters.get("suspicious")}
**TOTAL**: {sample_counters.get("total")}\n ### Sample metadata\n **Classification**: {classification}
**MD5 hash**: {md5}
**SHA-256 hash**: {sha256}
**First seen**: {sample_metadata.get("first_seen")}
**Last seen**: {sample_metadata.get("last_seen")}
**Sample available**: {sample_metadata.get("sample_available")}
**Sample size**: {sample_metadata.get("sample_size")} bytes
**Sample type**: {sample_metadata.get("sample_type")}"""
if classification.upper() in ("MALICIOUS", "SUSPICIOUS"):
markdown = f"""{markdown}
**Threat name**: {threat_name}
**Threat level**: {sample_metadata.get("threat_level")}"""
else:
markdown = f"""{markdown}
**Trust factor**: {sample_metadata.get("trust_factor")}"""
d_bot_score = classification_to_score(classification)
dbot_score = Common.DBotScore(
indicator=sha1,
indicator_type=DBotScoreType.FILE,
integration_name='ReversingLabs TitaniumCloud v2',
score=d_bot_score,
malicious_description=threat_name,
reliability=RELIABILITY
)
indicator = Common.File(
md5=md5,
sha1=sha1,
sha256=sha256,
dbot_score=dbot_score
)
results = CommandResults(
outputs_prefix='ReversingLabs',
outputs={'rha1_analytics': response_json},
readable_output=markdown,
indicator=indicator
)
return results
def uri_statistics_command():
uri_stats = URIStatistics(
host=TICLOUD_URL,
username=USERNAME,
password=PASSWORD
)
uri = demisto.getArg("uri")
try:
response = uri_stats.get_uri_statistics(uri_input=uri)
except Exception as e:
return_error(str(e))
response_json = response.json()
results = uri_statistics_output(response_json=response_json, uri=uri)
return_results(results)
def uri_statistics_output(response_json, uri):
uri_state = response_json.get("rl", {}).get("uri_state")
if not uri_state:
return_error("There is no uri_state object in the response JSON.")
counters = uri_state.get("counters")
uri_type = uri_state.get("uri_type")
uri_types = {
"domain": f"**Domain**: {uri}",
"url": f"**URL**: {uri}",
"ipv4": f"**IPv4**: {uri}",
"email": f"**Email**: {uri}"
}
markdown = f"""## ReversingLabs URI Statistics results for URI {uri}\n ### Sample counters\n **KNOWN**: {
counters.get("known")}
**MALICIOUS**: {counters.get("malicious")}
**SUSPICIOUS**: {counters.get("suspicious")}
**SHA-1 hash**: {uri_state.get("sha1")}
**URI type**: {uri_type}
{uri_types.get(uri_type)}"""
indicator: Union[Common.Domain, Common.URL, Common.IP, Common.EMAIL, None] = None
if uri_type == "domain":
indicator = Common.Domain(
domain=uri,
dbot_score=Common.DBotScore(
indicator=uri,
indicator_type=DBotScoreType.DOMAIN,
integration_name='ReversingLabs TitaniumCloud v2',
score=0,
)
)
elif uri_type == "url":
indicator = Common.URL(
url=uri,
dbot_score=Common.DBotScore(
indicator=uri,
indicator_type=DBotScoreType.URL,
integration_name='ReversingLabs TitaniumCloud v2',
score=0,
)
)
elif uri_type == "ipv4":
indicator = Common.IP(
ip=uri,
dbot_score=Common.DBotScore(
indicator=uri,
indicator_type=DBotScoreType.IP,
integration_name='ReversingLabs TitaniumCloud v2',
score=0,
)
)
elif uri_type == "email":
indicator = Common.EMAIL(
address=uri,
dbot_score=Common.DBotScore(
indicator=uri,
indicator_type=DBotScoreType.EMAIL,
integration_name='ReversingLabs TitaniumCloud v2',
score=0,
)
)
else:
return_error("This integration does not currently support this URI type")
results = CommandResults(
outputs_prefix='ReversingLabs',
outputs={'uri_statistics': response_json},
readable_output=markdown,
indicator=indicator
)
return results
def uri_index_command():
uri_index = URIIndex(
host=TICLOUD_URL,
username=USERNAME,
password=PASSWORD
)
uri = demisto.getArg("uri")
limit = demisto.getArg("result_limit")
try:
sha1_list = uri_index.get_uri_index_aggregated(uri_input=uri, max_results=int(limit))
except Exception as e:
return_error(str(e))
results = CommandResults(
outputs_prefix='ReversingLabs',
outputs={'uri_index': sha1_list},
readable_output="Full report is returned in a downloadable file"
)
file_results = fileResult(
f'URI Index report file for URI {uri}',
json.dumps(sha1_list, indent=4),
file_type=EntryType.ENTRY_INFO_FILE
)
return_results([results, file_results])
def advanced_search_command():
advanced_search = AdvancedSearch(
host=TICLOUD_URL,
username=USERNAME,
password=PASSWORD
)
query = demisto.getArg("query")
limit = demisto.getArg("result_limit")
try:
result_list = advanced_search.search_aggregated(query_string=query, max_results=int(limit))
except Exception as e:
return_error(str(e))
results = CommandResults(
outputs_prefix='ReversingLabs',
outputs={'advanced_search': result_list},
readable_output="Full report is returned in a downloadable file"
)
file_results = fileResult(
'Advanced Search report file',
json.dumps(result_list, indent=4),
file_type=EntryType.ENTRY_INFO_FILE
)
return_results([results, file_results])
def expression_search_command():
expression_search = ExpressionSearch(
host=TICLOUD_URL,
username=USERNAME,
password=PASSWORD
)
query = demisto.getArg("query")
date = demisto.getArg("date")
limit = demisto.getArg("result_limit")
query_list = query.split(" ")
try:
result_list = expression_search.search_aggregated(
query=query_list,
date=date,
max_results=int(limit)
)
except Exception as e:
return_error(str(e))
results = CommandResults(
outputs_prefix='ReversingLabs',
outputs={'expression_search': result_list},
readable_output="Full report is returned in a downloadable file"
)
file_results = fileResult(
'Expression Search report file',
json.dumps(result_list, indent=4),
file_type=EntryType.ENTRY_INFO_FILE
)
return_results([results, file_results])
def file_download_command():
file_download = FileDownload(
host=TICLOUD_URL,
username=USERNAME,
password=PASSWORD
)
hash_value = demisto.getArg("hash")
try:
response = file_download.download_sample(hash_input=hash_value)
except Exception as e:
return_error(str(e))
results = CommandResults(
readable_output=f"Requested sample is available for download under the name {hash_value}"
)
return_results([results, fileResult(hash_value, response.content)])
def file_upload_command():
file_upload = FileUpload(
host=TICLOUD_URL,
username=USERNAME,
password=PASSWORD
)
file_entry = demisto.getFilePath(demisto.getArg("entryId"))
filename = file_entry["name"]
with open(file_entry["path"], "rb") as file_handle:
_ = file_upload.upload_sample_from_file(file_handle=file_handle, sample_name=filename)
results = CommandResults(
readable_output=f"Successfully uploaded file {filename}"
)
return_results(results)
def url_report_command():
url_ti = URLThreatIntelligence(
host=TICLOUD_URL,
username=USERNAME,
password=PASSWORD
)
url = demisto.getArg("url")
try:
response = url_ti.get_url_report(url_input=url)
except Exception as e:
return_error(str(e))
response_json = response.json()
results = url_report_output(response_json=response_json, url=url)
return_results(results)
def url_report_output(response_json, url):
report_base = response_json.get("rl")
if not report_base:
return_error("There is no rl object in the response JSON.")
classification = report_base.get("classification", "UNAVAILABLE").upper()
markdown = f"""## ReversingLabs URL Threat Intelligence report for URL {url}\n **Requested URL**: {report_base.get(
"requested_url")}
**Classification**: {classification}"""
analysis = report_base.get("analysis")
if analysis:
statistics = analysis.get("statistics")
analysis_history = analysis.get("analysis_history")
last_analysis = analysis.get("last_analysis")
markdown += f"""\n **First analysis**: {analysis.get("first_analysis")}
**Analysis count**: {analysis.get("analysis_count")}\n ### Last analysis\n **Analysis ID**: {last_analysis.get(
"analysis_id")}
**Analysis time**: {last_analysis.get("analysis_time")}
**Final URL**: {last_analysis.get("final_url")}
**Availability status**: {last_analysis.get("availability_status")}
**Domain**: {last_analysis.get("domain")}
**Serving IP Address**: {last_analysis.get("serving_ip_address")}\n ### Statistics\n **KNOWN**: {statistics.get(
"known")}
**SUSPICIOUS**: {statistics.get("suspicious")}
**MALICIOUS**: {statistics.get("malicious")}
**UNKNOWN**: {statistics.get("unknown")}
**TOTAL**: {statistics.get("total")}"""
analysis_table = tableToMarkdown("Analysis history", analysis_history)
markdown = f"{markdown}\n {analysis_table}"
third_party = report_base.get("third_party_reputations")
if third_party:
third_party_statistics = third_party.get("statistics")
third_party_sources = third_party.get("sources")
markdown += f"""\n ### Third party statistics\n **TOTAL**: {third_party_statistics.get("total")}
**MALICIOUS**: {third_party_statistics.get("malicious")}
**CLEAN**: {third_party_statistics.get("clean")}
**UNDETECTED**: {third_party_statistics.get("undetected")}\n"""
sources_table = tableToMarkdown("Third party sources", third_party_sources)
markdown = f"{markdown}\n {sources_table}"
d_bot_score = classification_to_score(classification)
dbot_score = Common.DBotScore(
indicator=url,
indicator_type=DBotScoreType.URL,
integration_name='ReversingLabs TitaniumCloud v2',
score=d_bot_score,
malicious_description=classification.upper(),
reliability=RELIABILITY
)
indicator = Common.URL(
url=url,
dbot_score=dbot_score
)
results = CommandResults(
outputs_prefix='ReversingLabs',
outputs={'url_report': response_json},
readable_output=markdown,
indicator=indicator
)
return results
def analyze_url_command():
analyze_url = AnalyzeURL(
host=TICLOUD_URL,
username=USERNAME,
password=PASSWORD
)
url = demisto.getArg("url")
try:
response = analyze_url.submit_url(url_input=url)
except Exception as e:
return_error(str(e))
response_json = response.json()
results = analyze_url_output(response_json=response_json, url=url)
return_results(results)
def analyze_url_output(response_json, url):
report_base = response_json.get("rl", {})
markdown = f"""## ReversingLabs Analyze URL response for URL {url}\n **Status**: {report_base.get("status")}
**Analysis ID**: {report_base.get("analysis_id")}
**Requested URL**: {report_base.get("requested_url")}"""
results = CommandResults(
outputs_prefix='ReversingLabs',
outputs={'analyze_url': response_json},
readable_output=markdown
)
return results
def detonate_sample_command():
sandbox = DynamicAnalysis(
host=TICLOUD_URL,
username=USERNAME,
password=PASSWORD
)
sha1 = demisto.getArg("sha1")
platform = demisto.getArg("platform")
try:
response = sandbox.detonate_sample(sample_sha1=sha1, platform=platform)
except Exception as e:
return_error(str(e))
response_json = response.json()
results = detonate_sample_output(response_json=response_json, sha1=sha1)
return_results(results)
def detonate_sample_output(response_json, sha1):
report_base = response_json.get("rl", {})
markdown = f"""## ReversingLabs submit sample {sha1} for Dynamic Analysis\n **Status**: {report_base.get("status")}
**Requested hash**: {report_base.get("requested_hash")}
**Analysis ID**: {report_base.get("analysis_id")}"""
results = CommandResults(
outputs_prefix='ReversingLabs',
outputs={'detonate_sample_dynamic': response_json},
readable_output=markdown
)
return results
def dynamic_analysis_results_command():
sandbox = DynamicAnalysis(
host=TICLOUD_URL,
username=USERNAME,
password=PASSWORD
)
sha1 = demisto.getArg("sha1")
try:
response = sandbox.get_dynamic_analysis_results(sample_hash=sha1, latest=True)
except Exception as e:
return_error(str(e))
response_json = response.json()
dbot_score = Common.DBotScore(
indicator=sha1,
indicator_type=DBotScoreType.FILE,
integration_name='ReversingLabs TitaniumCloud v2',
score=0
)
indicator = Common.File(
sha1=sha1,
dbot_score=dbot_score
)
results = CommandResults(
outputs_prefix='ReversingLabs',
outputs={'dynamic_analysis_results': response_json},
readable_output="Full report is returned in a downloadable file",
indicator=indicator
)
file_results = fileResult(
f'Dynamic analysis report file for sample {sha1}',
json.dumps(response_json, indent=4),
file_type=EntryType.ENTRY_INFO_FILE
)
return_results([results, file_results])
def certificate_analytics_command():
cert_analytics = CertificateAnalytics(
host=TICLOUD_URL,
username=USERNAME,
password=PASSWORD
)
thumbprint = demisto.getArg("certificate_thumbprint")
try:
response = cert_analytics.get_certificate_analytics(certificate_thumbprints=thumbprint)
except Exception as e:
return_error(str(e))
response_json = response.json()
results = CommandResults(
outputs_prefix='ReversingLabs',
outputs={'certificate_analytics': response_json},
readable_output="Full report is returned in a downloadable file"
)
file_results = fileResult(
f'Certificate Analytics report file for thumbprint {thumbprint}',
json.dumps(response_json, indent=4),
file_type=EntryType.ENTRY_INFO_FILE
)
return_results([results, file_results])
def main():
command = demisto.command()
if command == "test-module":
test_module_command()
elif command == "reversinglabs-titaniumcloud-file-reputation":
file_reputation_command()
elif command == "reversinglabs-titaniumcloud-av-scanners":
av_scanners_command()
elif command == "reversinglabs-titaniumcloud-file-analysis":
file_analysis_command()
elif command == "reversinglabs-titaniumcloud-rha1-functional-similarity":
functional_similarity_command()
elif command == "reversinglabs-titaniumcloud-rha1-analytics":
rha1_analytics_command()
elif command == "reversinglabs-titaniumcloud-uri-statistics":
uri_statistics_command()
elif command == "reversinglabs-titaniumcloud-uri-index":
uri_index_command()
elif command == "reversinglabs-titaniumcloud-advanced-search":
advanced_search_command()
elif command == "reversinglabs-titaniumcloud-expression-search":
expression_search_command()
elif command == "reversinglabs-titaniumcloud-file-download":
file_download_command()
elif command == "reversinglabs-titaniumcloud-file-upload":
file_upload_command()
elif command == "reversinglabs-titaniumcloud-url-report":
url_report_command()
elif command == "reversinglabs-titaniumcloud-analyze-url":
analyze_url_command()
elif command == "reversinglabs-titaniumcloud-submit-for-dynamic-analysis":
detonate_sample_command()
elif command == "reversinglabs-titaniumcloud-get-dynamic-analysis-results":
dynamic_analysis_results_command()
elif command == "reversinglabs-titaniumcloud-certificate-analytics":
certificate_analytics_command()
else:
return_error(f"Command {command} does not exist")
if __name__ in ("__main__", "__builtin__", "builtins"):
main()
|
src/genie/libs/parser/iosxe/tests/ShowIpVrfDetail/cli/equal/golden_output3_expected.py | balmasea/genieparser | 204 | 11161677 | <filename>src/genie/libs/parser/iosxe/tests/ShowIpVrfDetail/cli/equal/golden_output3_expected.py
expected_output = {
"Mgmt-intf": {
"vrf_id": 1,
"cli_format": "New",
"support_af": "multiple address-families",
"flags": "0x1808",
"interfaces": ["GigabitEthernet0"],
"interface": {"GigabitEthernet0": {"vrf": "Mgmt-intf"}},
"address_family": {
"none": {
"table_id": "1",
"flags": "0x0",
"vrf_label": {"allocation_mode": "per-prefix"},
}
},
}
}
|
tests/test_utils/test_make_divisible.py | wzpscott/SegformerDistillation | 903 | 11161683 | <gh_stars>100-1000
from mmseg.models.utils import make_divisible
def test_make_divisible():
# test with min_value = None
assert make_divisible(10, 4) == 12
assert make_divisible(9, 4) == 12
assert make_divisible(1, 4) == 4
# test with min_value = 8
assert make_divisible(10, 4, 8) == 12
assert make_divisible(9, 4, 8) == 12
assert make_divisible(1, 4, 8) == 8
|
src/sage/libs/pynac/pynac.py | UCD4IDS/sage | 1,742 | 11161696 | from sage.misc.lazy_import import lazy_import
lazy_import('sage.symbolic.expression',
['unpack_operands', 'paramset_from_Expression', 'get_ginac_serial', 'get_fn_serial',
'py_latex_variable_for_doctests', 'py_print_function_pystring',
'py_latex_function_pystring', 'tolerant_is_symbol', 'py_print_fderivative_for_doctests',
'py_latex_fderivative_for_doctests', 'test_binomial', 'py_real_for_doctests',
'py_imag_for_doctests', 'py_is_integer_for_doctests', 'py_is_crational_for_doctest',
'py_numer_for_doctests', 'py_denom_for_doctests', 'py_is_cinteger_for_doctest',
'py_float_for_doctests', 'py_tgamma_for_doctests', 'py_factorial_py', 'doublefactorial',
'py_stieltjes_for_doctests', 'py_zeta_for_doctests', 'py_exp_for_doctests',
'py_log_for_doctests', 'py_atan2_for_doctests', 'py_lgamma_for_doctests',
'py_mod_for_doctests', 'py_li_for_doctests', 'py_psi_for_doctests',
'py_psi2_for_doctests', 'py_li2_for_doctests', 'py_eval_unsigned_infinity_for_doctests',
'py_eval_infinity_for_doctests', 'py_eval_neg_infinity_for_doctests',
'register_symbol', 'init_pynac_I', 'init_function_table'],
deprecation=32386)
|
packages/scikit-learn/examples/plot_tsne.py | zmoon/scipy-lecture-notes | 2,538 | 11161708 | <filename>packages/scikit-learn/examples/plot_tsne.py
"""
==========================
tSNE to visualize digits
==========================
Here we use :class:`sklearn.manifold.TSNE` to visualize the digits
datasets. Indeed, the digits are vectors in a 8*8 = 64 dimensional space.
We want to project them in 2D for visualization. tSNE is often a good
solution, as it groups and separates data points based on their local
relationship.
"""
############################################################
# Load the digits data
from sklearn import datasets
digits = datasets.load_digits()
# Take the first 500 data points: it's hard to see 1500 points
X = digits.data[:500]
y = digits.target[:500]
############################################################
# Fit and transform with a TSNE
from sklearn.manifold import TSNE
tsne = TSNE(n_components=2, random_state=0)
############################################################
# Project the data in 2D
X_2d = tsne.fit_transform(X)
############################################################
# Visualize the data
target_ids = range(len(digits.target_names))
from matplotlib import pyplot as plt
plt.figure(figsize=(6, 5))
colors = 'r', 'g', 'b', 'c', 'm', 'y', 'k', 'w', 'orange', 'purple'
for i, c, label in zip(target_ids, colors, digits.target_names):
plt.scatter(X_2d[y == i, 0], X_2d[y == i, 1], c=c, label=label)
plt.legend()
plt.show()
|
platform_tools/android/tests/ordered_set_tests.py | AsdMonio/rr-external_skia | 5,964 | 11161724 | <filename>platform_tools/android/tests/ordered_set_tests.py<gh_stars>1000+
#!/usr/bin/python
# Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Test OrderedSet.
"""
import sys
import test_variables
import unittest
sys.path.append(test_variables.GYP_GEN_DIR)
from vars_dict_lib import OrderedSet
def create_dummy_var(i):
return 'dummy_var' + str(i)
class OrderedSetTest(unittest.TestCase):
def setUp(self):
self.__set = OrderedSet()
def test_methods(self):
"""Test methods on OrderedSet.
"""
RANGE = 10
for i in range(RANGE):
dummy_var = create_dummy_var(i)
# Add to the list. This should succeed.
self.__set.add(dummy_var)
self.assertEqual(len(self.__set), i+1)
self.assertTrue(dummy_var in self.__set)
self.assertEqual(self.__set[i], dummy_var)
# Now attempt to add it again. This should fail.
self.__set.add(dummy_var)
self.assertEqual(len(self.__set), i+1)
self.assertEqual(self.__set[i], dummy_var)
# Test iterator.
counter = 0
for set_member in self.__set:
self.assertEqual(create_dummy_var(counter), set_member)
counter += 1
self.assertEqual(counter, len(self.__set))
# Now test removal.
for i in range(RANGE):
dummy_var = create_dummy_var(i)
self.__set.remove(dummy_var)
self.assertEqual(len(self.__set), RANGE-i-1)
self.assertFalse(dummy_var in self.__set)
# Test reset(), for a range of ranges.
for subrange in range(RANGE):
for i in range(subrange):
self.__set.add(create_dummy_var(i))
self.assertEqual(len(self.__set), subrange)
self.__set.reset()
self.assertEqual(len(self.__set), 0)
def test_set(self):
"""Test OrderedSet.set().
"""
# Create a set with dummy values.
my_set = OrderedSet()
RANGE = 10
for i in range(RANGE):
my_set.add(create_dummy_var(i))
my_len = len(my_set)
self.assertEqual(my_len, RANGE)
# Copy it to another set.
other_set = OrderedSet()
self.assertEqual(len(other_set), 0)
other_set.set(my_set)
# Both sets should contain the same values, in the same order.
iterator = iter(my_set)
for item in other_set:
self.assertTrue(item == iterator.next())
with self.assertRaises(StopIteration):
iterator.next()
self.assertEqual(my_len, len(other_set))
# But the sets are different. Changing one will not affect the other.
self.assertFalse(other_set is my_set)
other_var = 'something_else'
other_set.add(other_var)
self.assertEqual(my_len + 1, len(other_set))
self.assertEqual(my_len, len(my_set))
self.assertNotIn(other_var, my_set)
def main():
loader = unittest.TestLoader()
suite = loader.loadTestsFromTestCase(OrderedSetTest)
unittest.TextTestRunner(verbosity=2).run(suite)
if __name__ == "__main__":
main()
|
src/naarad/metrics/innotop_metric.py | richardhsu/naarad | 180 | 11161786 | <reponame>richardhsu/naarad
# coding=utf-8
"""
Copyright 2013 LinkedIn Corp. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import naarad.utils
import logging
import os
from naarad.metrics.metric import Metric
logger = logging.getLogger('naarad.metrics.INNOMetric')
class INNOMetric(Metric):
C_MAX_COMMANDS = 10
graph_lib = None
def __init__(self, metric_type, infile, hostname, aggr_metrics, outdir, resource_path, label, ts_start, ts_end, rule_strings, important_sub_metrics,
anomaly_detection_metrics, **other_options):
Metric.__init__(self, metric_type, infile, hostname, aggr_metrics, outdir, resource_path, label, ts_start, ts_end, rule_strings, important_sub_metrics,
anomaly_detection_metrics)
for (key, val) in other_options.iteritems():
setattr(self, key, val.split())
def get_csv_C(self, command, column):
outcsv = os.path.join(self.resource_directory, "{0}.{1}.{2}.csv".format(self.metric_type, command, column))
self.csv_column_map[outcsv] = command + '.' + column
return outcsv
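  # For example (illustrative): an INNOTOP-C metric with command "Select" and column "QPS"
  # is written to "<resource_directory>/INNOTOP-C.Select.QPS.csv" and registered in
  # self.csv_column_map under the sub-metric name "Select.QPS".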
def parse(self):
logger.info("Working on innotop metric: %s", self.infile)
if self.metric_type == "INNOTOP-C":
return self.parse_innotop_mode_c()
elif self.metric_type == "INNOTOP-M":
return self.parse_innotop_mode_m()
else:
return self.parse_innotop_mode_b()
def parse_innotop_mode_c(self):
with open(self.infile, 'r') as infh:
headerline = infh.readline()
columns = headerline.split()[2:]
outfilehandlers = {}
for line in infh:
l = line.strip().split(' ', 1)
if len(l) <= 1:
continue
ts = l[0].strip().replace('T', ' ')
try:
nameval = l[1].strip().split('\t', 1)
except IndexError:
logger.warn("Badly formatted line: %s", line)
logger.warn("Expected tab separated values")
continue
command = nameval[0]
if command not in outfilehandlers:
# Only looking at top N commands
if len(outfilehandlers) > self.C_MAX_COMMANDS:
continue
# TODO(rmaheshw) : Use collections.defaultdict instead to avoid initializing dicts
outfilehandlers[command] = {}
words = nameval[1].split('\t')
for i in range(len(words)):
if self.options and columns[i] not in self.options:
continue
if columns[i] not in outfilehandlers[command]:
outfilehandlers[command][columns[i]] = open(self.get_csv_C(command, columns[i]), 'w')
self.csv_files.append(self.get_csv_C(command, columns[i]))
ts = naarad.utils.reconcile_timezones(ts, self.timezone, self.graph_timezone)
outfilehandlers[command][columns[i]].write(ts + ',')
outfilehandlers[command][columns[i]].write(words[i])
outfilehandlers[command][columns[i]].write('\n')
for command in outfilehandlers:
for column in outfilehandlers[command]:
outfilehandlers[command][column].close()
return True
def parse_innotop_mode_b(self):
""" Generic parsing method for all other modes """
with open(self.infile, 'r') as infh:
# Pre processing to figure out different headers
max_row_quot = 0
valrow = -1
thisrowcolumns = {}
data = {}
while True:
line1 = infh.readline()
words = line1.split()
# special case for -I (iostat) option
# skipping all the 'thread' lines
if words[1] == "thread" and self.metric_type == "INNOTOP-I":
while True:
line1 = infh.readline()
words = line1.split()
if naarad.utils.is_number(words[1]):
line1 = infh.readline()
else:
break
if words[1] == "thread" and self.metric_type == "INNOTOP-R":
break
# Skip next line
infh.readline()
last_ts = words[0].strip().replace('T', ' ')
if not naarad.utils.is_number(words[1]):
thisrowcolumns[max_row_quot] = words[1:]
for column in words[1:]:
if self.options and column not in self.options:
continue
data[column] = []
if self.metric_type == "INNOTOP-I":
data["check_pt_age"] = []
max_row_quot += 1
else:
break
# infh.seek(0)
# Real Processing
for line in infh:
l = line.strip().split(' ', 1)
if len(l) <= 1:
continue
ts = l[0].strip().replace('T', ' ')
if not ts == last_ts:
last_ts = ts
valrow = -1
try:
words = l[1].strip().split('\t')
except IndexError:
logger.warn("Bad line: %s", line)
continue
# special case for -I (iostat) option
# skipping all the 'thread' lines
if words[0] == "thread" or (naarad.utils.is_number(words[0]) and "thread" in words[1]):
continue
if naarad.utils.is_number(words[0]):
valrow += 1
quot = valrow % max_row_quot
# Special case for -R, skipping all 'thread' value lines
if quot >= len(thisrowcolumns):
continue
columns = thisrowcolumns[quot]
if len(words) > len(columns):
continue
for i in range(len(words)):
if self.options and columns[i] not in self.options:
continue
column = columns[i]
# Converting -- to 0, seen this for buf_pool_hit_rate
if words[i] == "--":
words[i] = "0"
ts = naarad.utils.reconcile_timezones(ts, self.timezone, self.graph_timezone)
# Calculating check point age
if self.metric_type == "INNOTOP-I":
if column == "log_seq_no":
log_seq_no = int(words[i])
elif column == "log_flushed_to":
check_pt_age = log_seq_no - int(words[i])
tup = [ts, str(check_pt_age)]
data["check_pt_age"].append(tup)
tup = [ts, words[i]]
data[column].append(tup)
# Post Proc, writing the different out files
for column in data:
csvfile = self.get_csv(column)
self.csv_files.append(csvfile)
with open(csvfile, 'w') as outfh:
for tup in data[column]:
outfh.write(','.join(tup))
outfh.write('\n')
return True
def parse_innotop_mode_m(self):
""" Special parsing method for Innotop "Replication Status" results (innotop --mode M)"""
with open(self.infile, 'r') as infh:
# Pre processing to figure out different headers
max_row_quot = 0
valrow = -1
thisrowcolumns = {}
data = {}
last_ts = None
while True:
# 2012-05-11T00:00:02 master_host slave_sql_running time_behind_master slave_catchup_rate slave_open_temp_tables relay_log_pos last_error
line1 = infh.readline()
words = line1.split()
# Skip next line
infh.readline()
is_header = True
for word in words:
if naarad.utils.is_number(word):
last_ts = words[0].strip().replace('T', ' ')
is_header = False
break # from this loop
if len(words) > 2 and is_header:
thisrowcolumns[max_row_quot] = words[2:]
for column in thisrowcolumns[max_row_quot]:
data[column] = []
max_row_quot += 1
else:
break
# from pre-processing. All headers accounted for
# Real Processing
if not last_ts:
logger.warn("last_ts not set, looks like there is no data in file %s", self.infile)
return True
infh.seek(0)
is_bad_line = False
outfilehandlers = {}
for line in infh:
l = line.strip().split(' ', 1)
# Blank line
if len(l) <= 1:
continue
ts = l[0].strip().replace('T', ' ')
if ts != last_ts:
last_ts = ts
valrow = -1
nameval = l[1].strip().split('\t', 1)
try:
words = nameval[1].split('\t')
except IndexError:
logger.warn("Bad line: %s", line)
continue
valrow += 1
command = nameval[0]
if command not in outfilehandlers:
outfilehandlers[command] = {}
quot = valrow % max_row_quot
columns = thisrowcolumns[quot]
for i in range(len(words)):
if len(words) > len(columns):
logger.warn("Mismatched number of columns: %s", line)
logger.warn("%d %d", len(words), len(columns))
break
if words[i] in columns:
logger.warn("Skipping line: %s", line)
valrow -= 1
break
if self.options and columns[i] not in self.options:
continue
if columns[i] not in outfilehandlers[command]:
outfilehandlers[command][columns[i]] = open(self.get_csv_C(command, columns[i]), 'w')
self.csv_files.append(self.get_csv_C(command, columns[i]))
ts = naarad.utils.reconcile_timezones(ts, self.timezone, self.graph_timezone)
outfilehandlers[command][columns[i]].write(ts + ',')
outfilehandlers[command][columns[i]].write(words[i])
outfilehandlers[command][columns[i]].write('\n')
for command in outfilehandlers:
for column in outfilehandlers[command]:
outfilehandlers[command][column].close()
return True
|
src/oncall/api/v0/ical_key.py | lukdz/oncall | 857 | 11161788 | <filename>src/oncall/api/v0/ical_key.py
# Copyright (c) LinkedIn Corporation. All rights reserved. Licensed under the BSD-2 Clause license.
# See LICENSE in the project root for license information.
import uuid
from ... import db
def generate_ical_key():
return str(uuid.uuid4())
def check_ical_team(team, requester):
"""
Currently we allow users to request ical key for any team calendar
"""
connection = db.connect()
cursor = connection.cursor()
cursor.execute(
'''
SELECT `id`
FROM `team`
WHERE `name` = %s AND `active` = TRUE
''',
(team, ))
team_exist_and_active = cursor.rowcount
cursor.close()
connection.close()
return team_exist_and_active != 0
def check_ical_key_requester(key, requester):
connection = db.connect()
cursor = connection.cursor()
cursor.execute(
'''
SELECT `key`
FROM `ical_key`
WHERE `key` = %s AND `requester` = %s
''',
(key, requester))
is_requester = cursor.rowcount
cursor.close()
connection.close()
return is_requester != 0
def get_name_and_type_from_key(key):
connection = db.connect()
cursor = connection.cursor()
result = None
cursor.execute(
'''
SELECT `name`, `type`
FROM `ical_key`
WHERE `key` = %s
''',
(key, ))
if cursor.rowcount != 0:
row = cursor.fetchone()
result = (row[0], row[1])
cursor.close()
connection.close()
return result
def get_ical_key(requester, name, type):
connection = db.connect()
cursor = connection.cursor()
cursor.execute(
'''
SELECT `key`
FROM `ical_key`
WHERE
`requester` = %s AND
`name` = %s AND
`type` = %s
''',
(requester, name, type))
if cursor.rowcount == 0:
key = None
else:
key = cursor.fetchone()[0]
cursor.close()
connection.close()
return key
def update_ical_key(requester, name, type, key):
connection = db.connect()
cursor = connection.cursor()
cursor.execute(
'''
INSERT INTO `ical_key` (`key`, `requester`, `name`, `type`, `time_created`)
VALUES (%s, %s, %s, %s, UNIX_TIMESTAMP())
ON DUPLICATE KEY UPDATE `key` = %s, `time_created` = UNIX_TIMESTAMP()
''',
(key, requester, name, type, key))
connection.commit()
cursor.close()
connection.close()
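# Illustrative flow (a sketch, not part of the original module; the requester and team
# names below are made up):
#
#   key = get_ical_key(requester='jdoe', name='team-sre', type='team')
#   if key is None:
#       key = generate_ical_key()
#       update_ical_key('jdoe', 'team-sre', 'team', key)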
def delete_ical_key(requester, name, type):
connection = db.connect()
cursor = connection.cursor()
cursor.execute(
'''
DELETE FROM `ical_key`
WHERE
`requester` = %s AND
`name` = %s AND
`type` = %s
''',
(requester, name, type))
connection.commit()
cursor.close()
connection.close()
def get_ical_key_detail(key):
connection = db.connect()
cursor = connection.cursor(db.DictCursor)
cursor.execute(
'''
SELECT `requester`, `name`, `type`, `time_created`
FROM `ical_key`
WHERE `key` = %s
''',
(key, ))
# fetchall because we may want to know if there is any key (uuid) collision
results = cursor.fetchall()
cursor.close()
connection.close()
return results
def get_ical_key_detail_by_requester(requester):
connection = db.connect()
cursor = connection.cursor(db.DictCursor)
cursor.execute(
'''
SELECT `key`, `name`, `type`, `time_created`
FROM `ical_key`
WHERE `requester` = %s
''',
(requester, ))
results = cursor.fetchall()
cursor.close()
connection.close()
return results
def invalidate_ical_key(key):
connection = db.connect()
cursor = connection.cursor()
cursor.execute(
'''
DELETE FROM `ical_key`
WHERE
`key` = %s
''',
(key, ))
connection.commit()
cursor.close()
connection.close()
def invalidate_ical_key_by_requester(requester):
connection = db.connect()
cursor = connection.cursor()
cursor.execute(
'''
DELETE FROM `ical_key`
WHERE
`requester` = %s
''',
(requester, ))
connection.commit()
cursor.close()
connection.close()
|
ckan/migration/versions/032_d89e0731422d_add_extra_info_field_to_resources.py | ziveo/ckan | 2,805 | 11161827 | <filename>ckan/migration/versions/032_d89e0731422d_add_extra_info_field_to_resources.py
# encoding: utf-8
"""032 Add extra info field_to_resources
Revision ID: d89e0731422d
Revises: <KEY>6
Create Date: 2018-09-04 18:49:00.003141
"""
from alembic import op
import sqlalchemy as sa
from ckan.migration import skip_based_on_legacy_engine_version
# revision identifiers, used by Alembic.
revision = 'd89e0731422d'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
if skip_based_on_legacy_engine_version(op, __name__):
return
op.add_column('package_resource', sa.Column('extras', sa.UnicodeText))
op.add_column(
'package_resource_revision', sa.Column('extras', sa.UnicodeText)
)
def downgrade():
op.drop_column('package_resource', 'extras')
op.drop_column('package_resource_revision', 'extras')
|
reviewboard/reviews/models/general_comment.py | amalik2/reviewboard | 921 | 11161837 | from __future__ import unicode_literals
from reviewboard.reviews.models.base_comment import BaseComment
from django.utils.translation import ugettext_lazy as _
class GeneralComment(BaseComment):
"""A comment on a review request that is not tied to any code or file.
A general comment on a review request is used when a comment is not tied
to specific lines of code or a special file attachment, and an issue is
opened. Examples include suggestions for testing or pointing out errors
in the change description.
"""
anchor_prefix = 'gcomment'
comment_type = 'general'
def get_absolute_url(self):
return self.get_review_url()
class Meta:
app_label = 'reviews'
db_table = 'reviews_generalcomment'
verbose_name = _('General Comment')
verbose_name_plural = _('General Comments')
|
agents/simple-trpo/simple_trpo/vectorized_env.py | yangalexandery/rl-teacher | 463 | 11161840 | <filename>agents/simple-trpo/simple_trpo/vectorized_env.py<gh_stars>100-1000
import numpy as np
from multiprocessing import Process, Pipe
import cloudpickle
def env_worker(remote, env_fn_wrapper):
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, _ = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'close':
remote.close()
break
else:
raise NotImplementedError
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
class VectorizedEnv(object):
def __init__(self, env_fns):
"""
:param env_fns: A list of thunks for generating environments
"""
prototype_env = env_fns[0]() # Construct an env to extract action spaces
self.action_space = prototype_env.action_space
self.observation_space = prototype_env.observation_space
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(len(env_fns))])
self.ps = [Process(target=env_worker, args=(work_remote, CloudpickleWrapper(env_fn)))
for (work_remote, env_fn) in zip(self.work_remotes, env_fns)]
for p in self.ps:
p.start()
def step(self, vectorized_actions):
"""
:param vectorized_actions: An iterable of actions
:return: (vectorized_obs, vectorized_rewards, vectorized_dones)
"""
for remote, action in zip(self.remotes, vectorized_actions):
remote.send(('step', action))
results = [remote.recv() for remote in self.remotes]
obs, rews, dones = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones)
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
@property
def num_envs(self):
return len(self.remotes)
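# Illustrative usage (a sketch, not part of the original module; assumes the `gym`
# package is installed and provides "CartPole-v0"):
#
#   import gym
#   env_fns = [lambda: gym.make("CartPole-v0") for _ in range(4)]
#   venv = VectorizedEnv(env_fns)
#   obs = venv.reset()                                  # stacked observations, shape (4, obs_dim)
#   actions = [venv.action_space.sample() for _ in range(4)]
#   obs, rewards, dones = venv.step(actions)            # workers reset themselves when done
#   venv.close()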
|
CodeGenX64/regs.py | robertmuth/Cwerg | 171 | 11161867 | from Base import ir
from Base import opcode_tab as o
from Base import reg_alloc
from Base import liveness
from Base import serialize
import enum
import dataclasses
from typing import List, Optional, Tuple
# This must mimic the DK enum (0: invalid, no more than 255 entries)
@enum.unique
class CpuRegKind(enum.Enum):
INVALID = 0
GPR = 1
FLT = 2
GPR_FAMILY = CpuRegKind.GPR.value
FLT_FAMILY = CpuRegKind.FLT.value
REG_KIND_TO_CPU_REG_FAMILY = {
o.DK.S8: GPR_FAMILY,
o.DK.S16: GPR_FAMILY,
o.DK.S32: GPR_FAMILY,
o.DK.S64: GPR_FAMILY,
#
o.DK.U8: GPR_FAMILY,
o.DK.U16: GPR_FAMILY,
o.DK.U32: GPR_FAMILY,
o.DK.U64: GPR_FAMILY,
#
o.DK.A64: GPR_FAMILY,
o.DK.C64: GPR_FAMILY,
#
o.DK.F32: FLT_FAMILY,
o.DK.F64: FLT_FAMILY,
}
# We use the 64 bit reg names regardless of the operand width
_REG_NAMES = ["rax", "rcx", "rdx", "rbx", "sp", "rbp", "rsi", "rdi",
"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"]
_GPR_REGS = [ir.CpuReg(name, i, CpuRegKind.GPR) for i, name in enumerate(_REG_NAMES)]
_FLT_REGS = [ir.CpuReg(f"xmm{i}", i, CpuRegKind.FLT) for i in range(16)]
CPU_REGS_MAP = {**{r.name: r for r in _GPR_REGS},
**{r.name: r for r in _FLT_REGS}}
GPR_RESERVED_MASK = 0x0011 # rax/sp is not available for allocation
GPR_REGS_MASK = 0xffee
GPR_LAC_REGS_MASK = 0xf028 # rbx, rbp, r12-r15
FLT_RESERVED_MASK = 0x0001 # xmm0 is not available for allocation
FLT_REGS_MASK = 0xffff
FLT_LAC_REGS_MASK = 0xff00 # xmm8 - xmm15
REGS_RESERVED = {_GPR_REGS[0], _FLT_REGS[0]}
_KIND_TO_CPU_REG_LIST = {
o.DK.S8: _GPR_REGS,
o.DK.S16: _GPR_REGS,
o.DK.S32: _GPR_REGS,
o.DK.U8: _GPR_REGS,
o.DK.U16: _GPR_REGS,
o.DK.U32: _GPR_REGS,
#
o.DK.U64: _GPR_REGS,
o.DK.S64: _GPR_REGS,
o.DK.A64: _GPR_REGS,
o.DK.C64: _GPR_REGS,
#
o.DK.F32: _FLT_REGS,
o.DK.F64: _FLT_REGS,
}
# mimics linux calling convention
_GPR_IN_REGS = [
_GPR_REGS[7], # rdi
_GPR_REGS[6], # rsi
_GPR_REGS[2], # rdx
_GPR_REGS[1], # rcx
_GPR_REGS[8], # r8
_GPR_REGS[9], # r9
_GPR_REGS[10], # r10
_GPR_REGS[11], # r11
_GPR_REGS[0], # rax
]
_GPR_OUT_REGS = [
_GPR_REGS[0], # rax
_GPR_REGS[2], # rdx
_GPR_REGS[7], # rdi
_GPR_REGS[6], # rsi
_GPR_REGS[1], # rcx
_GPR_REGS[8], # r8
_GPR_REGS[9], # r9
_GPR_REGS[10], # r10
_GPR_REGS[11], # r10
]
_FLT_IN_OUT_REGS = _FLT_REGS[1:8]
def MaskToGprRegs(mask: int) -> List[ir.CpuReg]:
out = []
for reg in _GPR_REGS:
if ((1 << reg.no) & mask) != 0:
out.append(reg)
return out
def MaskToFltRegs(mask: int) -> List[ir.CpuReg]:
out = []
for reg in _FLT_REGS:
if ((1 << reg.no) & mask) != 0:
out.append(reg)
return out
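# For example (illustrative): GPR_LAC_REGS_MASK == 0xf028 has bits 3, 5 and 12-15 set,
# so MaskToGprRegs(GPR_LAC_REGS_MASK) yields the callee-saved registers
# [rbx, rbp, r12, r13, r14, r15].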
def _GetCpuRegsForSignature(kinds: List[o.DK], gpr_regs: List[ir.CpuReg],
flt_regs: List[ir.CpuReg]) -> List[ir.CpuReg]:
out = []
next_gpr = 0
next_flt = 0
for k in kinds:
if k in {o.DK.F32, o.DK.F64}:
cpu_reg = flt_regs[next_flt]
next_flt += 1
else:
cpu_reg = gpr_regs[next_gpr]
next_gpr += 1
out.append(cpu_reg)
return out
class PushPopInterface:
"""Used with FunPopargConversion and FunPushargConversion"""
@classmethod
def GetCpuRegsForInSignature(cls, kinds: List[o.DK]) -> List[ir.CpuReg]:
return _GetCpuRegsForSignature(kinds, _GPR_IN_REGS, _FLT_IN_OUT_REGS)
@classmethod
def GetCpuRegsForOutSignature(cls, kinds: List[o.DK]) -> List[ir.CpuReg]:
return _GetCpuRegsForSignature(kinds, _GPR_OUT_REGS, _FLT_IN_OUT_REGS)
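# For example (illustrative): GetCpuRegsForInSignature([o.DK.S64, o.DK.F64, o.DK.A64])
# yields [rdi, xmm1, rsi] - integer/pointer arguments walk the GPR list while floating
# point arguments walk the FLT list independently.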
class CpuRegPool(reg_alloc.RegPool):
"""
We also distinguish if the register is lac (live across calls)
"""
def __init__(self, fun: ir.Fun, bbl: ir.Bbl, allow_spilling,
gpr_available_lac: int, gpr_available_not_lac: int, flt_available_lac: int,
flt_available_not_lac: int):
super(CpuRegPool, self).__init__()
self._fun = fun
self._bbl = bbl
self._allow_spilling = allow_spilling
# set of registers that are ready to be allocated subject to the
# reserved regions below. Should use an ordered set here?
# Note, the int is a bit-mask
self._gpr_available_lac: int = gpr_available_lac
self._gpr_available_not_lac: int = gpr_available_not_lac
# for FLT and DBL since the register are overlapping
self._flt_available_lac: int = flt_available_lac
self._flt_available_not_lac: int = flt_available_not_lac
self._gpr_reserved: List[reg_alloc.PreAllocation] = [
reg_alloc.PreAllocation() for _ in range(len(_GPR_REGS))]
self._flt_reserved: List[reg_alloc.PreAllocation] = [
reg_alloc.PreAllocation() for _ in range(len(_FLT_REGS))]
def get_cpu_reg_family(self, kind: o.DK) -> int:
return CpuRegKind.FLT if kind in {o.DK.F64, o.DK.F32} else CpuRegKind.GPR
def get_available(self, lac, is_gpr) -> int:
# TODO: use lac as fallback if no not_lac is available
if is_gpr:
return self._gpr_available_lac if lac else self._gpr_available_not_lac
else:
return self._flt_available_lac if lac else self._flt_available_not_lac
def render_available(self, lac, is_gpr) -> str:
"""used by debugging tools"""
l = " lac" if lac else ""
return f"{self.get_available(True, is_gpr):08x} {self.get_available(False, is_gpr):08x}"
def set_available(self, lac, is_gpr, available):
if is_gpr:
if lac:
self._gpr_available_lac = available
else:
self._gpr_available_not_lac = available
else:
if lac:
self._flt_available_lac = available
else:
self._flt_available_not_lac = available
def add_reserved_range(self, lr: reg_alloc.LiveRange):
"""Add a reserved region to the pool (part of pool set up)"""
reg = lr.reg
assert reg.HasCpuReg()
cpu_reg = reg.cpu_reg
if cpu_reg.kind == CpuRegKind.GPR:
self._gpr_reserved[cpu_reg.no].add(lr)
else:
assert cpu_reg.kind == CpuRegKind.FLT
self._flt_reserved[cpu_reg.no].add(lr)
def get_available_reg(self, lr: reg_alloc.LiveRange) -> ir.CpuReg:
lac = liveness.LiveRangeFlag.LAC in lr.flags
is_gpr = lr.reg.kind.flavor() != o.DK_FLAVOR_F
available = self.get_available(lac, is_gpr)
# print(f"GET {lr} {self} avail:{available:x}")
if not is_gpr:
for n in range(len(_FLT_REGS)):
mask = 1 << n
if available & mask == mask:
if not self._flt_reserved[n].has_conflict(lr):
self.set_available(lac, is_gpr, available & ~mask)
return _KIND_TO_CPU_REG_LIST[lr.reg.kind][n]
else:
for n in range(len(_GPR_REGS)):
mask = 1 << n
if mask & available == mask:
if not self._gpr_reserved[n].has_conflict(lr):
self.set_available(lac, is_gpr, available & ~mask)
return _KIND_TO_CPU_REG_LIST[lr.reg.kind][n]
if self._allow_spilling:
return ir.CPU_REG_SPILL
print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
lines = [f"{n - 1:2} {x}" for n, x in enumerate(serialize.BblRenderToAsm(self._bbl))]
print("\n".join(lines))
print(f"# ALLOCATION IMPOSSIBLE - no spilling allowed in {self._fun.name}:{self._bbl.name}")
print(f"# {lr}")
print(f"# ALLOCATOR status: {self}")
print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
assert False, f"in {self._fun.name}:{self._bbl.name} no reg available for {lr} in {self}"
def give_back_available_reg(self, cpu_reg: ir.CpuReg):
if cpu_reg in REGS_RESERVED:
return
reg_mask = 1 << cpu_reg.no
if cpu_reg.kind == CpuRegKind.FLT:
is_gpr = False
is_lac = (reg_mask & FLT_LAC_REGS_MASK) != 0
else:
is_gpr = True
is_lac = (reg_mask & GPR_LAC_REGS_MASK) != 0
available = self.get_available(is_lac, is_gpr)
self.set_available(is_lac, is_gpr, available | reg_mask)
# print (f"@@@@ adding {lac} {cpu_reg} {available | mask:x}")
def __str__(self):
gpr_lac, gpr_not_lac = self._gpr_available_lac, self._gpr_available_not_lac
flt_lac, flt_not_lac = self._flt_available_lac, self._flt_available_not_lac
out = [f"POOL gpr:{gpr_lac:x}/{gpr_not_lac:x} flt:{flt_lac:x}/{flt_not_lac:x}"]
for n, pa in enumerate(self._gpr_reserved):
if pa.ranges:
out.append(f"gpr{n} {len(pa.ranges)}")
for n, pa in enumerate(self._flt_reserved):
if pa.ranges:
out.append(f"flt{n} {len(pa.ranges)}")
return "\n".join(out)
def _RunLinearScan(bbl: ir.Bbl, fun: ir.Fun, live_ranges: List[liveness.LiveRange], allow_spilling,
gpr_regs_lac: int, gpr_regs_not_lac: int,
flt_regs_lac: int,
flt_regs_not_lac: int):
pool = CpuRegPool(fun, bbl, allow_spilling,
gpr_regs_lac, gpr_regs_not_lac, flt_regs_lac, flt_regs_not_lac)
for lr in live_ranges:
# since we are operating on a BBL we cannot change LiveRanges
# extending beyond the BBL.
# reg_kinds_fixed (e.g. Machine) regs are assumed to be
# pre-allocated and will not change
if liveness.LiveRangeFlag.PRE_ALLOC in lr.flags:
assert lr.cpu_reg is not ir.CPU_REG_INVALID and lr.cpu_reg is not ir.CPU_REG_SPILL
pool.add_reserved_range(lr)
else:
lr.cpu_reg = ir.CPU_REG_INVALID
# print (f"{pool}")
# print(f"\nPY {bbl.name}")
# for lr in live_ranges:
# print(f"{lr}")
# print ("\n".join(serialize.BblRenderToAsm(bbl)))
n = [0]
def logger(lr, message):
m = f"{n[0]} {lr} {message}"
n[0] += 1
print(m)
reg_alloc.RegisterAssignerLinearScan(live_ranges, pool, None)
def _AssignAllocatedRegsAndMarkSpilledRegs(live_ranges) -> int:
spill_count = 0
for lr in live_ranges:
if liveness.LiveRangeFlag.PRE_ALLOC in lr.flags:
continue
if lr.is_use_lr():
continue
assert lr.cpu_reg != ir.CPU_REG_INVALID
if lr.cpu_reg is ir.CPU_REG_SPILL:
lr.reg.cpu_reg = ir.StackSlot(0)
spill_count += 1
else:
lr.reg.cpu_reg = lr.cpu_reg
return spill_count
def _DumpBblWithLineNumbers(bbl):
lines = serialize.BblRenderToAsm(bbl)
print(lines.pop(0))
for n, l in enumerate(lines):
print(f"{n:2d}", l)
def _BblRegAllocOrSpill(bbl: ir.Bbl, fun: ir.Fun) -> int:
"""Allocates regs to the intra bbl live ranges
Note, this runs after global register allocation has occurred
"""
VERBOSE = False
if VERBOSE:
_DumpBblWithLineNumbers(bbl)
live_ranges = liveness.BblGetLiveRanges(bbl, fun, bbl.live_out)
live_ranges.sort()
for lr in live_ranges:
assert liveness.LiveRangeFlag.IGNORE not in lr.flags
# since we are operating on a BBL we cannot change LiveRanges
# extending beyond the BBL.
# reg_kinds_fixed (e.g. Machine) regs are assumed to be
# pre-allocated and will not change
if lr.reg.HasCpuReg():
lr.flags |= liveness.LiveRangeFlag.PRE_ALLOC
lr.cpu_reg = lr.reg.cpu_reg
if VERBOSE:
print (repr(lr))
# First reg-alloc pass to determine if spilling is needed.
# Note, global and fixed registers have already been assigned and will
# be respected by the allocator.
_RunLinearScan(bbl, fun, live_ranges, True,
GPR_REGS_MASK & GPR_LAC_REGS_MASK, GPR_REGS_MASK & ~GPR_LAC_REGS_MASK,
FLT_REGS_MASK & FLT_LAC_REGS_MASK, FLT_REGS_MASK & ~FLT_LAC_REGS_MASK)
if VERBOSE:
print ("@@@ AFTER")
for lr in live_ranges:
print (repr(lr))
# for reg
# print(f"SPILL: {spilled_regs}")
# count += len(spilled_regs)
# reg_alloc.BblSpillRegs(bbl, fun, spilled_regs, o.DK.U32)
# continue
#
# # print ("\n".join(serialize.BblRenderToAsm(bbl)))
# return count
return _AssignAllocatedRegsAndMarkSpilledRegs(live_ranges)
def FunLocalRegAlloc(fun):
return ir.FunGenericRewriteBbl(fun, _BblRegAllocOrSpill)
@dataclasses.dataclass()
class EmitContext:
"""Grab bag of data needed for emitting instructions"""
gpr_reg_mask: int = 0 # bitmask for saved gpr
flt_reg_mask: int = 0 # bitmask for saved flt (dbl, etc.) only lower 64bits are saved
stk_size: int = 0
is_leaf: bool = False
scratch_cpu_reg: ir.CpuReg = ir.CPU_REG_INVALID
def _FunCpuRegStats(fun: ir.Fun) -> Tuple[int, int]:
gpr = 0
flt = 0
for bbl in fun.bbls:
for ins in bbl.inss:
for reg in ins.operands:
if isinstance(reg, ir.Reg):
if reg.IsSpilled():
continue
assert reg.HasCpuReg(), f"missing cpu reg for {reg} in {ins} {ins.operands}"
if reg.cpu_reg.kind == CpuRegKind.GPR:
gpr |= 1 << reg.cpu_reg.no
else:
flt |= 1 << reg.cpu_reg.no
return gpr, flt
def FunComputeEmitContext(fun: ir.Fun) -> EmitContext:
gpr_mask, flt_mask = _FunCpuRegStats(fun)
gpr_mask &= GPR_LAC_REGS_MASK
flt_mask &= FLT_LAC_REGS_MASK
stk_size = (fun.stk_size + 15) // 16 * 16
return EmitContext(gpr_mask, flt_mask, stk_size, ir.FunIsLeaf(fun))
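# The expression above rounds the stack frame up to a 16-byte multiple,
# e.g. fun.stk_size == 20 -> (20 + 15) // 16 * 16 == 32, while 16 stays 16.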
def AssignCpuRegOrMarkForSpilling(assign_to: List[ir.Reg],
cpu_reg_mask_first_choice: int,
cpu_reg_mask_second_choice: int) -> List[ir.Reg]:
"""
Returns the regs that could not be assigned.
"""
# print (f"@@ AssignCpuRegOrMarkForSpilling {len(assign_to)} {cpu_reg_mask_first_choice:x} {cpu_reg_mask_second_choice:x}")
mask = cpu_reg_mask_first_choice
pos = 0
for reg in assign_to:
if mask == 0 and cpu_reg_mask_second_choice != 0:
mask = cpu_reg_mask_second_choice
cpu_reg_mask_second_choice = 0
pos = 0
if mask == 0:
reg.cpu_reg = ir.StackSlot()
continue
while ((1 << pos) & mask) == 0: pos += 1
assert reg.cpu_reg is None
reg.cpu_reg = _KIND_TO_CPU_REG_LIST[reg.kind][pos]
mask &= ~(1 << pos)
pos += 1
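# Minimal sketch of the policy above (hypothetical masks, not from the original
# source): with two registers to place, cpu_reg_mask_first_choice == 0b0100 and
# an empty second choice, the first reg is assigned cpu reg #2 and the second
# one is marked for spilling via ir.StackSlot().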
|
migrations/versions/3bef10ab5088_add_various_testgrou.py | vault-the/changes | 443 | 11161879 | <filename>migrations/versions/3bef10ab5088_add_various_testgrou.py
"""Add various TestGroup indexes
Revision ID: 3bef10ab5088
Revises: fd1a<PASSWORD>
Create Date: 2013-11-04 17:10:52.057285
"""
# revision identifiers, used by Alembic.
revision = '3bef10ab5088'
down_revision = '<PASSWORD>'
from alembic import op
def upgrade():
op.create_index('idx_testgroup_project_id', 'testgroup', ['project_id'])
op.create_index('idx_testgroup_suite_id', 'testgroup', ['suite_id'])
op.create_index('idx_testgroup_parent_id', 'testgroup', ['parent_id'])
def downgrade():
### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###
|
tools/freq_gen/freq_gen.py | wizard97/iSkipper | 150 | 11161904 | #! /usr/bin/env python3
import subprocess
import os
import argparse
tool_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
decoder_tool = os.path.join(tool_dir, 'spi_decoder', 'spidecode.py')
fifo_marker = 'FIFO ================== FIFO'
def target_prefix(addr):
return 'W\t' + addr + '\t0x'
target_regs = ['0x07', '0x08', '0x09']
fstep = 61.0
def extract_freqs(spi_dump_file):
table = subprocess.check_output([decoder_tool, spi_dump_file]).decode('utf-8')
sections = table.split(fifo_marker)
section_map = {}
count = 0
for section in sections[:2]:
section_map[count] = {}
for line in section.splitlines():
for target_reg in target_regs:
if line.startswith(target_prefix(target_reg)):
if target_reg not in section_map[count]:
section_map[count][target_reg] = line[len(target_prefix(target_reg)):len(target_prefix(target_reg))+2]
count += 1
return [section_map[0][x] for x in target_regs], [section_map[1][x] for x in target_regs]
def main():
parser = argparse.ArgumentParser(
description='Generates frequency codes')
parser.add_argument(
'datacsv', help='SPI output CSV file from Saleae software')
args = parser.parse_args()
send, receive = extract_freqs(args.datacsv)
sval=[int(x, 16) for x in send]
rval=[int(x, 16) for x in receive]
sval.reverse()
rval.reverse()
sendfreq = sum([v << 8*i for i,v in enumerate(sval)])*fstep
recvfreq = sum([v << 8*i for i,v in enumerate(rval)])*fstep
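# Equivalently, given the byte order above:
# freq_hz = ((reg_0x07 << 16) | (reg_0x08 << 8) | reg_0x09) * fstep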
print(send)
print(receive)
print("Send (KHz): %f, Recv (KHz):%f" % (sendfreq/1000, recvfreq/1000))
if __name__ == '__main__':
main()
|
osp/citations/models/citation_index.py | davidmcclure/open-syllabus-project | 220 | 11161907 | <gh_stars>100-1000
import random
import numpy as np
import us
import iso3166
from osp.common import config
from osp.common.utils import query_bar
from osp.common.mixins.elasticsearch import Elasticsearch
from osp.citations.models import Text, Citation
from scipy.stats import rankdata
from clint.textui import progress
class Citation_Index(Elasticsearch):
es_index = 'citation'
es_mapping = {
'_id': {
'index': 'not_analyzed',
'store': True,
},
'properties': {
'text_id': {
'type': 'integer'
},
'document_id': {
'type': 'integer'
},
'corpus': {
'index': 'not_analyzed',
'type': 'string',
},
'subfield_id': {
'type': 'integer'
},
'field_id': {
'type': 'integer'
},
'institution_id': {
'type': 'integer'
},
'state': {
'index': 'not_analyzed',
'type': 'string',
},
'country': {
'index': 'not_analyzed',
'type': 'string',
},
}
}
@classmethod
def es_stream_docs(cls):
"""
Stream Elasticsearch docs.
Yields:
dict: The next document.
"""
query = (
Citation.select()
.join(Text)
.where(Text.display==True)
.where(Text.valid==True)
)
for row in query_bar(query):
doc = {}
# Local fields:
doc['_id'] = row.id
doc['text_id'] = row.text_id
doc['document_id'] = row.document_id
doc['corpus'] = row.text.corpus
# Field references:
subfield = row.subfield
if subfield:
doc['subfield_id'] = subfield.id
doc['field_id'] = subfield.field_id
# Institution reference:
inst = row.institution
if inst:
doc['institution_id'] = inst.id
doc['state'] = inst.state
doc['country'] = inst.country
yield doc
@classmethod
def compute_ranking(cls, filters={}, depth=1e5):
"""
Given a set of query filters, count the number of times each text is
cited on documents that match the criteria.
Args:
filters (dict): A set of key -> value filters.
depth (int): The max number of texts to rank.
Returns:
dict: {'text_id' -> count}
"""
conds = []
# Assemble match filters.
for field, value in filters.items():
if value: # Ignore empty values.
conds.append({
('terms' if type(value) is list else 'term'): {
field: value
}
})
# Query for the aggregation.
result = config.es.search(
index = cls.es_index,
doc_type = cls.es_index,
search_type = 'count',
body = {
'query': {
'bool': {
'must': conds
}
},
'aggs': {
'texts': {
'terms': {
'field': 'text_id',
'size': depth,
}
}
}
}
)
# Map text id -> citation count.
counts = {}
for b in result['aggregations']['texts']['buckets']:
counts[str(b['key'])] = b['doc_count']
return counts
@classmethod
def docs_with_text(cls, text_id, depth=1000):
"""
Given a text, get the set of documents that assign the text.
Args:
text_id (int)
Returns:
list: A set of document ids.
"""
result = config.es.search(
index = cls.es_index,
doc_type = cls.es_index,
search_type = 'count',
body = {
'query': {
'term': {
'text_id': text_id
}
},
'aggs': {
'texts': {
'terms': {
'field': 'document_id',
'size': depth,
}
}
}
}
)
doc_ids = []
for b in result['aggregations']['texts']['buckets']:
doc_ids.append(b['key'])
return doc_ids
@classmethod
def count_facets(cls, field, depth=100, include=None):
"""
Given a field, return a set of facet counts.
Args:
field (str)
depth (int)
include (list)
Returns:
list: (value, count)
"""
result = config.es.search(
index = cls.es_index,
doc_type = cls.es_index,
search_type = 'count',
body = {
'aggs': {
'ranking': {
'terms': {
'field': field,
'size': depth,
}
},
'include': {
'terms': {
'field': field,
'include': include or [],
}
},
}
}
)
counts = {}
# Merge the raw rankings + include list.
for agg in ['ranking', 'include']:
for b in result['aggregations'][agg]['buckets']:
counts[b['key']] = (b['key'], b['doc_count'])
# Sort by count descending.
return sorted(
list(counts.values()),
key=lambda x: x[1],
reverse=True,
)
@classmethod
def compute_scores(cls, *args, **kwargs):
"""
Compute "percentile" scores for texts - text X is assigned more
frequently than Y percent of all texts.
Args:
depth (int): The max number of texts to rank.
Returns:
dict: {'text_id' -> score}
"""
# Pull unfiltered counts.
counts = cls.compute_ranking(*args, **kwargs)
# Get the max count.
max_count = max(list(counts.values()))
return {
tid: np.sqrt(count) / np.sqrt(max_count)
for tid, count in counts.items()
}
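# Worked example (hypothetical counts): if the most-assigned text has
# max_count == 100, a text cited 25 times scores sqrt(25) / sqrt(100) == 0.5.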
|
afctl/tests/deployment_tests/test_local_deployment.py | hafixo/afctl | 131 | 11161921 | <reponame>hafixo/afctl
from afctl.plugins.deployments.docker.deployment_config import DockerDeploymentConfig
from afctl.tests.utils import clean_up, PROJECT_NAME, PROJECT_CONFIG_DIR
import pytest
import os, subprocess
class TestLocalDeployment:
@pytest.fixture(scope='function')
def create_project(self):
clean_up(PROJECT_NAME)
clean_up(PROJECT_CONFIG_DIR)
main_dir = os.path.join('/tmp', PROJECT_NAME)
subprocess.run(['mkdir', main_dir])
subprocess.run(['mkdir', PROJECT_CONFIG_DIR])
subprocess.run(['mkdir', os.path.join(main_dir, 'deployments')])
config_file = "{}.yml".format(os.path.join(PROJECT_CONFIG_DIR, PROJECT_NAME))
subprocess.run(['touch', config_file])
config_file_content = """
global:
airflow_version:
git:
origin:
access-token:
deployment:
qubole:
local:
compose:
"""
with open(config_file, 'w') as file:
file.write(config_file_content)
yield main_dir
clean_up(PROJECT_NAME)
clean_up(PROJECT_CONFIG_DIR)
def test_docker_compose_generation(self, create_project):
DockerDeploymentConfig.generate_dirs(create_project, PROJECT_NAME)
config_file = "{}.yml".format(os.path.join(PROJECT_CONFIG_DIR, PROJECT_NAME))
expected_output = """global:
airflow_version: null
git:
origin: null
access-token: null
deployment:
qubole: null
local:
compose: /tmp/test_project/deployments/test_project-docker-compose.yml
"""
current_output = open(config_file).read()
expected_output = expected_output.replace(" ", "")
current_output = current_output.replace(" ", "")
assert expected_output == current_output |
dirty_equals/_strings.py | mattkram/dirty-equals | 386 | 11161979 | import re
from typing import Any, Optional, Pattern, Tuple, Type, TypeVar, Union
from ._base import DirtyEquals
from ._utils import Omit, plain_repr
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal # type: ignore[misc]
T = TypeVar('T', str, bytes)
__all__ = 'IsStr', 'IsBytes', 'IsAnyStr'
class IsAnyStr(DirtyEquals[T]):
"""
Comparison of `str` or `bytes` objects.
This class allow comparison with both `str` and `bytes` but is subclassed
by [`IsStr`][dirty_equals.IsStr] and [`IsBytes`][dirty_equals.IsBytes] which restrict comparison to
`str` or `bytes` respectively.
"""
expected_types: Tuple[Type[Any], ...] = (str, bytes)
def __init__(
self,
*,
min_length: Optional[int] = None,
max_length: Optional[int] = None,
case: Literal['upper', 'lower', None] = None,
regex: Union[None, T, Pattern[T]] = None,
regex_flags: int = 0,
):
"""
Args:
min_length: minimum length of the string/bytes
max_length: maximum length of the string/bytes
case: check case of the string/bytes
regex: regular expression to match the string/bytes with, `re.fullmatch` is used.
This can be a compiled regex, or a string or bytes.
regex_flags: optional flags for the regular expression
Examples:
```py title="IsAnyStr"
from dirty_equals import IsAnyStr
assert 'foobar' == IsAnyStr()
assert b'foobar' == IsAnyStr()
assert 123 != IsAnyStr()
assert 'foobar' == IsAnyStr(regex='foo...')
assert 'foobar' == IsAnyStr(regex=b'foo...') #(1)!
assert 'foobar' == IsAnyStr(min_length=6)
assert 'foobar' != IsAnyStr(min_length=8)
assert 'foobar' == IsAnyStr(case='lower')
assert 'Foobar' != IsAnyStr(case='lower')
```
1. `regex` can be either a string or bytes, `IsAnyStr` will take care of conversion so checks work.
"""
self.min_length = min_length
self.max_length = max_length
self.case = case
self._flex = len(self.expected_types) > 1
if regex is None:
self.regex: Union[None, T, Pattern[T]] = None
self.regex_flags: int = 0
else:
self.regex, self.regex_flags = self._prepare_regex(regex, regex_flags)
super().__init__(
min_length=Omit if min_length is None else min_length,
max_length=Omit if max_length is None else max_length,
case=case or Omit,
regex=regex or Omit,
regex_flags=Omit if regex_flags == 0 else plain_repr(repr(re.RegexFlag(regex_flags))),
)
def equals(self, other: Any) -> bool:
if type(other) not in self.expected_types:
return False
if self.regex is not None:
if self._flex and isinstance(other, str):
other = other.encode()
if not re.fullmatch(self.regex, other, flags=self.regex_flags):
return False
len_ = len(other)
if self.min_length is not None and len_ < self.min_length:
return False
if self.max_length is not None and len_ > self.max_length:
return False
if self.case == 'upper' and not other.isupper():
return False
if self.case == 'lower' and not other.islower():
return False
return True
def _prepare_regex(self, regex: Union[T, Pattern[T]], regex_flags: int) -> Tuple[Union[T, Pattern[T]], int]:
if isinstance(regex, re.Pattern):
if self._flex:
# less performant, but more flexible
if regex_flags == 0 and regex.flags != re.UNICODE:
regex_flags = regex.flags & ~re.UNICODE
regex = regex.pattern
elif regex_flags != 0:
regex = regex.pattern
if self._flex and isinstance(regex, str):
regex = regex.encode() # type: ignore[assignment]
return regex, regex_flags
class IsStr(IsAnyStr[str]):
"""
Checks if the value is a string, and optionally meets some constraints.
`IsStr` is a subclass of [`IsAnyStr`][dirty_equals.IsAnyStr] and therefore allows all the same arguments.
Examples:
```py title="IsStr"
from dirty_equals import IsStr
assert 'foobar' == IsStr()
assert b'foobar' != IsStr()
assert 'foobar' == IsStr(regex='foo...')
assert 'FOOBAR' == IsStr(min_length=5, max_length=10, case='upper')
```
"""
expected_types = (str,)
class IsBytes(IsAnyStr[bytes]):
"""
Checks if the value is a bytes object, and optionally meets some constraints.
`IsBytes` is a subclass of [`IsAnyStr`][dirty_equals.IsAnyStr] and therefore allows all the same arguments.
Examples:
```py title="IsBytes"
from dirty_equals import IsBytes
assert b'foobar' == IsBytes()
assert 'foobar' != IsBytes()
assert b'foobar' == IsBytes(regex=b'foo...')
assert b'FOOBAR' == IsBytes(min_length=5, max_length=10, case='upper')
```
"""
expected_types = (bytes,)
|
zulip_bots/zulip_bots/bots/tictactoe/test_tictactoe.py | dimisjim/python-zulip-api | 351 | 11162018 | from typing import Any, List, Tuple
from zulip_bots.game_handler import GameInstance
from zulip_bots.test_lib import BotTestCase, DefaultTests
class TestTicTacToeBot(BotTestCase, DefaultTests):
bot_name = "tictactoe"
# FIXME: Add tests for computer moves
# FIXME: Add test lib for game_handler
# Tests for TicTacToeModel functions
# Things that might need to be checked: how the model is used in these functions.
# When running the tests, many of the failures involved current_board, which
# may need to be initialized prior to the constructor initialization in order to
# avoid these errors.
def test_get_value(self) -> None:
board = [[0, 1, 0], [0, 0, 0], [0, 0, 2]]
position = (0, 1)
response = 1
self._test_get_value(board, position, response)
def _test_get_value(
self, board: List[List[int]], position: Tuple[int, int], expected_response: int
) -> None:
model, message_handler = self._get_game_handlers()
tictactoeboard = model(board)
response = tictactoeboard.get_value(board, position)
self.assertEqual(response, expected_response)
def test_determine_game_over_with_win(self) -> None:
board = [[1, 1, 1], [0, 2, 0], [2, 0, 2]]
players = ["Human", "Computer"]
response = "current turn"
self._test_determine_game_over_with_win(board, players, response)
def _test_determine_game_over_with_win(
self, board: List[List[int]], players: List[str], expected_response: str
) -> None:
model, message_handler = self._get_game_handlers()
tictactoegame = model(board)
response = tictactoegame.determine_game_over(players)
self.assertEqual(response, expected_response)
def test_determine_game_over_with_draw(self) -> None:
board = [[1, 2, 1], [1, 2, 1], [2, 1, 2]]
players = ["Human", "Computer"]
response = "draw"
self._test_determine_game_over_with_draw(board, players, response)
def _test_determine_game_over_with_draw(
self, board: List[List[int]], players: List[str], expected_response: str
) -> None:
model, message_handler = self._get_game_handlers()
tictactoeboard = model(board)
response = tictactoeboard.determine_game_over(players)
self.assertEqual(response, expected_response)
def test_board_is_full(self) -> None:
board = [[1, 0, 1], [1, 2, 1], [2, 1, 2]]
response = False
self._test_board_is_full(board, response)
def _test_board_is_full(self, board: List[List[int]], expected_response: bool) -> None:
model, message_handler = self._get_game_handlers()
tictactoeboard = model(board)
response = tictactoeboard.board_is_full(board)
self.assertEqual(response, expected_response)
def test_contains_winning_move(self) -> None:
board = [[1, 1, 1], [0, 2, 0], [2, 0, 2]]
response = True
self._test_contains_winning_move(board, response)
def _test_contains_winning_move(self, board: List[List[int]], expected_response: bool) -> None:
model, message_handler = self._get_game_handlers()
tictactoeboard = model(board)
response = tictactoeboard.contains_winning_move(board)
self.assertEqual(response, expected_response)
def test_get_locations_of_char(self) -> None:
board = [[0, 0, 0], [0, 0, 0], [0, 0, 1]]
response = [[2, 2]]
self._test_get_locations_of_char(board, response)
def _test_get_locations_of_char(
self, board: List[List[int]], expected_response: List[List[int]]
) -> None:
model, message_handler = self._get_game_handlers()
tictactoeboard = model(board)
response = tictactoeboard.get_locations_of_char(board, 1)
self.assertEqual(response, expected_response)
def test_is_valid_move(self) -> None:
board = [[0, 0, 0], [0, 0, 0], [1, 0, 2]]
move = "1,2"
response = True
self._test_is_valid_move(board, move, response)
move = "4,4"
response = False
self._test_is_valid_move(board, move, response)
def _test_is_valid_move(
self, board: List[List[int]], move: str, expected_response: bool
) -> None:
model, message_handler = self._get_game_handlers()
tictactoeboard = model(board)
response = tictactoeboard.is_valid_move(move)
self.assertEqual(response, expected_response)
def test_player_color(self) -> None:
turn = 0
response = ":x:"
self._test_player_color(turn, response)
def _test_player_color(self, turn: int, expected_response: str) -> None:
model, message_handler = self._get_game_handlers()
response = message_handler.get_player_color(0)
self.assertEqual(response, expected_response)
def test_static_responses(self) -> None:
model, message_handler = self._get_game_handlers()
self.assertNotEqual(message_handler.get_player_color(0), None)
self.assertNotEqual(message_handler.game_start_message(), None)
self.assertEqual(
message_handler.alert_move_message("foo", "move 3"), "foo put a token at 3"
)
def test_has_attributes(self) -> None:
model, message_handler = self._get_game_handlers()
self.assertTrue(hasattr(message_handler, "parse_board") is not None)
self.assertTrue(hasattr(message_handler, "alert_move_message") is not None)
self.assertTrue(hasattr(model, "current_board") is not None)
self.assertTrue(hasattr(model, "determine_game_over") is not None)
def test_parse_board(self) -> None:
board = [[0, 1, 0], [0, 0, 0], [0, 0, 2]]
response = ":one: :x: :three:\n\n" + ":four: :five: :six:\n\n" + ":seven: :eight: :o:\n\n"
self._test_parse_board(board, response)
def _test_parse_board(self, board: List[List[int]], expected_response: str) -> None:
model, message_handler = self._get_game_handlers()
response = message_handler.parse_board(board)
self.assertEqual(response, expected_response)
def add_user_to_cache(self, name: str, bot: Any = None) -> Any:
if bot is None:
bot, bot_handler = self._get_handlers()
message = {
"sender_email": f"{<EMAIL>",
"sender_full_name": f"{name}",
}
bot.add_user_to_cache(message)
return bot
def setup_game(self) -> None:
bot = self.add_user_to_cache("foo")
self.add_user_to_cache("baz", bot)
instance = GameInstance(
bot, False, "test game", "abc123", ["<EMAIL>", "<EMAIL>"], "test"
)
bot.instances.update({"abc123": instance})
instance.start()
return bot
def _get_game_handlers(self) -> Tuple[Any, Any]:
bot, bot_handler = self._get_handlers()
return bot.model, bot.gameMessageHandler
|
src/genie/libs/parser/iosxe/tests/ShowIpv6RaGuardPolicy/cli/equal/golden_output_expected.py | balmasea/genieparser | 204 | 11162074 | <reponame>balmasea/genieparser<filename>src/genie/libs/parser/iosxe/tests/ShowIpv6RaGuardPolicy/cli/equal/golden_output_expected.py
expected_output = {
"configuration": {
"trusted_port": "yes",
"device_role": "router",
"min_hop_limit": 1,
"max_hop_limit": 3,
"managed_config_flag": "on",
"other_config_flag": "on",
"max_router_preference": "high",
"match_ra_prefix": "bar",
"match_ipv6_access_list": "foo"
},
"device": {
1: {
"target": "Twe1/0/42",
"policy_type": "PORT",
"policy_name": "asdf",
"feature": "RA guard",
"tgt_range": "vlan all"
}
}
} |
skills_ml/algorithms/job_normalizers/__init__.py | bhagyaramgpo/skills-ml | 147 | 11162082 | """Algorithms to normalize a job title to a smaller space"""
|
objectModel/Python/cdm/persistence/syms/types/constant_entity.py | rt112000/CDM | 884 | 11162083 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
from typing import Union, List
from cdm.utilities import JObject
from .entity_reference import EntityReference
class ConstantEntity(JObject):
def __init__(self):
super().__init__()
self.explanation = None # type: str
self.constantEntityName = None # type: str
self.entityShape = None # type: Union[str, EntityReference]
self.constantValues = None # type: List[List[str]]
|
55.二叉树的深度/55.二叉树的深度.py | shenweichen/coding_interviews | 483 | 11162147 | # -*- coding:utf-8 -*-
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def TreeDepth(self, pRoot):
# write code here
if pRoot is None:
return 0
self.depth = 1
def preOrder(root,depth):
if root is None:
return
if root.left is None and root.right is None:
self.depth = max(self.depth,depth)
return
preOrder(root.left,depth+1)
preOrder(root.right,depth+1)
preOrder(pRoot,1)
return self.depth |
piGAN_lib/inverse_render.py | zihangJiang/CIPS-3D | 308 | 11162173 | import argparse
import math
import os
from torchvision.utils import save_image
import torch
import numpy as np
from PIL import Image
from tqdm import tqdm
import numpy as np
import skvideo.io
import curriculums
from torchvision import transforms
def tensor_to_PIL(img):
img = img.squeeze() * 0.5 + 0.5
return Image.fromarray(img.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy())
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
parser = argparse.ArgumentParser()
parser.add_argument('generator_path', type=str)
parser.add_argument('image_path', type=str)
parser.add_argument('--seed', type=int, default=None)
parser.add_argument('--image_size', type=int, default=128)
parser.add_argument('--num_frames', type=int, default=64)
parser.add_argument('--max_batch_size', type=int, default=2400000)
opt = parser.parse_args()
generator = torch.load(opt.generator_path, map_location=torch.device(device))
ema_file = opt.generator_path.split('generator')[0] + 'ema.pth'
ema = torch.load(ema_file, map_location=device)
ema.copy_to(generator.parameters())
generator.set_device(device)
generator.eval()
if opt.seed is not None:
torch.manual_seed(opt.seed)
gt_image = Image.open(opt.image_path).convert('RGB')
transform = transforms.Compose(
[transforms.Resize(256), transforms.CenterCrop(256), transforms.Resize((opt.image_size, opt.image_size), interpolation=0), transforms.ToTensor(), transforms.Normalize([0.5], [0.5])])
gt_image = transform(gt_image).to(device)
options = {
'img_size': opt.image_size,
'fov': 12,
'ray_start': 0.88,
'ray_end': 1.12,
'num_steps': 24,
'h_stddev': 0,
'v_stddev': 0,
'h_mean': torch.tensor(math.pi/2).to(device),
'v_mean': torch.tensor(math.pi/2).to(device),
'hierarchical_sample': False,
'sample_dist': None,
'clamp_mode': 'relu',
'nerf_noise': 0,
}
render_options = {
'img_size': 256,
'fov': 12,
'ray_start': 0.88,
'ray_end': 1.12,
'num_steps': 48,
'h_stddev': 0,
'v_stddev': 0,
'v_mean': math.pi/2,
'hierarchical_sample': True,
'sample_dist': None,
'clamp_mode': 'relu',
'nerf_noise': 0,
'last_back': True,
}
z = torch.randn((10000, 256), device=device)
with torch.no_grad():
frequencies, phase_shifts = generator.siren.mapping_network(z)
w_frequencies = frequencies.mean(0, keepdim=True)
w_phase_shifts = phase_shifts.mean(0, keepdim=True)
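# The mapped frequencies/phase shifts are averaged over the 10000 random
# latents to obtain a mean latent code, which is used below as the starting
# point for the per-image optimization of the offsets.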
w_frequency_offsets = torch.zeros_like(w_frequencies)
w_phase_shift_offsets = torch.zeros_like(w_phase_shifts)
w_frequency_offsets.requires_grad_()
w_phase_shift_offsets.requires_grad_()
frames = []
n_iterations_pose = 0
n_iterations = 700
os.makedirs('debug', exist_ok=True)
save_image(gt_image, "debug/gt.jpg", normalize=True)
optimizer = torch.optim.Adam([w_frequency_offsets, w_phase_shift_offsets], lr=1e-2, weight_decay = 1e-4)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 100, gamma=0.75)
for i in range(n_iterations):
noise_w_frequencies = 0.03 * torch.randn_like(w_frequencies) * (n_iterations - i)/n_iterations
noise_w_phase_shifts = 0.03 * torch.randn_like(w_phase_shifts) * (n_iterations - i)/n_iterations
frame, _ = generator.forward_with_frequencies(w_frequencies + noise_w_frequencies + w_frequency_offsets, w_phase_shifts + noise_w_phase_shifts + w_phase_shift_offsets, **options)
loss = torch.nn.MSELoss()(frame, gt_image)
loss = loss.mean()
loss.backward()
optimizer.step()
optimizer.zero_grad()
scheduler.step()
if i % 100 == 0:
save_image(frame, f"debug/{i}.jpg", normalize=True)
with torch.no_grad():
for angle in [-0.7, -0.5, -0.3, 0, 0.3, 0.5, 0.7]:
img, _ = generator.staged_forward_with_frequencies(w_frequencies + w_frequency_offsets, w_phase_shifts + w_phase_shift_offsets, h_mean=(math.pi/2 + angle), max_batch_size=opt.max_batch_size, lock_view_dependence=True, **render_options)
save_image(img, f"debug/{i}_{angle}.jpg", normalize=True)
trajectory = []
for t in np.linspace(0, 1, 24):
pitch = 0.2 * t
yaw = 0
trajectory.append((pitch, yaw))
for t in np.linspace(0, 1, opt.num_frames):
pitch = 0.2 * np.cos(t * 2 * math.pi)
yaw = 0.4 * np.sin(t * 2 * math.pi)
trajectory.append((pitch, yaw))
output_name = 'reconstructed.mp4'
writer = skvideo.io.FFmpegWriter(os.path.join('debug', output_name), outputdict={'-pix_fmt': 'yuv420p', '-crf': '21'})
frames = []
depths = []
with torch.no_grad():
for pitch, yaw in tqdm(trajectory):
render_options['h_mean'] = yaw + 3.14/2
render_options['v_mean'] = pitch + 3.14/2
frame, depth_map = generator.staged_forward_with_frequencies(w_frequencies + w_frequency_offsets, w_phase_shifts + w_phase_shift_offsets, max_batch_size=opt.max_batch_size, lock_view_dependence=True, **render_options)
frames.append(tensor_to_PIL(frame))
depths.append(depth_map.unsqueeze(0).expand(-1, 3, -1, -1).squeeze().permute(1, 2, 0).cpu().numpy())
for frame in frames:
writer.writeFrame(np.array(frame))
writer.close() |
data/tracking/methods/SiamFC/builders/source.py | zhangzhengde0225/SwinTrack | 143 | 11162198 | <filename>data/tracking/methods/SiamFC/builders/source.py
from core.run.event_dispatcher.register import EventRegister
from data.tracking.methods._common.builders.build_dataloader import build_dataloader
from .components.dataset import build_siamfc_dataset
from .components.data_processor import build_siamfc_tracker_data_processor
def build_siamfc_data_source(data_config: dict, runtime_vars, config: dict,
global_synchronized_rng, local_rng,
event_register: EventRegister, context):
data_processor, data_batch_collator, metric_collector = build_siamfc_tracker_data_processor(data_config, config)
master_address = runtime_vars.master_address
seed = local_rng.integers(100, 1000000)
dataset_config = data_config['source']
sampling_config = data_config['sampler']
dataset, worker_init_fn = build_siamfc_dataset(sampling_config, dataset_config, data_processor,
master_address, global_synchronized_rng, seed, event_register,
runtime_vars.rank)
dataloader = build_dataloader(data_config['batch_size'], runtime_vars, dataset, event_register,
worker_init_fn, data_batch_collator)
context['iterations_per_epoch'] = len(dataloader)
context['batch_size'] = data_config['batch_size']
return dataloader, {'data_pipeline': (metric_collector, )}
|
etna/transforms/timestamp/__init__.py | Pacman1984/etna | 326 | 11162210 | <reponame>Pacman1984/etna<gh_stars>100-1000
from etna.transforms.timestamp.date_flags import DateFlagsTransform
from etna.transforms.timestamp.fourier import FourierTransform
from etna.transforms.timestamp.holiday import HolidayTransform
from etna.transforms.timestamp.special_days import SpecialDaysTransform
from etna.transforms.timestamp.time_flags import TimeFlagsTransform
|
google_or_tools/nonogram_pbn_light.py | Wikunia/hakank | 279 | 11162230 | # webpbn.com Puzzle #803: You light up my life
# Copyright 2007 by <NAME>
#
rows = 45
row_rule_len = 4
row_rules = [
[0, 0, 0, 0],
[0, 0, 0, 1],
[0, 0, 0, 1],
[0, 0, 0, 3],
[0, 0, 2, 2],
[0, 0, 1, 1],
[0, 0, 0, 7],
[0, 0, 1, 1],
[0, 1, 3, 1],
[0, 1, 3, 1],
[0, 0, 1, 1],
[0, 0, 0, 11],
[0, 0, 1, 1],
[0, 0, 1, 1],
[0, 0, 2, 2],
[0, 0, 1, 1],
[0, 0, 1, 1],
[0, 0, 1, 1],
[0, 0, 1, 1],
[0, 0, 2, 2],
[0, 0, 1, 1],
[0, 0, 1, 1],
[0, 0, 1, 1],
[0, 0, 1, 1],
[0, 0, 1, 1],
[0, 0, 1, 1],
[0, 0, 1, 1],
[0, 0, 1, 1],
[0, 0, 1, 1],
[0, 0, 1, 1],
[0, 0, 2, 2],
[0, 0, 1, 1],
[0, 0, 1, 1],
[0, 0, 1, 1],
[0, 0, 1, 1],
[0, 0, 1, 1],
[0, 1, 4, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[0, 0, 0, 25],
[0, 0, 6, 5],
[0, 0, 5, 6],
[0, 0, 4, 5]
]
cols = 50
col_rule_len = 5
col_rules = [
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 2],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 2],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 2],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 5],
[0, 0, 0, 7, 1],
[0, 0, 0, 6, 1],
[0, 0, 0, 6, 1],
[0, 0, 1, 6, 1],
[0, 0, 0, 4, 1],
[0, 0, 0, 7, 1],
[0, 1, 1, 1, 1],
[2, 1, 2, 1, 1],
[3, 1, 2, 1, 1],
[2, 1, 2, 1, 1],
[0, 1, 1, 1, 1],
[0, 0, 0, 7, 6],
[0, 0, 4, 1, 1],
[0, 1, 6, 1, 1],
[0, 0, 0, 6, 6],
[0, 0, 0, 6, 1],
[0, 0, 0, 5, 1],
[0, 0, 0, 0, 7],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 2],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 2],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 2],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1]
]
|
dataflows/helpers/datapackage_processor.py | cschloer/dataflows | 160 | 11162239 | from .. import DataStreamProcessor, PackageWrapper
class datapackage_processor(DataStreamProcessor):
def __init__(self, dp_processor_func):
super(datapackage_processor, self).__init__()
self.func = dp_processor_func
self.dp = None
self.dp_processor = None
def process_datapackage(self, dp):
self.dp = PackageWrapper(dp)
self.dp_processor = self.func(self.dp)
ret = next(self.dp_processor)
if ret is None:
return dp
return ret
def process_resources(self, res_iter):
self.dp.it = res_iter
yield from self.dp_processor
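# Usage sketch (illustrative, based on the protocol implemented above): the
# wrapped generator receives the PackageWrapper, may first yield a modified
# datapackage (or None to keep it unchanged), and its remaining yields are the
# processed resources that get streamed back to the caller.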
|
kolibri/core/content/test/sqlalchemytesting.py | MBKayro/kolibri | 545 | 11162242 | from sqlalchemy import create_engine
from kolibri.core.content.utils.sqlalchemybridge import get_default_db_string
from kolibri.core.content.utils.sqlalchemybridge import SharingPool
def django_connection_engine():
if get_default_db_string().startswith("sqlite"):
return create_engine(
get_default_db_string(), poolclass=SharingPool, convert_unicode=True
)
return create_engine(
get_default_db_string(), convert_unicode=True, pool_pre_ping=True
)
|
docs/qa_formats.py | skiran252/FARM | 1,551 | 11162256 | ####################################
###### JSON (REST API) FORMAT ######
####################################
# INPUT
input = [{"questions": ["What is X?"], "text": "Some context containing the answer"}]
# OUTPUT
output= {
"task": "qa",
"predictions": [
{
"question": question,
"question_id": id,
"ground_truth": None,
"answers": answers,
"no_ans_gap": no_ans_gap # Add no_ans_gap to current no_ans_boost for switching top prediction
}
],
}
answer = {"score": score,
"probability": -1,
"answer": string,
"offset_answer_start": ans_start_ch,
"offset_answer_end": ans_end_ch,
"context": context_string,
"offset_context_start": context_start_ch,
"offset_context_end": context_end_ch,
"document_id": document_id}
###############################
###### SQUAD EVAL FORMAT ######
###############################
# INPUT
input = [{"qas": ["What is X?"], "context": "Some context containing the answer"}]
# OUTPUT
output = {"id": basket_id,
"preds": [[pred_str, start_t, end_t, score, sample_idx], ...]}
|
pydis_site/apps/resources/urls.py | doublevcodes/pysite | 700 | 11162266 | import typing
from pathlib import Path
from django_distill import distill_path
from pydis_site.apps.resources import views
app_name = "resources"
def get_all_resources() -> typing.Iterator[dict[str, str]]:
"""Yield a dict of all resource categories."""
for category in Path("pydis_site", "apps", "resources", "resources").iterdir():
yield {"category": category.name}
urlpatterns = [
distill_path("", views.ResourcesView.as_view(), name="index"),
distill_path(
"<str:category>/",
views.ResourcesListView.as_view(),
name="resources",
distill_func=get_all_resources
),
]
|
newsplease/pipeline/extractor/comparer/comparer_author.py | FrontFin/news-please | 1,311 | 11162317 | <reponame>FrontFin/news-please
class ComparerAuthor():
"""This class compares the titles of the list of ArticleCandidates and sends the result back to the Comparer."""
def extract(self, item, list_article_candidate):
"""Compares the extracted authors.
:param item: The corresponding NewscrawlerItem
:param list_article_candidate: A list, the list of ArticleCandidate-Objects which have been extracted
:return: A string, the most likely authors
"""
list_author = []
# The authors of the ArticleCandidates and the respective extractors are saved in a tuple in list_author.
for article_candidate in list_article_candidate:
if (article_candidate.author is not None) and (article_candidate.author != '[]'):
list_author.append((article_candidate.author, article_candidate.extractor))
# If there is no value in the list, return None.
if len(list_author) == 0:
return None
# If there are more options than one, return the result from newspaper.
list_newspaper = [x for x in list_author if x[1] == "newspaper"]
if len(list_newspaper) == 0:
# If there is no author extracted by newspaper, return the first result of list_author.
return list_author[0][0]
else:
return list_newspaper[0][0]
|
pysaliency/generics.py | Adrian398/pysaliency | 118 | 11162321 | from __future__ import absolute_import, print_function, division, unicode_literals
import time
import math
import sys
import os, errno
def makedirs(dirname):
"""Creates the directories for dirname via os.makedirs, but does not raise
an exception if the directory already exists and passes if dirname=""."""
if not dirname:
return
try:
os.makedirs(dirname)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def progressinfo(seq, verbose=True, length=None, prefix=''):
"""Yields from seq while displaying progress information.
Unlike mdp.utils.progressinfo, this routine does not
display the progress after each iteration but tries
to approximate adequate step sizes in order to print
the progress information roughly once per second.
-verbose: if False, the function behaves like `yield from seq`
-length: can be used to give the length of sequences that
have no __len__ attribute or to overwrite the length
-prefix: Will be printed before the status information.
"""
if not verbose:
for item in seq:
yield item
return
next_step = 1
step_size = 1
last_time = time.time()
start_time = last_time
if length is None:
if hasattr(seq, '__len__'):
length = len(seq)
if length is not None:
prec = int(math.ceil(math.log10(length)))
out_string = "\r{prefix}{{count:{prec}d}} ({{ratio:3.1f}}%)".format(prec=prec, prefix=prefix)
else:
length = 1
out_string = "\r{prefix}{{count:d}}".format(prefix=prefix)
steps = 0
for i, item in enumerate(seq):
yield item
if i == next_step:
this_time = time.time()
time_diff = this_time - last_time + 0.0001
normed_timediff = time_diff / (step_size)
new_step_size = int(math.ceil(1.0/normed_timediff))
#In order to avoid overshooting the right stepsize, we take
#a convex combination with the old stepsize (that will be
#too small at the beginning)
step_size = int(math.ceil(0.8*step_size+0.2*new_step_size))
last_time = this_time
next_step = i+step_size
print(out_string.format(count=i, ratio=1.0*i/length*100), end='')
sys.stdout.flush()
#steps += 1
print(out_string.format(count=length, ratio=100.0))
#end_time = time.time()
#print "Needed Steps: ", steps
#print "Last stepsize", step_size
#print "Needed time", end_time - start_time
#print "Avg time per step", (end_time - start_time) / steps
def getChunks(seq,verbose=True):
"""Yields chunks from seq while optionally displaying progress information.
after each chunk.
This routine tries
to approximate adaequate chunksizes in order to print
the progress information roughly ones per second.
"""
next_step = 1
step_size = 1
last_time = time.time()
start_time = last_time
length = len(seq)
prec = int(math.ceil(math.log10(length)))
out_string = "\r %{0}d (%3.1f %%)".format(prec)
steps = 0
next_chunk = []
for i, item in enumerate(seq):
next_chunk.append(item)
if i == next_step:
yield next_chunk
next_chunk = []
this_time = time.time()
time_diff = this_time - last_time + 0.0001
normed_timediff = time_diff / (step_size)
new_step_size = int(math.ceil(1.0/normed_timediff))
#In order to avoid overshooting the right stepsize, we take
#a convex combination with the old stepsize (that will be
#too small at the beginning)
step_size = int(math.ceil(0.8*step_size+0.2*new_step_size))
last_time = this_time
next_step = i+step_size
if verbose:
print(out_string % (i, 1.0*i/length*100), end='')
sys.stdout.flush()
#steps += 1
if next_chunk:
yield next_chunk
print(out_string % (length, 100.0))
#end_time = time.time()
#print "Needed Steps: ", steps
#print "Last stepsize", step_size
#print "Needed time", end_time - start_time
#print "Avg time per step", (end_time - start_time) / steps
def arange_list(l, maxcols=None, empty=False):
pass
if __name__ == '__main__':
#new_list= []
#for chunk in getChunks(range(10000), prefix='test'):
# new_list.extend(chunk)
#assert(new_list == range(10000))
for i in progressinfo(range(1000), prefix='test'):
pass
|
tests/core/test_tiling.py | CNES/cars | 134 | 11162382 | #!/usr/bin/env python
# coding: utf8
#
# Copyright (c) 2020 Centre National d'Etudes Spatiales (CNES).
#
# This file is part of CARS
# (see https://github.com/CNES/cars).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Test module for cars/core/tiling.py
"""
# Standard imports
from __future__ import absolute_import
import json
import os
import tempfile
# Third party imports
import fiona
import numpy as np
import pytest
from scipy.spatial import Delaunay # pylint: disable=no-name-in-module
from scipy.spatial import tsearch # pylint: disable=no-name-in-module
# CARS imports
from cars.core import tiling
# CARS Tests import
from ..helpers import temporary_dir
@pytest.mark.unit_tests
def test_grid():
grid = tiling.grid(0, 0, 500, 400, 90, 90)
assert grid.shape == (6, 7, 2)
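# Shape is consistent with one grid node per 90-unit step plus the region
# boundary: ceil(400 / 90) + 1 = 6 rows and ceil(500 / 90) + 1 = 7 columns,
# each holding an (x, y) pair.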
@pytest.mark.unit_tests
def test_split():
"""
Test split terrain method
"""
splits = tiling.split(0, 0, 500, 500, 100, 100)
assert len(splits) == 25
@pytest.mark.unit_tests
def test_crop():
"""
Test crop function
"""
region1 = [0, 0, 100, 100]
region2 = [50, 0, 120, 80]
cropped = tiling.crop(region1, region2)
assert cropped == [50, 0, 100, 80]
@pytest.mark.unit_tests
def test_pad():
"""
Test pad function
"""
region = [1, 2, 3, 4]
margin = [5, 6, 7, 8]
assert tiling.pad(region, margin) == [-4, -4, 10, 12]
@pytest.mark.unit_tests
def test_empty():
"""
Test empty function
"""
assert tiling.empty([0, 0, 0, 10])
assert tiling.empty([0, 0, 10, 0])
assert tiling.empty([10, 10, 0, 0])
assert not tiling.empty([0, 0, 10, 10])
@pytest.mark.unit_tests
def test_union():
"""
Test union function
"""
assert tiling.union([[0, 0, 5, 6], [2, 3, 10, 11]]) == (0, 0, 10, 11)
@pytest.mark.unit_tests
def test_list_tiles():
"""
Test list_tiles function
"""
region = [45, 65, 55, 75]
largest_region = [0, 0, 100, 100]
tile_size = 10
tiles = tiling.list_tiles(region, largest_region, tile_size, margin=0)
assert tiles == [
[40, 60, 50, 70],
[40, 70, 50, 80],
[50, 60, 60, 70],
[50, 70, 60, 80],
]
tiles = tiling.list_tiles(region, largest_region, tile_size, margin=1)
assert tiles == [
[30, 50, 40, 60],
[30, 60, 40, 70],
[30, 70, 40, 80],
[30, 80, 40, 90],
[40, 50, 50, 60],
[40, 60, 50, 70],
[40, 70, 50, 80],
[40, 80, 50, 90],
[50, 50, 60, 60],
[50, 60, 60, 70],
[50, 70, 60, 80],
[50, 80, 60, 90],
[60, 50, 70, 60],
[60, 60, 70, 70],
[60, 70, 70, 80],
[60, 80, 70, 90],
]
@pytest.mark.unit_tests
def test_roi_to_start_and_size():
"""
Test roi_to_start_and_size function
"""
res = tiling.roi_to_start_and_size([0, 0, 10, 10], 10)
assert res == (0, 10, 1, 1)
@pytest.mark.unit_tests
def test_snap_to_grid():
"""
Test snap_to_grid function
"""
assert (0, 0, 11, 11) == tiling.snap_to_grid(0.1, 0.2, 10.1, 10.2, 1.0)
# function parameters are fixtures set in conftest.py
@pytest.mark.unit_tests
def test_terrain_region_to_epipolar(
images_and_grids_conf, # pylint: disable=redefined-outer-name
disparities_conf, # pylint: disable=redefined-outer-name
epipolar_sizes_conf,
): # pylint: disable=redefined-outer-name
"""
Test transform to epipolar method
"""
configuration = images_and_grids_conf
configuration["preprocessing"]["output"].update(
disparities_conf["preprocessing"]["output"]
)
configuration["preprocessing"]["output"].update(
epipolar_sizes_conf["preprocessing"]["output"]
)
region = [5.1952, 44.205, 5.2, 44.208]
out_region = tiling.terrain_region_to_epipolar(region, configuration)
assert out_region == [0.0, 0.0, 612.0, 400.0]
# function parameters are fixtures set in conftest.py
@pytest.mark.unit_tests
@pytest.mark.parametrize(
",".join(["terrain_tile_size", "epipolar_tile_size", "nb_corresp_tiles"]),
[[500, 612, 1], [45, 70, 15]],
)
def test_tiles_pairing(
terrain_tile_size,
epipolar_tile_size,
nb_corresp_tiles,
images_and_grids_conf, # pylint: disable=redefined-outer-name
disparities_conf, # pylint: disable=redefined-outer-name
epipolar_sizes_conf,
): # pylint: disable=redefined-outer-name
"""
Test terrain_grid_to_epipolar + get_corresponding_tiles
"""
configuration = images_and_grids_conf
configuration["preprocessing"]["output"].update(
disparities_conf["preprocessing"]["output"]
)
configuration["preprocessing"]["output"].update(
epipolar_sizes_conf["preprocessing"]["output"]
)
# fill constants with final dsm footprint
terrain_region = [675248, 4897075, 675460.5, 4897173]
largest_epipolar_region = [0, 0, 612, 612]
disp_min, disp_max = -20, 15
epsg = 32631
terrain_grid = tiling.grid(
*terrain_region, terrain_tile_size, terrain_tile_size
)
epipolar_regions_params = [
*largest_epipolar_region,
epipolar_tile_size,
epipolar_tile_size,
]
epipolar_regions = tiling.split(*epipolar_regions_params)
epipolar_regions_grid = tiling.grid(*epipolar_regions_params)
epipolar_regions_hash = [
tiling.region_hash_string(k) for k in epipolar_regions
]
# compute points min/max epipolar corresponding to terrain grid
points_min, points_max = tiling.terrain_grid_to_epipolar(
terrain_grid,
epipolar_regions_grid,
configuration,
disp_min,
disp_max,
epsg,
)
# Fill needed confdata with epipolar image information
confdata = {}
confdata["c1"] = {}
confdata["c1"]["epipolar_points_min"] = points_min
confdata["c1"]["epipolar_points_max"] = points_max
confdata["c1"]["largest_epipolar_region"] = largest_epipolar_region
confdata["c1"]["opt_epipolar_tile_size"] = epipolar_tile_size
confdata["c1"]["epipolar_regions_hash"] = epipolar_regions_hash
confdata["c1"]["delayed_point_clouds"] = epipolar_regions
# get epipolar tiles corresponding to the terrain grid
terrain_regions, corresp_tiles, __ = tiling.get_corresponding_tiles(
terrain_grid, confdata
)
# count the number of epipolar tiles for the first terrain tile
assert len(corresp_tiles[0]) == nb_corresp_tiles
ter_geodict, epi_geodict = tiling.get_paired_regions_as_geodict(
terrain_regions, corresp_tiles
)
# check geodict writing
with tempfile.TemporaryDirectory(dir=temporary_dir()) as tmp_dir:
ter_filename = f"terrain_tiles_{nb_corresp_tiles}.geojson"
epi_filename = f"epipolar_tiles_{nb_corresp_tiles}.geojson"
# CRS for all GeoJSON is epsg:4326: to convert for QGIS:
# > ogr2ogr -f "GeoJSON" out.geojson in.geojson \
# > -s_srs EPSG:32631 -t_srs EPSG:4326
with open(
os.path.join(tmp_dir, ter_filename), "w", encoding="utf-8"
) as writer:
writer.write(json.dumps(ter_geodict))
with open(
os.path.join(tmp_dir, epi_filename), "w", encoding="utf-8"
) as writer:
writer.write(json.dumps(epi_geodict))
for tmp_filename in [ter_filename, epi_filename]:
with fiona.open(os.path.join(tmp_dir, tmp_filename)):
pass
@pytest.mark.unit_tests
def test_filter_simplices_on_the_edges():
"""
Test filter simplices on the edges
"""
epipolar_grid = tiling.grid(0, 0, 2, 2, 1, 1)
# shift one point to obtain a concave hull
epipolar_grid[0, 1, 1] = 0.5
epipolar_grid_shape = epipolar_grid.shape[:2]
projected_epipolar = epipolar_grid.reshape(-1, 2)
terrain_grid = np.array(
[
[0.25, 0.25], # in a triangle
[0.75, 1.25], # in a triangle
[0.25, 1.75], # in a triangle
[2.05, 1.00], # not in a triangle
[1.00, 0.25], # in a "edges" triangle
]
)
tri = Delaunay(projected_epipolar)
simplices = tsearch(tri, terrain_grid)
original_simplices = simplices.copy()
tiling.filter_simplices_on_the_edges(epipolar_grid_shape, tri, simplices)
# only the last point must be filtered
(diff_indexes,) = np.where(original_simplices != simplices)
assert diff_indexes.tolist() == [4]
assert simplices[4] == -1
|
chainermn/extensions/_multi_node_snapshot.py | zjzh/chainer | 3,705 | 11162402 | <reponame>zjzh/chainer
import io
from chainer.serializers import load_npz
from chainer.serializers import save_npz
from chainer.training.extension import Extension
from chainer.training.extensions._snapshot import _find_latest_snapshot
def multi_node_snapshot(comm, snapshot, replica_sets):
'''Create trainer extension for multi-node snapshots
Provides a generic multi-node snapshot saving and auto-load feature
in a multi-node environment, leveraging the power of single-node
snapshots.
In many cases the snapshot target may differ between processes,
e.g. often only the trainer of the rank 0 process has extensions
such as ``LogReport``, so as not to clutter the terminal output.
Just loading at one process and broadcasting it to the other
processes does not work in that case.
This wrapper addresses that issue by defining sets of replicas
where, within a set, the target object is replicated and supposed
to be the same among processes. For example, in the trainer below
only the trainer at rank ``0`` has special extensions and the
others don't::
trainer = Trainer(updater)
if comm.rank == 0:
trainer.extend(extensions.DumpGraph('main/loss'))
trainer.extend(extensions.LogReport())
trainer.extend(extensions.PrintReport(
['epoch', 'main/loss', 'validation/main/loss',
'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
trainer.extend(extensions.ProgressBar())
This case can be described with two replica sets, where each set
can be represented as a single integer that indicates a rank number,
or an iterable set/list/generator of integers, like this::
replica_sets = [[0], range(1, comm.size)]
Here the first replica set is described as ``[0]``, or in short
just ``0``, and the second replica set is ``range(1, comm.size)``,
representing the rest of the processes other than ``0``. The
remaining set can be omitted. Thus in that case, it can be
simplified further::
replica_sets = [0,]
In this case, the snapshot will be saved by the rank ``0`` process
and by the rank ``1`` process, the latter representing the replica
set ``range(1, comm.size)``. Autoloading at initialization of the
snapshot extension then works cleanly after a restart, even though
the size of the communicator differs.
Once the replica sets are defined, it can be easily extended::
replica_sets = [0,]
snapshot = multi_node_snapshot(comm, extensions.snapshot(),
replica_sets)
trainer.extend(snapshot, trigger=(1, 'epoch'))
More example tuples of replica set representations follow:
===================== ===== ==============================================
code nproc actual sets
===================== ===== ==============================================
``[0]`` ``4`` ``[{0}, {1, 2, 3}]``
``[0, 1]`` ``4`` ``[{0}, {1}, {2, 3}]``
``[[0, 1], [2, 3]]`` ``4`` ``[{0, 1}, {2, 3}]``
``[]`` ``4`` ``[{0, 1, 2, 3}]``
``[range(0, 8, 2)]`` ``8`` ``[set(range(0, 8, 2)), set(range(1, 8, 2))]``
===================== ===== ==============================================
Args:
comm (ChainerMN communicator): communicator object
snapshot: Snapshot extension object obtained via
:meth:`~chainer.training.extensions.snapshot` .
replica_sets: list of replica set definitions, where
a replica set can be defined by a single integer
(a rank number) or an iterable of integers.
Returns:
Trainer extension that wraps ``snapshot`` and properly
controls the number of snapshots.
'''
return _MultiNodeSnapshot(comm, snapshot, replica_sets)
def _parse_replica_sets(replica_sets, size):
sets = []
for replica_set in replica_sets:
if isinstance(replica_set, int):
assert replica_set >= 0
assert replica_set < size
sets.append({replica_set})
else:
# Must be iterable
for i in replica_set:
assert i >= 0
assert i < size
sets.append(set(replica_set))
if size > sum(len(s) for s in sets):
all_ranks = set(range(size))
all_exp = set()
for s in sets:
all_exp |= s
rest = all_ranks - all_exp
if rest:
sets.append(rest)
# Must guarantee: every rank is covered (no rank may be missing)
assert size == sum(len(s) for s in sets)
# Must guarantee: no two sets intersect.
all_sum = set()
for s in sets:
all_sum |= s
assert size == len(all_sum)
return sets
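# Examples, mirroring the table in the multi_node_snapshot docstring:
# _parse_replica_sets([0], 4) -> [{0}, {1, 2, 3}]
# _parse_replica_sets([[0, 1], [2, 3]], 4) -> [{0, 1}, {2, 3}]
# _parse_replica_sets([], 4) -> [{0, 1, 2, 3}]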
class _MultiNodeSnapshot(Extension):
def __init__(self, comm, snapshot, replica_sets):
assert comm is not None
assert snapshot is not None
self.comm = comm
self.snapshot = snapshot
# Append rank number to snapshot filename format/function
if callable(snapshot.filename):
filename_fun = snapshot.filename
def append_rank(trainer):
filename = filename_fun(trainer)
return '{}.{}'.format(filename, comm.rank)
snapshot.filename = append_rank
else:
filename = '{}.{}'.format(snapshot.filename, comm.rank)
snapshot.filename = filename
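# e.g. assuming the usual 'snapshot_iter_{.updater.iteration}' default, the
# rank-2 process ends up writing files named like 'snapshot_iter_1000.2'.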
sets = _parse_replica_sets(replica_sets, comm.size)
self.master = None
self.replica_set = []
for s in sets:
if self.comm.rank in s:
self.master = min(s)
self.replica_set = s
break
assert self.master is not None
assert self.comm.rank in self.replica_set
@property
def is_master(self):
return self.master == self.comm.rank
def initialize(self, trainer):
if self.is_master:
self.snapshot.initialize(trainer)
# If autoload is off, no need to re-init this extension.
if not self.snapshot.autoload:
return
if self.snapshot._target is None:
target = trainer
else:
target = self.snapshot._target
# "Broadcast" the target here
if self.is_master:
# Find snapshot again
# TODO(kuenishi): replace with cleaner way to know whether
# a snapshot is autoloaded or not
filename = _find_latest_snapshot(self.snapshot.filename,
trainer.out)
if filename is None:
data = None
else:
buf = io.BytesIO()
save_npz(buf, target)
data = buf.getvalue()
for rank in self.replica_set:
if rank == self.comm.rank:
continue
self.comm.send_obj(data, rank)
# Get the loaded target from master
else:
data = self.comm.recv_obj(self.master)
if data is None:
return
load_npz(io.BytesIO(data), target)
def on_error(self, trainer, e, t):
if self.is_master:
self.snapshot.on_error(trainer, e, t)
def __call__(self, trainer):
if self.is_master:
self.snapshot(trainer)
def finalize(self):
if self.is_master:
self.snapshot.finalize()
|