max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars)
---|---|---|---|---
tests/h/search/index_test.py | BearerPipelineTest/h | 2,103 | 11136914
import logging
from unittest import mock
from unittest.mock import sentinel
import elasticsearch
import pytest
from h.search.index import BatchIndexer
@pytest.mark.usefixtures("nipsa_service")
class TestBatchIndexer:
def test_it_indexes_all_annotations(
self, batch_indexer, factories, get_indexed_ann
):
annotations = factories.Annotation.create_batch(3)
ids = [a.id for a in annotations]
batch_indexer.index()
for _id in ids:
assert get_indexed_ann(_id) is not None
@pytest.mark.parametrize("target_index", (None, "custom_index"))
def test_it_accepts_different_indexes(self, target_index, es_client):
indexer = BatchIndexer(
session=sentinel.db,
es_client=es_client,
request=sentinel.request,
target_index=target_index,
)
assert indexer._target_index == (  # pylint:disable=protected-access
target_index if target_index else es_client.index
)
def test_it_indexes_specific_annotations(
self, batch_indexer, factories, get_indexed_ann
):
annotations = factories.Annotation.create_batch(5)
ids = [a.id for a in annotations]
ids_to_index = ids[:3]
ids_not_to_index = ids[3:]
batch_indexer.index(ids_to_index)
for _id in ids_to_index:
assert get_indexed_ann(_id) is not None
for _id in ids_not_to_index:
with pytest.raises(elasticsearch.exceptions.NotFoundError):
get_indexed_ann(_id)
def test_it_does_not_index_deleted_annotations(
self, batch_indexer, factories, get_indexed_ann
):
ann = factories.Annotation()
# create deleted annotations
ann_del = factories.Annotation(deleted=True)
batch_indexer.index()
assert get_indexed_ann(ann.id) is not None
with pytest.raises(elasticsearch.exceptions.NotFoundError):
get_indexed_ann(ann_del.id)
def test_it_logs_indexing_status(self, caplog, batch_indexer, factories):
num_annotations = 10
window_size = 3
num_index_records = 0
annotations = factories.Annotation.create_batch(num_annotations)
ids = [a.id for a in annotations]
with caplog.at_level(logging.INFO):
batch_indexer.index(ids, window_size)
for record in caplog.records:
if record.filename == "index.py":
num_index_records = num_index_records + 1
assert "indexed 0k annotations, rate=" in record.getMessage()
assert num_index_records == num_annotations // window_size
def test_it_correctly_indexes_fields_for_bulk_actions(
self, batch_indexer, factories, get_indexed_ann
):
annotations = factories.Annotation.create_batch(2, groupid="group_a")
batch_indexer.index()
for ann in annotations:
result = get_indexed_ann(ann.id)
assert result.get("group") == ann.groupid
assert result.get("authority") == ann.authority
assert result.get("user") == ann.userid
assert result.get("uri") == ann.target_uri
def test_it_returns_errored_annotation_ids(self, batch_indexer, factories):
annotations = factories.Annotation.create_batch(3)
expected_errored_ids = {annotations[0].id, annotations[2].id}
elasticsearch.helpers.streaming_bulk = mock.Mock()
elasticsearch.helpers.streaming_bulk.return_value = [
(False, {"index": {"error": "some error", "_id": annotations[0].id}}),
(True, {}),
(False, {"index": {"error": "some error", "_id": annotations[2].id}}),
]
errored = batch_indexer.index()
assert errored == expected_errored_ids
def test_it_does_not_error_if_annotations_already_indexed(
self, db_session, es_client, factories, pyramid_request
):
annotations = factories.Annotation.create_batch(3)
expected_errored_ids = {annotations[1].id}
elasticsearch.helpers.streaming_bulk = mock.Mock()
elasticsearch.helpers.streaming_bulk.return_value = [
(True, {}),
(False, {"create": {"error": "some error", "_id": annotations[1].id}}),
(
False,
{
"create": {
"error": "document already exists",
"_id": annotations[2].id,
}
},
),
]
errored = BatchIndexer(
db_session, es_client, pyramid_request, es_client.index, "create"
).index()
assert errored == expected_errored_ids
@pytest.fixture
def batch_indexer( # pylint:disable=unused-argument
db_session, es_client, pyramid_request, moderation_service
):
return BatchIndexer(db_session, es_client, pyramid_request)
@pytest.fixture
def get_indexed_ann(es_client):
def _get(annotation_id):
"""
Return the annotation with the given ID from Elasticsearch.
Raises if the annotation is not found.
"""
return es_client.conn.get(
index=es_client.index, doc_type=es_client.mapping_type, id=annotation_id
)["_source"]
return _get

wandb/vendor/graphql-core-1.1/wandb_graphql/validation/rules/unique_fragment_names.py | borisgrafx/client | 3,968 | 11136920
from ...error import GraphQLError
from .base import ValidationRule
class UniqueFragmentNames(ValidationRule):
__slots__ = 'known_fragment_names',
def __init__(self, context):
super(UniqueFragmentNames, self).__init__(context)
self.known_fragment_names = {}
def enter_OperationDefinition(self, node, key, parent, path, ancestors):
return False
def enter_FragmentDefinition(self, node, key, parent, path, ancestors):
fragment_name = node.name.value
if fragment_name in self.known_fragment_names:
self.context.report_error(GraphQLError(
self.duplicate_fragment_name_message(fragment_name),
[self.known_fragment_names[fragment_name], node.name]
))
else:
self.known_fragment_names[fragment_name] = node.name
return False
@staticmethod
def duplicate_fragment_name_message(field):
return 'There can only be one fragment named "{}".'.format(field)

atomate/vasp/fireworks/tests/test_lobster.py | rkingsbury/atomate | 167 | 11136932
import os
import unittest
from atomate.vasp.fireworks.core import StaticFW
from atomate.vasp.fireworks.lobster import LobsterFW
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
__author__ = "<NAME>, <NAME>"
__email__ = "<EMAIL>"
module_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)))
db_dir = os.path.join(module_dir, "..", "..", "..", "common", "test_files")
reference_dir = os.path.join(module_dir, "..", "..", "test_files")
class TestLobsterFireworks(unittest.TestCase):
def setUp(self):
coords = [[0, 0, 0], [0.75, 0.5, 0.75]]
lattice = Lattice(
[
[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603],
]
)
self.structure = Structure(lattice, ["Si", "Si"], coords)
def testLobsterFW(self):
static_fw = StaticFW(structure=self.structure).name
self.assertEqual(
LobsterFW(structure=self.structure, parents=static_fw).name,
"Si-lobster_calculation",
)
lobster_fw = LobsterFW(
prev_calc_dir="/", delete_wavecar=True, delete_wavecar_previous_fw=True
)
self.assertEqual(lobster_fw.name, "unknown-lobster_calculation")
self.assertEqual(lobster_fw.tasks[0]["calc_dir"], "/")
self.assertEqual(len(lobster_fw.tasks), 7)
lobster_fw = LobsterFW(
prev_calc_dir="/", delete_wavecar=False, delete_wavecar_previous_fw=False
)
self.assertEqual(len(lobster_fw.tasks), 5)
# check for ValueError when no parent or calc_dir are provided
with self.assertRaises(ValueError):
LobsterFW()
if __name__ == "__main__":
unittest.main()

SegNet_Conv/test.py | xwshi/Semantic-Segmentation | 380 | 11136934
#---------------------------------------------#
# 该部分用于查看网络结构
#---------------------------------------------#
from nets.segnet import convnet_segnet
if __name__ == "__main__":
model = convnet_segnet(2, input_height=416, input_width=416)
model.summary()

sympy/ntheory/bbp_pi.py | iamabhishek0/sympy | 445 | 11136961
'''
This implementation is a heavily modified fixed point implementation of
BBP_formula for calculating the nth position of pi. The original hosted
at: http://en.literateprograms.org/Pi_with_the_BBP_formula_(Python)
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sub-license, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Modifications:
1.Once the nth digit and desired number of digits is selected, the
number of digits of working precision is calculated to ensure that
the hexadecimal digits returned are accurate. This is calculated as
int(math.log(start + prec)/math.log(16) + prec + 3)
(i.e. the number of hex digits, plus 3 additional guard digits)
This was checked by the following code which completed without
errors (and dig are the digits included in the test_bbp.py file):
for i in range(0,1000):
for j in range(1,1000):
a, b = pi_hex_digits(i, j), dig[i:i+j]
if a != b:
print('%s\n%s'%(a,b))
Decreasing the additional digits by 1 generated errors, so '3' is
the smallest additional precision needed to calculate the above
loop without errors. The following trailing 10 digits were also
checked to be accurate (and the times were slightly faster with
some of the constant modifications that were made):
>> from time import time
>> t=time();pi_hex_digits(10**2-10 + 1, 10), time()-t
('e90c6cc0ac', 0.0)
>> t=time();pi_hex_digits(10**4-10 + 1, 10), time()-t
('26aab49ec6', 0.17100000381469727)
>> t=time();pi_hex_digits(10**5-10 + 1, 10), time()-t
('a22673c1a5', 4.7109999656677246)
>> t=time();pi_hex_digits(10**6-10 + 1, 10), time()-t
('9ffd342362', 59.985999822616577)
>> t=time();pi_hex_digits(10**7-10 + 1, 10), time()-t
('c1a42e06a1', 689.51800012588501)
2. The while loop to evaluate whether the series has converged quits
when the addition amount `dt` has dropped to zero.
3. the formatting string to convert the decimal to hexadecimal is
calculated for the given precision.
4. pi_hex_digits(n) changed to have coefficient to the formula in an
array (perhaps just a matter of preference).
'''
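# Illustration (not part of the original module): for pi_hex_digits(0, 14),
# the working precision is int(math.log(0 + 14)/math.log(16) + 14 + 3) = 17
# hex digits, i.e. 3 guard digits beyond the requested span.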
from __future__ import print_function, division
import math
from sympy.core.compatibility import range, as_int
def _series(j, n, prec=14):
# Left sum from the bbp algorithm
s = 0
D = _dn(n, prec)
D4 = 4 * D
k = 0
d = 8 * k + j
for k in range(n + 1):
s += (pow(16, n - k, d) << D4) // d
d += 8
# Right sum iterates to infinity for full precision, but we
# stop at the point where one iteration is beyond the precision
# specified.
t = 0
k = n + 1
e = 4*(D + n - k)
d = 8 * k + j
while True:
dt = (1 << e) // d
if not dt:
break
t += dt
# k += 1
e -= 4
d += 8
total = s + t
return total
def pi_hex_digits(n, prec=14):
"""Returns a string containing ``prec`` (default 14) digits
starting at the nth digit of pi in hex. Counting of digits
starts at 0 and the decimal is not counted, so for n = 0 the
returned value starts with 3; n = 1 corresponds to the first
digit past the decimal point (which in hex is 2).
Examples
========
>>> from sympy.ntheory.bbp_pi import pi_hex_digits
>>> pi_hex_digits(0)
'3243f6a8885a30'
>>> pi_hex_digits(0, 3)
'324'
References
==========
.. [1] http://www.numberworld.org/digits/Pi/
"""
n, prec = as_int(n), as_int(prec)
if n < 0:
raise ValueError('n cannot be negative')
if prec == 0:
return ''
# main of implementation arrays holding formulae coefficients
n -= 1
a = [4, 2, 1, 1]
j = [1, 4, 5, 6]
#formulae
D = _dn(n, prec)
x = + (a[0]*_series(j[0], n, prec)
- a[1]*_series(j[1], n, prec)
- a[2]*_series(j[2], n, prec)
- a[3]*_series(j[3], n, prec)) & (16**D - 1)
s = ("%0" + "%ix" % prec) % (x // 16**(D - prec))
return s
def _dn(n, prec):
# controller for n dependence on precision
# n = starting digit index
# prec = the number of total digits to compute
n += 1 # because we subtract 1 for _series
return int(math.log(n + prec)/math.log(16) + prec + 3)

Python/hello_hack_2018.py | PushpneetSingh/Hello-world | 1,428 | 11136963
print ("Hello Hacktoberfest 2018 World")

octavia/statistics/drivers/logger.py | zhangi/octavia | 129 | 11136965
# Copyright 2018 GoDaddy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from octavia.statistics import stats_base
LOG = logging.getLogger(__name__)
class StatsLogger(stats_base.StatsDriverMixin):
def update_stats(self, listener_stats, deltas=False):
for stats_object in listener_stats:
LOG.info("Logging listener stats%s for listener `%s` / "
"amphora `%s`: %s",
' deltas' if deltas else '',
stats_object.listener_id, stats_object.amphora_id,
stats_object.get_stats())

onnxruntime/test/testdata/coreml_argmax_cast_test.py | mszhanyi/onnxruntime | 669 | 11136975
import onnx
from onnx import TensorProto, helper
# CoreML EP currently handles a special case for supporting ArgMax op
# Please see in <repo_root>/onnxruntime/core/providers/coreml/builders/impl/argmax_op_builder.cc and
# <repo_root>/onnxruntime/core/providers/coreml/builders/impl/cast_op_builder.cc
# We have this separated test script to generate graph for the case: An ArgMax followed by a Cast to int32 type
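# The generated graph below is simply:
#   X (float, 3x2x2) -> ArgMax(axis=1, keepdims=1) -> int64 -> Cast(to=INT32) -> Y (int32, 3x1x2)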
def GenerateModel(model_name):
nodes = [
helper.make_node("ArgMax", ["X"], ["argmax_output_int64"], "argmax", axis=1, keepdims=1),
helper.make_node("Cast", ["argmax_output_int64"], ["Y"], "cast", to=6), # cast to int32 type
]
graph = helper.make_graph(
nodes,
"CoreML_ArgMax_Cast_Test",
[ # input
helper.make_tensor_value_info("X", TensorProto.FLOAT, [3, 2, 2]),
],
[ # output
helper.make_tensor_value_info("Y", TensorProto.INT32, [3, 1, 2]),
],
)
model = helper.make_model(graph)
onnx.save(model, model_name)
if __name__ == "__main__":
GenerateModel("coreml_argmax_cast_test.onnx")

tests/someapp/models.py | oss-transfer/django-neomodel | 166 | 11136981
from datetime import datetime
from django.db import models
from django_neomodel import DjangoNode
from neomodel import StringProperty, DateTimeProperty, UniqueIdProperty
class Library(models.Model):
name = models.CharField(max_length=10)
class Meta:
app_label = 'someapp'
class Book(DjangoNode):
uid = UniqueIdProperty(primary_key=True)
title = StringProperty(unique_index=True)
format = StringProperty(required=True) # check required field can be omitted on update
status = StringProperty(choices=(
('available', 'A'),
('on_loan', 'L'),
('damaged', 'D'),
), default='available', coerce=str)
created = DateTimeProperty(default=datetime.utcnow)
class Meta:
app_label = "someapp"
def __str__(self):
return self.title
class Shelf(DjangoNode):
uid = UniqueIdProperty(primary_key=True)
name = StringProperty()
class Meta:
app_label = "someapp"
def __str__(self):
return self.name

src/ui/uivar.py | JayHeng/-NXP-MCUBootUtility | 174 | 11136987
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import json
import uidef
import RTyyyy_uidef
import RTxxx_uidef
g_exeTopRoot = None
g_soundEffectType = None
g_languageIndex = None
g_hasSubWinBeenOpened = False
g_efuseDict = {'0x400_lock':0x00000000,
'0x450_bootCfg0':0x00000000,
'0x460_bootCfg1':0x00000000,
'0x470_bootCfg2':0x00000000,
'0x6d0_miscConf0':0x00000000,
'0x6e0_miscConf1':0x00000000
}
g_cfgFilename = None
g_toolCommDict = {'toolRunMode':None,
'isDymaticUsbDetection':None,
'soundEffectType':None,
'isSbFileEnabledToGen':None,
'isAutomaticImageReadback':None,
'flashloaderResident':None,
'efuseGroupSel':None,
'isAutomaticEfuseLocker':None,
'flexspiXipRegionSel':None,
'isIvtEntryResetHandler':None,
'isEnglishLanguage':None,
'secBootType':None,
'mcuSeries':None,
'mcuDevice':None,
'bootDevice':None,
'isUsbhidPortSelected':None,
'isOneStepChecked':None,
'certSerial':None,
'certKeyPass':None,
'appFilename':None,
'appFormat':None,
'appBinBaseAddr':None,
'keyStoreRegion':None,
'certOptForHwCrypto':None
}
g_flexspiNorOpt0 = None
g_flexspiNorOpt1 = None
g_flexspiNorDeviceModel = None
g_isFdcbKept = None
g_flexspiNandOpt0 = None
g_flexspiNandOpt1 = None
g_flexspiNandFcbOpt = None
g_flexspiNandImageInfoList = [None] * 8
g_semcNorOpt = None
g_semcNorSetting = None
g_semcNorDeviceModel = None
g_semcNandOpt = None
g_semcNandFcbOpt = None
g_semcNandImageInfoList = [None] * 8
g_usdhcSdOpt = None
g_usdhcMmcOpt0 = None
g_usdhcMmcOpt1 = None
g_lpspiNorOpt0 = None
g_lpspiNorOpt1 = None
g_flexcommSpiNorOpt0 = None
g_flexcommSpiNorOpt1 = None
g_dcdCtrlDict = {'isDcdEnabled':None,
'dcdFileType':None}
g_dcdSettingsDict = {'dcdSource':None,
'userBinFile':None,
'userCfgFile':None,
'dcdPurpose':None,
'sdramBase':None,
'deviceModel':None,
'dcdDesc':None}
g_certSettingsDict = {'cstVersion':None,
'useExistingCaKey':None,
'useEllipticCurveCrypto':None,
'pkiTreeKeyLen':None,
'pkiTreeKeyCn':None,
'pkiTreeDuration':None,
'SRKs':None,
'caFlagSet':None}
g_signSettingsDict = {'isPartSigned':None,
'signedStart0':None,
'signedSize0':None,
'signedStart1':None,
'signedSize1':None,
'signedStart2':None,
'signedSize2':None}
g_otpmkKeyCommDict = {'secureBootType':None,
'opt':None,
'regionStartList':[None] * 4,
'regionLengthList':[None] * 4}
g_userKeyCtrlDict = {'mcu_device':None,
# For BEE
'engine_sel':None,
'engine0_key_src':None,
'engine0_fac_cnt':None,
'engine1_key_src':None,
'engine1_fac_cnt':None,
# For OTFAD
'total_regions':None,
'kek_src':None,
}
g_userKeyCmdDict = {'base_addr':None,
'hw_eng':None,
# For BEE
'engine0_key':None,
'engine0_arg':None,
'engine0_lock':None,
'engine1_key':None,
'engine1_arg':None,
'engine1_lock':None,
'use_zero_key':None,
'is_boot_image':None,
# For OTFAD
'kek':None,
'otfad_arg':None,
'scramble':None,
'scramble_align':None,
'otfad_ctx_lock':None,
}
def initVar(cfgFilename):
global g_hasSubWinBeenOpened
global g_cfgFilename
global g_toolCommDict
global g_flexspiNorOpt0
global g_flexspiNorOpt1
global g_flexspiNorDeviceModel
global g_isFdcbKept
global g_flexspiNandOpt0
global g_flexspiNandOpt1
global g_flexspiNandFcbOpt
global g_flexspiNandImageInfoList
global g_semcNandOpt
global g_semcNandFcbOpt
global g_semcNandImageInfoList
global g_lpspiNorOpt0
global g_lpspiNorOpt1
global g_flexcommSpiNorOpt0
global g_flexcommSpiNorOpt1
global g_semcNorOpt
global g_semcNorSetting
global g_semcNorDeviceModel
global g_usdhcSdOpt
global g_usdhcMmcOpt0
global g_usdhcMmcOpt1
global g_dcdCtrlDict
global g_dcdSettingsDict
global g_certSettingsDict
global g_signSettingsDict
global g_otpmkKeyCommDict
global g_userKeyCtrlDict
global g_userKeyCmdDict
g_hasSubWinBeenOpened = False
g_cfgFilename = cfgFilename
if os.path.isfile(cfgFilename):
cfgDict = None
with open(cfgFilename, 'r') as fileObj:
cfgDict = json.load(fileObj)
fileObj.close()
g_toolCommDict = cfgDict["cfgToolCommon"][0]
g_flexspiNorOpt0 = cfgDict["cfgFlexspiNor"][0]
g_flexspiNorOpt1 = cfgDict["cfgFlexspiNor"][1]
g_flexspiNorDeviceModel = cfgDict["cfgFlexspiNor"][2]
g_isFdcbKept = cfgDict["cfgFlexspiNor"][3]
g_flexspiNandOpt0 = cfgDict["cfgFlexspiNand"][0]
g_flexspiNandOpt1 = cfgDict["cfgFlexspiNand"][1]
g_flexspiNandFcbOpt = cfgDict["cfgFlexspiNand"][2]
g_flexspiNandImageInfoList = cfgDict["cfgFlexspiNand"][3]
g_semcNandOpt = cfgDict["cfgSemcNand"][0]
g_semcNandFcbOpt = cfgDict["cfgSemcNand"][1]
g_semcNandImageInfoList = cfgDict["cfgSemcNand"][2]
g_lpspiNorOpt0 = cfgDict["cfgLpspiNor"][0]
g_lpspiNorOpt1 = cfgDict["cfgLpspiNor"][1]
g_flexcommSpiNorOpt0 = cfgDict["cfgFlexcommSpiNor"][0]
g_flexcommSpiNorOpt1 = cfgDict["cfgFlexcommSpiNor"][1]
g_semcNorOpt = cfgDict["cfgSemcNor"][0]
g_semcNorSetting = cfgDict["cfgSemcNor"][1]
g_semcNorDeviceModel = cfgDict["cfgSemcNor"][2]
g_usdhcSdOpt = cfgDict["cfgUsdhcSd"][0]
g_usdhcMmcOpt0 = cfgDict["cfgUsdhcMmc"][0]
g_usdhcMmcOpt1 = cfgDict["cfgUsdhcMmc"][1]
g_dcdCtrlDict = cfgDict["cfgDcd"][0]
g_dcdSettingsDict = cfgDict["cfgDcd"][1]
g_certSettingsDict = cfgDict["cfgCertificate"][0]
g_signSettingsDict = cfgDict["cfgSignature"][0]
g_otpmkKeyCommDict = cfgDict["cfgSnvsKey"][0]
g_userKeyCtrlDict = cfgDict["cfgUserKey"][0]
g_userKeyCmdDict = cfgDict["cfgUserKey"][1]
else:
g_toolCommDict = {'toolRunMode':uidef.kToolRunMode_Master,
'isDymaticUsbDetection':True,
'soundEffectType':'contra',
'isSbFileEnabledToGen':False,
'isAutomaticImageReadback':False,
'flashloaderResident':None,
'efuseGroupSel':0,
'isAutomaticEfuseLocker':True,
'flexspiXipRegionSel':0,
'isIvtEntryResetHandler':False,
'isEnglishLanguage':True,
'secBootType':0,
'mcuSeries':0,
'mcuDevice':0,
'bootDevice':0,
'isUsbhidPortSelected':True,
'isOneStepChecked':True,
'certSerial':'12345678',
'certKeyPass':'<PASSWORD>',
'appFilename':None,
'appFormat':0,
'appBinBaseAddr':'Eg: 0x00003000',
'keyStoreRegion':1,
'certOptForHwCrypto':0
}
g_flexspiNorOpt0 = 0xc0000007
g_flexspiNorOpt1 = 0x00000000
g_flexspiNorDeviceModel = uidef.kFlexspiNorDevice_None
g_isFdcbKept = False
g_flexspiNandOpt0 = 0xC0010023
g_flexspiNandOpt1 = 0x0000002C
g_flexspiNandFcbOpt = 0xc2000104
g_flexspiNandImageInfoList = [None] * 8
g_flexspiNandImageInfoList[0] = 0x00040004
g_semcNandOpt = 0xD0010101
g_semcNandFcbOpt = 0x00010101
g_semcNandImageInfoList = [None] * 8
g_semcNandImageInfoList[0] = 0x00020001
g_lpspiNorOpt0 = 0xc1100500
g_lpspiNorOpt1 = 0x00000000
g_flexcommSpiNorOpt0 = 0xc0000000
g_flexcommSpiNorOpt1 = 0x00000000
g_semcNorOpt = 0xD0000600
g_semcNorSetting = 0x00010601
g_semcNorDeviceModel = uidef.kSemcNorDevice_None
g_usdhcSdOpt = 0xD0000000
g_usdhcMmcOpt0 = 0xC0000000
g_usdhcMmcOpt1 = 0x00000000
g_dcdCtrlDict['isDcdEnabled'] = False
g_dcdCtrlDict['dcdFileType'] = None
g_dcdSettingsDict['dcdSource'] = 'Disable DCD'
g_dcdSettingsDict['userBinFile'] = 'N/A'
g_dcdSettingsDict['userCfgFile'] = 'N/A'
g_dcdSettingsDict['dcdPurpose'] = 'SDRAM'
g_dcdSettingsDict['sdramBase'] = '0x80000000'
g_dcdSettingsDict['deviceModel'] = 'No'
g_dcdSettingsDict['dcdDesc'] = None
g_certSettingsDict['cstVersion'] = RTyyyy_uidef.kCstVersion_v3_0_1
g_certSettingsDict['useExistingCaKey'] = 'n'
g_certSettingsDict['useEllipticCurveCrypto'] = 'n'
g_certSettingsDict['pkiTreeKeyLen'] = 2048
g_certSettingsDict['pkiTreeDuration'] = 10
g_certSettingsDict['SRKs'] = 4
g_certSettingsDict['caFlagSet'] = 'y'
g_signSettingsDict['isPartSigned'] = False
g_signSettingsDict['signedStart0'] = 0x0
g_signSettingsDict['signedSize0'] = 0x0
g_signSettingsDict['signedStart1'] = 0x0
g_signSettingsDict['signedSize1'] = 0x0
g_signSettingsDict['signedStart2'] = 0x0
g_signSettingsDict['signedSize2'] = 0x0
g_otpmkKeyCommDict['opt'] = 0xe0100000
g_otpmkKeyCommDict['regionStartList'] = [None] * 4
g_otpmkKeyCommDict['regionLengthList'] = [None] * 4
g_userKeyCtrlDict['engine_sel'] = RTyyyy_uidef.kUserEngineSel_Engine0
g_userKeyCtrlDict['engine0_key_src'] = RTyyyy_uidef.kUserKeySource_SW_GP2
g_userKeyCtrlDict['engine0_fac_cnt'] = 1
g_userKeyCtrlDict['engine1_key_src'] = RTyyyy_uidef.kUserKeySource_SW_GP2
g_userKeyCtrlDict['engine1_fac_cnt'] = 1
g_userKeyCtrlDict['total_regions'] = 1
g_userKeyCtrlDict['kek_src'] = RTyyyy_uidef.kUserKeySource_SW_GP2
g_userKeyCmdDict['base_addr'] = '0x60000000'
g_userKeyCmdDict['hw_eng'] = 'bee'
g_userKeyCmdDict['engine0_key'] = '0123456789abcdeffedcba9876543210'
g_userKeyCmdDict['engine0_arg'] = '1,[0x60001000,0x1000,0]'
g_userKeyCmdDict['engine0_lock'] = '0'
g_userKeyCmdDict['engine1_key'] = '0123456789abcdeffedcba9876543210'
g_userKeyCmdDict['engine1_arg'] = '1,[0x60002000,0x1000,0]'
g_userKeyCmdDict['engine1_lock'] = '0'
g_userKeyCmdDict['use_zero_key'] = '1'
g_userKeyCmdDict['is_boot_image'] = '1'
g_userKeyCmdDict['kek'] = '0123456789abcdeffedcba9876543210'
g_userKeyCmdDict['otfad_arg'] = '[0123456789abcdeffedcba9876543210,0020406001030507,0x60001000,0x1000]'
g_userKeyCmdDict['scramble'] = '0x33aa55cc'
g_userKeyCmdDict['scramble_align'] = '0x1b'
g_userKeyCmdDict['otfad_ctx_lock'] = '0,0,0,0'
def deinitVar(cfgFilename=None):
global g_cfgFilename
if cfgFilename == None and g_cfgFilename != None:
cfgFilename = g_cfgFilename
with open(cfgFilename, 'w') as fileObj:
global g_toolCommDict
global g_flexspiNorOpt0
global g_flexspiNorOpt1
global g_flexspiNorDeviceModel
global g_isFdcbKept
global g_flexspiNandOpt0
global g_flexspiNandOpt1
global g_flexspiNandFcbOpt
global g_flexspiNandImageInfoList
global g_semcNandOpt
global g_semcNandFcbOpt
global g_semcNandImageInfoList
global g_lpspiNorOpt0
global g_lpspiNorOpt1
global g_flexcommSpiNorOpt0
global g_flexcommSpiNorOpt1
global g_usdhcSdOpt
global g_usdhcMmcOpt0
global g_usdhcMmcOpt1
global g_dcdCtrlDict
global g_dcdSettingsDict
global g_certSettingsDict
global g_signSettingsDict
global g_otpmkKeyCommDict
global g_userKeyCtrlDict
global g_userKeyCmdDict
cfgDict = {
"cfgToolCommon": [g_toolCommDict],
"cfgFlexspiNor": [g_flexspiNorOpt0, g_flexspiNorOpt1, g_flexspiNorDeviceModel, g_isFdcbKept],
"cfgFlexspiNand": [g_flexspiNandOpt0, g_flexspiNandOpt1, g_flexspiNandFcbOpt, g_flexspiNandImageInfoList],
"cfgSemcNor": [g_semcNorOpt, g_semcNorSetting, g_semcNorDeviceModel],
"cfgSemcNand": [g_semcNandOpt, g_semcNandFcbOpt, g_semcNandImageInfoList],
"cfgLpspiNor": [g_lpspiNorOpt0, g_lpspiNorOpt1],
"cfgFlexcommSpiNor": [g_flexcommSpiNorOpt0, g_flexcommSpiNorOpt1],
"cfgUsdhcSd": [g_usdhcSdOpt],
"cfgUsdhcMmc": [g_usdhcMmcOpt0, g_usdhcMmcOpt1],
"cfgDcd": [g_dcdCtrlDict, g_dcdSettingsDict],
"cfgCertificate": [g_certSettingsDict],
"cfgSignature": [g_signSettingsDict],
"cfgSnvsKey": [g_otpmkKeyCommDict],
"cfgUserKey": [g_userKeyCtrlDict, g_userKeyCmdDict]
}
json.dump(cfgDict, fileObj, indent=1)
fileObj.close()
def getBootDeviceConfiguration( group ):
if group == uidef.kBootDevice_XspiNor or \
group == RTyyyy_uidef.kBootDevice_FlexspiNor or \
group == RTxxx_uidef.kBootDevice_FlexspiNor or \
group == RTxxx_uidef.kBootDevice_QuadspiNor:
global g_flexspiNorOpt0
global g_flexspiNorOpt1
global g_flexspiNorDeviceModel
global g_isFdcbKept
return g_flexspiNorOpt0, g_flexspiNorOpt1, g_flexspiNorDeviceModel, g_isFdcbKept
elif group == RTyyyy_uidef.kBootDevice_FlexspiNand:
global g_flexspiNandOpt0
global g_flexspiNandOpt1
global g_flexspiNandFcbOpt
global g_flexspiNandImageInfoList
return g_flexspiNandOpt0, g_flexspiNandOpt1, g_flexspiNandFcbOpt, g_flexspiNandImageInfoList
elif group == RTyyyy_uidef.kBootDevice_SemcNor:
global g_semcNorOpt
global g_semcNorSetting
global g_semcNorDeviceModel
return g_semcNorOpt, g_semcNorSetting, g_semcNorDeviceModel
elif group == RTyyyy_uidef.kBootDevice_SemcNand:
global g_semcNandOpt
global g_semcNandFcbOpt
global g_semcNandImageInfoList
return g_semcNandOpt, g_semcNandFcbOpt, g_semcNandImageInfoList
elif group == RTyyyy_uidef.kBootDevice_UsdhcSd or \
group == RTxxx_uidef.kBootDevice_UsdhcSd:
global g_usdhcSdOpt
return g_usdhcSdOpt
elif group == RTyyyy_uidef.kBootDevice_UsdhcMmc or \
group == RTxxx_uidef.kBootDevice_UsdhcMmc:
global g_usdhcMmcOpt0
global g_usdhcMmcOpt1
return g_usdhcMmcOpt0, g_usdhcMmcOpt1
elif group == RTyyyy_uidef.kBootDevice_LpspiNor:
global g_lpspiNorOpt0
global g_lpspiNorOpt1
return g_lpspiNorOpt0, g_lpspiNorOpt1
elif group == RTxxx_uidef.kBootDevice_FlexcommSpiNor:
global g_flexcommSpiNorOpt0
global g_flexcommSpiNorOpt1
return g_flexcommSpiNorOpt0, g_flexcommSpiNorOpt1
elif group == RTyyyy_uidef.kBootDevice_Dcd:
global g_dcdCtrlDict
global g_dcdSettingsDict
return g_dcdCtrlDict, g_dcdSettingsDict
else:
pass
def setBootDeviceConfiguration( group, *args ):
if group == uidef.kBootDevice_XspiNor or \
group == RTyyyy_uidef.kBootDevice_FlexspiNor or \
group == RTxxx_uidef.kBootDevice_FlexspiNor or \
group == RTxxx_uidef.kBootDevice_QuadspiNor:
global g_flexspiNorOpt0
global g_flexspiNorOpt1
global g_flexspiNorDeviceModel
global g_isFdcbKept
g_flexspiNorOpt0 = args[0]
g_flexspiNorOpt1 = args[1]
g_flexspiNorDeviceModel = args[2]
g_isFdcbKept = args[3]
elif group == RTyyyy_uidef.kBootDevice_FlexspiNand:
global g_flexspiNandOpt0
global g_flexspiNandOpt1
global g_flexspiNandFcbOpt
global g_flexspiNandImageInfoList
g_flexspiNandOpt0 = args[0]
g_flexspiNandOpt1 = args[1]
g_flexspiNandFcbOpt = args[2]
g_flexspiNandImageInfoList = args[3]
elif group == RTyyyy_uidef.kBootDevice_SemcNor:
global g_semcNorOpt
global g_semcNorSetting
global g_semcNorDeviceModel
g_semcNorOpt = args[0]
g_semcNorSetting = args[1]
g_semcNorDeviceModel = args[2]
elif group == RTyyyy_uidef.kBootDevice_SemcNand:
global g_semcNandOpt
global g_semcNandFcbOpt
global g_semcNandImageInfoList
g_semcNandOpt = args[0]
g_semcNandFcbOpt = args[1]
g_semcNandImageInfoList = args[2]
elif group == RTyyyy_uidef.kBootDevice_UsdhcSd or \
group == RTxxx_uidef.kBootDevice_UsdhcSd:
global g_usdhcSdOpt
g_usdhcSdOpt = args[0]
elif group == RTyyyy_uidef.kBootDevice_UsdhcMmc or \
group == RTxxx_uidef.kBootDevice_UsdhcMmc:
global g_usdhcMmcOpt0
global g_usdhcMmcOpt1
g_usdhcMmcOpt0 = args[0]
g_usdhcMmcOpt1 = args[1]
elif group == RTyyyy_uidef.kBootDevice_LpspiNor:
global g_lpspiNorOpt0
global g_lpspiNorOpt1
g_lpspiNorOpt0 = args[0]
g_lpspiNorOpt1 = args[1]
elif group == RTxxx_uidef.kBootDevice_FlexcommSpiNor:
global g_flexcommSpiNorOpt0
global g_flexcommSpiNorOpt1
g_flexcommSpiNorOpt0 = args[0]
g_flexcommSpiNorOpt1 = args[1]
elif group == RTyyyy_uidef.kBootDevice_Dcd:
global g_dcdCtrlDict
global g_dcdSettingsDict
g_dcdCtrlDict = args[0]
g_dcdSettingsDict = args[1]
else:
pass
def getAdvancedSettings( group ):
if group == uidef.kAdvancedSettings_Tool:
global g_toolCommDict
return g_toolCommDict
elif group == uidef.kAdvancedSettings_Cert:
global g_certSettingsDict
return g_certSettingsDict
elif group == uidef.kAdvancedSettings_Sign:
global g_signSettingsDict
return g_signSettingsDict
elif group == uidef.kAdvancedSettings_OtpmkKey:
global g_otpmkKeyCommDict
return g_otpmkKeyCommDict
elif group == uidef.kAdvancedSettings_UserKeys:
global g_userKeyCtrlDict
global g_userKeyCmdDict
return g_userKeyCtrlDict, g_userKeyCmdDict
else:
pass
def setAdvancedSettings( group, *args ):
if group == uidef.kAdvancedSettings_Tool:
global g_toolCommDict
g_toolCommDict = args[0]
elif group == uidef.kAdvancedSettings_Cert:
global g_certSettingsDict
g_certSettingsDict = args[0]
elif group == uidef.kAdvancedSettings_Sign:
global g_signSettingsDict
g_signSettingsDict = args[0]
elif group == uidef.kAdvancedSettings_OtpmkKey:
global g_otpmkKeyCommDict
g_otpmkKeyCommDict = args[0]
elif group == uidef.kAdvancedSettings_UserKeys:
global g_userKeyCtrlDict
global g_userKeyCmdDict
g_userKeyCtrlDict = args[0]
g_userKeyCmdDict = args[1]
else:
pass
def getRuntimeSettings( ):
global g_hasSubWinBeenOpened
global g_exeTopRoot
global g_soundEffectType
global g_languageIndex
return g_hasSubWinBeenOpened, g_exeTopRoot, g_soundEffectType, g_languageIndex
def setRuntimeSettings( *args ):
global g_hasSubWinBeenOpened
if args[0] != None:
g_hasSubWinBeenOpened = args[0]
try:
global g_exeTopRoot
if args[1] != None:
g_exeTopRoot = args[1]
except:
pass
try:
global g_soundEffectType
if args[2] != None:
g_soundEffectType = args[2]
except:
pass
try:
global g_languageIndex
if args[3] != None:
g_languageIndex = args[3]
except:
pass
def getEfuseSettings( ):
global g_efuseDict
return g_efuseDict
def setEfuseSettings( *args ):
global g_efuseDict
g_efuseDict = args[0]

textattack/models/tokenizers/glove_tokenizer.py | xinzhel/TextAttack | 1,980 | 11136993
"""
Glove Tokenizer
^^^^^^^^^^^^^^^^^
"""
import json
import tempfile
import tokenizers as hf_tokenizers
class WordLevelTokenizer(hf_tokenizers.implementations.BaseTokenizer):
"""WordLevelTokenizer.
Represents a simple word level tokenization using the internals of BERT's
tokenizer.
Based off the `tokenizers` BertWordPieceTokenizer (https://github.com/huggingface/tokenizers/blob/704cf3fdd2f607ead58a561b892b510b49c301db/bindings/python/tokenizers/implementations/bert_wordpiece.py).
"""
def __init__(
self,
word_id_map={},
pad_token_id=None,
unk_token_id=None,
unk_token="[UNK]",
sep_token="[SEP]",
cls_token="[CLS]",
pad_token="[PAD]",
lowercase: bool = False,
unicode_normalizer=None,
):
if pad_token_id:
word_id_map[pad_token] = pad_token_id
if unk_token_id:
word_id_map[unk_token] = unk_token_id
max_id = max(word_id_map.values())
for idx, token in enumerate((unk_token, sep_token, cls_token, pad_token)):
if token not in word_id_map:
word_id_map[token] = max_id + idx
# HuggingFace tokenizer expects a path to a `*.json` file to read the
# vocab from. I think this is kind of a silly constraint, but for now
# we write the vocab to a temporary file before initialization.
word_list_file = tempfile.NamedTemporaryFile()
word_list_file.write(json.dumps(word_id_map).encode())
word_level = hf_tokenizers.models.WordLevel(
word_list_file.name, unk_token=str(unk_token)
)
tokenizer = hf_tokenizers.Tokenizer(word_level)
# Let the tokenizer know about special tokens if they are part of the vocab
if tokenizer.token_to_id(str(unk_token)) is not None:
tokenizer.add_special_tokens([str(unk_token)])
if tokenizer.token_to_id(str(sep_token)) is not None:
tokenizer.add_special_tokens([str(sep_token)])
if tokenizer.token_to_id(str(cls_token)) is not None:
tokenizer.add_special_tokens([str(cls_token)])
if tokenizer.token_to_id(str(pad_token)) is not None:
tokenizer.add_special_tokens([str(pad_token)])
# Check for Unicode normalization first (before everything else)
normalizers = []
if unicode_normalizer:
normalizers += [
hf_tokenizers.normalizers.unicode_normalizer_from_str(
unicode_normalizer
)
]
if lowercase:
normalizers += [hf_tokenizers.normalizers.Lowercase()]
# Create the normalizer structure
if len(normalizers) > 0:
if len(normalizers) > 1:
tokenizer.normalizer = hf_tokenizers.normalizers.Sequence(normalizers)
else:
tokenizer.normalizer = normalizers[0]
tokenizer.pre_tokenizer = hf_tokenizers.pre_tokenizers.WhitespaceSplit()
sep_token_id = tokenizer.token_to_id(str(sep_token))
if sep_token_id is None:
raise TypeError("sep_token not found in the vocabulary")
cls_token_id = tokenizer.token_to_id(str(cls_token))
if cls_token_id is None:
raise TypeError("cls_token not found in the vocabulary")
tokenizer.post_processor = hf_tokenizers.processors.BertProcessing(
(str(sep_token), sep_token_id), (str(cls_token), cls_token_id)
)
parameters = {
"model": "WordLevel",
"unk_token": unk_token,
"sep_token": sep_token,
"cls_token": cls_token,
"pad_token": pad_token,
"lowercase": lowercase,
"unicode_normalizer": unicode_normalizer,
}
self.unk_token = unk_token
self.pad_token = pad_token
super().__init__(tokenizer, parameters)
class GloveTokenizer(WordLevelTokenizer):
"""A word-level tokenizer with GloVe 200-dimensional vectors.
Lowercased, since GloVe vectors are lowercased.
"""
def __init__(
self, word_id_map={}, pad_token_id=None, unk_token_id=None, max_length=256
):
super().__init__(
word_id_map=word_id_map,
unk_token_id=unk_token_id,
pad_token_id=pad_token_id,
lowercase=True,
)
self.pad_token_id = pad_token_id
self.oov_token_id = unk_token_id
self.convert_id_to_word = self.id_to_token
self.model_max_length = max_length
# Set defaults.
self.enable_padding(length=max_length, pad_id=pad_token_id)
self.enable_truncation(max_length=max_length)
def _process_text(self, text_input):
"""A text input may be a single-input tuple (text,) or multi-input
tuple (text, text, ...).
In the single-input case, unroll the tuple. In the multi-input
case, raise an error.
"""
if isinstance(text_input, tuple):
if len(text_input) > 1:
raise ValueError(
"Cannot use `GloveTokenizer` to encode multiple inputs"
)
text_input = text_input[0]
return text_input
def encode(self, text):
text = self._process_text(text)
return super().encode(text, add_special_tokens=False).ids
def batch_encode(self, input_text_list):
"""The batch equivalent of ``encode``."""
input_text_list = list(map(self._process_text, input_text_list))
encodings = self.encode_batch(
input_text_list,
add_special_tokens=False,
)
return [x.ids for x in encodings]
def __call__(self, input_texts):
if isinstance(input_texts, list):
return self.batch_encode(input_texts)
else:
return self.encode(input_texts)
def convert_ids_to_tokens(self, ids):
return [self.convert_id_to_word(_id) for _id in ids]
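# Illustrative usage sketch (toy vocabulary assumed, not part of the original file):
#   tok = GloveTokenizer(word_id_map={"the": 2, "cat": 3}, pad_token_id=0,
#                        unk_token_id=1, max_length=8)
#   tok.encode("The cat")         # -> word ids, lowercased, padded to max_length
#   tok.batch_encode(["a", "b"])  # -> list of id lists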

backend/server/server/urls.py | Bonifase/django-react | 508 | 11136997
from django.contrib import admin
from django.urls import path
from apps.accounts.urls import accounts_urlpatterns
from apps.notes.urls import notes_urlpatterns
urlpatterns = [
path('admin/', admin.site.urls),
]
urlpatterns += accounts_urlpatterns # add URLs for authentication
urlpatterns += notes_urlpatterns  # notes URLs

test/com/facebook/buck/skylark/parser/testdata/attr/int_list/defs.bzl | Unknoob/buck | 8,027 | 11137003
""" Module docstring """
def well_formed():
""" Function docstring """
a = attr.int_list()
if repr(a) != "<attr.int_list>":
fail("Expected attr.int_list instance")
a = attr.int_list(mandatory = True, doc = "Some int_list", default = [1])
if repr(a) != "<attr.int_list>":
fail("Expected attr.int_list instance")
a = attr.int_list(mandatory = True, doc = "Some int_list", default = [1], allow_empty = True)
if repr(a) != "<attr.int_list>":
fail("Expected attr.int_list instance")
a = attr.int_list(mandatory = True, doc = "Some int_list", default = [1], allow_empty = False)
if repr(a) != "<attr.int_list>":
fail("Expected attr.int_list instance")
def malformed():
""" Function docstring """
_a = attr.int_list(mandatory = True, doc = "Some int_list", default = 3, allow_empty = True)

test/regression/features/lambda/lambda_arity2.py | ppelleti/berp | 137 | 11137012
l = lambda x,y: x + y
print(l(3,9))

vumi/components/tests/test_window_manager.py | seidu626/vumi | 199 | 11137015
from twisted.internet.defer import inlineCallbacks
from twisted.internet.task import Clock
from vumi.components.window_manager import WindowManager, WindowException
from vumi.tests.helpers import VumiTestCase, PersistenceHelper
class TestWindowManager(VumiTestCase):
@inlineCallbacks
def setUp(self):
self.persistence_helper = self.add_helper(PersistenceHelper())
redis = yield self.persistence_helper.get_redis_manager()
self.window_id = 'window_id'
# Patch the clock so we can control time
self.clock = Clock()
self.patch(WindowManager, 'get_clock', lambda _: self.clock)
self.wm = WindowManager(redis, window_size=10, flight_lifetime=10)
self.add_cleanup(self.wm.stop)
yield self.wm.create_window(self.window_id)
self.redis = self.wm.redis
@inlineCallbacks
def test_windows(self):
windows = yield self.wm.get_windows()
self.assertTrue(self.window_id in windows)
def test_strict_window_recreation(self):
return self.assertFailure(
self.wm.create_window(self.window_id, strict=True),
WindowException)
@inlineCallbacks
def test_window_recreation(self):
orig_clock_time = self.clock.seconds()
clock_time = yield self.wm.create_window(self.window_id)
self.assertEqual(clock_time, orig_clock_time)
@inlineCallbacks
def test_window_removal(self):
yield self.wm.add(self.window_id, 1)
yield self.assertFailure(self.wm.remove_window(self.window_id),
WindowException)
key = yield self.wm.get_next_key(self.window_id)
item = yield self.wm.get_data(self.window_id, key)
self.assertEqual(item, 1)
self.assertEqual((yield self.wm.remove_window(self.window_id)), None)
@inlineCallbacks
def test_adding_to_window(self):
for i in range(10):
yield self.wm.add(self.window_id, i)
window_key = self.wm.window_key(self.window_id)
window_members = yield self.redis.llen(window_key)
self.assertEqual(window_members, 10)
@inlineCallbacks
def test_fetching_from_window(self):
for i in range(12):
yield self.wm.add(self.window_id, i)
flight_keys = []
for i in range(10):
flight_key = yield self.wm.get_next_key(self.window_id)
self.assertTrue(flight_key)
flight_keys.append(flight_key)
out_of_window_flight = yield self.wm.get_next_key(self.window_id)
self.assertEqual(out_of_window_flight, None)
# We should get data out in the order we put it in
for i, flight_key in enumerate(flight_keys):
data = yield self.wm.get_data(self.window_id, flight_key)
self.assertEqual(data, i)
# Removing one should allow for space for the next to fill up
yield self.wm.remove_key(self.window_id, flight_keys[0])
next_flight_key = yield self.wm.get_next_key(self.window_id)
self.assertTrue(next_flight_key)
@inlineCallbacks
def test_set_and_external_id(self):
yield self.wm.set_external_id(self.window_id, "flight_key",
"external_id")
self.assertEqual(
(yield self.wm.get_external_id(self.window_id, "flight_key")),
"external_id")
self.assertEqual(
(yield self.wm.get_internal_id(self.window_id, "external_id")),
"flight_key")
@inlineCallbacks
def test_remove_key_removes_external_and_internal_id(self):
yield self.wm.set_external_id(self.window_id, "flight_key",
"external_id")
yield self.wm.remove_key(self.window_id, "flight_key")
self.assertEqual(
(yield self.wm.get_external_id(self.window_id, "flight_key")),
None)
self.assertEqual(
(yield self.wm.get_internal_id(self.window_id, "external_id")),
None)
@inlineCallbacks
def assert_count_waiting(self, window_id, amount):
self.assertEqual((yield self.wm.count_waiting(window_id)), amount)
@inlineCallbacks
def assert_expired_keys(self, window_id, amount):
# Stuff has taken too long and so we should get 10 expired keys
expired_keys = yield self.wm.get_expired_flight_keys(window_id)
self.assertEqual(len(expired_keys), amount)
@inlineCallbacks
def assert_in_flight(self, window_id, amount):
self.assertEqual((yield self.wm.count_in_flight(window_id)),
amount)
@inlineCallbacks
def slide_window(self, limit=10):
for i in range(limit):
yield self.wm.get_next_key(self.window_id)
@inlineCallbacks
def test_expiry_of_acks(self):
def mock_clock_time(self):
return self._clocktime
self.patch(WindowManager, 'get_clocktime', mock_clock_time)
self.wm._clocktime = 0
for i in range(30):
yield self.wm.add(self.window_id, i)
# We're manually setting the clock instead of using clock.advance()
# so we can wait for the deferreds to finish before continuing to the
# next clear_expired_flight_keys run since LoopingCall() will only fire
# again if the previous run has completed.
yield self.slide_window()
self.wm._clocktime = 10
yield self.wm.clear_expired_flight_keys()
self.assert_expired_keys(self.window_id, 10)
yield self.slide_window()
self.wm._clocktime = 20
yield self.wm.clear_expired_flight_keys()
self.assert_expired_keys(self.window_id, 20)
yield self.slide_window()
self.wm._clocktime = 30
yield self.wm.clear_expired_flight_keys()
self.assert_expired_keys(self.window_id, 30)
self.assert_in_flight(self.window_id, 0)
self.assert_count_waiting(self.window_id, 0)
@inlineCallbacks
def test_monitor_windows(self):
yield self.wm.remove_window(self.window_id)
window_ids = ['window_id_1', 'window_id_2']
for window_id in window_ids:
yield self.wm.create_window(window_id)
for i in range(20):
yield self.wm.add(window_id, i)
key_callbacks = {}
def callback(window_id, key):
key_callbacks.setdefault(window_id, []).append(key)
cleanup_callbacks = []
def cleanup_callback(window_id):
cleanup_callbacks.append(window_id)
yield self.wm._monitor_windows(callback, False)
self.assertEqual(set(key_callbacks.keys()), set(window_ids))
self.assertEqual(len(key_callbacks.values()[0]), 10)
self.assertEqual(len(key_callbacks.values()[1]), 10)
yield self.wm._monitor_windows(callback, False)
# Nothing should've changed since we haven't removed anything.
self.assertEqual(len(key_callbacks.values()[0]), 10)
self.assertEqual(len(key_callbacks.values()[1]), 10)
for window_id, keys in key_callbacks.items():
for key in keys:
yield self.wm.remove_key(window_id, key)
yield self.wm._monitor_windows(callback, False)
# Everything should've been processed now
self.assertEqual(len(key_callbacks.values()[0]), 20)
self.assertEqual(len(key_callbacks.values()[1]), 20)
# Now run again but cleanup the empty windows
self.assertEqual(set((yield self.wm.get_windows())), set(window_ids))
for window_id, keys in key_callbacks.items():
for key in keys:
yield self.wm.remove_key(window_id, key)
yield self.wm._monitor_windows(callback, True, cleanup_callback)
self.assertEqual(len(key_callbacks.values()[0]), 20)
self.assertEqual(len(key_callbacks.values()[1]), 20)
self.assertEqual((yield self.wm.get_windows()), [])
self.assertEqual(set(cleanup_callbacks), set(window_ids))
class TestConcurrentWindowManager(VumiTestCase):
@inlineCallbacks
def setUp(self):
self.persistence_helper = self.add_helper(PersistenceHelper())
redis = yield self.persistence_helper.get_redis_manager()
self.window_id = 'window_id'
# Patch the count_waiting so we can fake the race condition
self.clock = Clock()
self.patch(WindowManager, 'count_waiting', lambda _, window_id: 100)
self.wm = WindowManager(redis, window_size=10, flight_lifetime=10)
self.add_cleanup(self.wm.stop)
yield self.wm.create_window(self.window_id)
self.redis = self.wm.redis
@inlineCallbacks
def test_race_condition(self):
"""
A race condition can occur when multiple window managers try and
access the same window at the same time.
A LoopingCall loops over the available windows, for those windows
it tries to get a next key. It does that by checking how many are
waiting to be sent out and adding however many it can still carry
to its own flight.
Since there are concurrent workers, between the time of checking how
many are available and how much room it has available, a different
window manager may have already beaten it to it.
If this happens Redis' `rpoplpush` method will return None since
there are no more available keys for the given window.
"""
yield self.wm.add(self.window_id, 1)
yield self.wm.add(self.window_id, 2)
yield self.wm._monitor_windows(lambda *a: True, True)
self.assertEqual((yield self.wm.get_next_key(self.window_id)), None)

inclearn/models/zil.py | Zotkin/incremental_learning.pytorch | 277 | 11137033
import collections
import copy
import functools
import logging
import math
import os
import pickle
import numpy as np
import torch
from sklearn import preprocessing as skpreprocessing
from sklearn.svm import SVC
from sklearn.utils.class_weight import compute_class_weight
from torch import nn
from torch.nn import functional as F
from inclearn.lib import data, distance, factory, loops, losses, network, utils
from inclearn.lib.data import samplers
from inclearn.lib.network.autoencoder import AdvAutoEncoder
from inclearn.lib.network.classifiers import (BinaryCosineClassifier,
DomainClassifier)
from inclearn.lib.network.word import Word2vec
from inclearn.models.icarl import ICarl
logger = logging.getLogger(__name__)
class ZIL(ICarl):
def __init__(self, args):
self._disable_progressbar = args.get("no_progressbar", False)
self._device = args["device"][0]
self.device = args["device"][0]
self._multiple_devices = args["device"]
self._weight_decay = args["weight_decay"]
# Steps definition
self.unsupervised_config = args.get("unsupervised", {})
self.supervised_config = args.get("supervised", {})
self.gmmn_config = args.get("gmmn", {})
self.autoencoder_config = args.get("autoencoder", {})
self.fakeclassifier_config = args.get("fake_classifier", {})
# Losses definition
self._pod_spatial_config = args.get("pod_spatial", {})
self._pod_flat_config = args.get("pod_flat", {})
self.ghost_config = args.get("ghost_regularization", {})
self.real_config = args.get("semantic_regularization", {})
self.hyperplan_config = args.get("hyperplan_regularization", {})
self.placement_config = args.get("ghost_placement_config", {})
self.adv_placement_config = args.get("adv_ghost_placement_config", {})
self.ucir_ranking_config = args.get("ucir_ranking", {})
self._nca_config = args.get("nca", {})
self._softmax_ce = args.get("softmax_ce", False)
logger.info("Initializing ZIL")
self._network = network.BasicNet(
args["convnet"],
convnet_kwargs=args.get("convnet_config", {}),
classifier_kwargs=args.get("classifier_config"),
postprocessor_kwargs=args.get("postprocessor_config", {}),
device=self._device,
extract_no_act=True,
classifier_no_act=args.get("classifier_no_act", True),
return_features=True,
attention_hook=True,
rotations_predictor=True
)
self.args = args
self._new_weights_config = args.get("weight_generation", {"type": "imprinted"})
self._w2v_path = args.get("word2vec_path") or args.get("data_path")
if args.get("word_embeddings"):
self._word_embeddings = Word2vec(
**args["word_embeddings"], data_path=self._w2v_path, device=self._device
)
else:
self._word_embeddings = None
self._old_word_embeddings = None
self._args_we = args.get("word_embeddings")
self._args_ae = args.get("autoencoder_archi")
self._n_classes = 0
self._old_model = None
self._data_memory, self._targets_memory = None, None
self._examplars = {}
self._class_means = None
self._herding_indexes = []
self._fixed_memory = args.get("fixed_memory", True)
self._memory_size = args["memory_size"]
self._herding_selection = {"type": "icarl"}
self._all_test_classes = args["all_test_classes"]
self._preprocessing = None
self._gen_preprocessing = None
self._svm = None
self._ghosts = None
self._old_ghosts = None
self._saved_weights = None
self._cheat_pixels = None
# ---------
# Utilities
# ---------
def save_metadata(self, directory, run_id):
path = os.path.join(directory, f"meta_{run_id}_task_{self._task}.pkl")
logger.info("Saving metadata at {}.".format(path))
with open(path, "wb+") as f:
pickle.dump(
[self._data_memory, self._targets_memory, self._herding_indexes, self._class_means],
f
)
def load_metadata(self, directory, run_id):
path = os.path.join(directory, f"meta_{run_id}_task_{self._task}.pkl")
if not os.path.exists(path):
return
logger.info("Loading metadata at {}.".format(path))
with open(path, "rb") as f:
self._data_memory, self._targets_memory, self._herding_indexes, self._class_means = pickle.load(
f
)
def save_parameters(self, directory, run_id):
path = os.path.join(directory, f"net_{run_id}_task_{self._task}.pth")
logger.info(f"Saving model at {path}.")
torch.save(self.network.state_dict(), path)
if self._word_embeddings is not None:
path = os.path.join(directory, f"gen_{run_id}_task_{self._task}.pth")
logger.info(f"Saving generator at {path}.")
torch.save(self._word_embeddings.state_dict(), path)
def load_parameters(self, directory, run_id):
path = os.path.join(directory, f"net_{run_id}_task_{self._task}.pth")
if not os.path.exists(path):
return
logger.info(f"Loading model at {path}.")
try:
self.network.load_state_dict(torch.load(path, map_location=self._device), strict=False)
except Exception as e:
logger.warning(f"Old method to save weights, it's deprecated!: {e}")
self._network = torch.load(path)
self.network.to(self._device)
if self._saved_weights is not None:
logger.info("Keeping previous weights anyway")
self._network.classifier._weights = self._saved_weights
path = os.path.join(directory, f"gen_{run_id}_task_{self._task}.pth")
if os.path.exists(path):
logger.info(f"Loading generator at {path}.")
try:
self._word_embeddings.load_state_dict(
torch.load(path, map_location=self._device), strict=False
)
self._word_embeddings.to(self._device)
except Exception:
logger.warning("Failed to reload generator, it was probably changed.")
def get_class_label(self, fake_class_ids):
"""Class id in-code ==> Class id in datasets."""
return torch.tensor([self.inc_dataset.class_order[0][i] for i in fake_class_ids])
def get_inv_class_label(self, true_class_ids):
"""Class id in datasets ==> Class id in-code."""
return torch.tensor([self.inc_dataset.class_order[0].index(i) for i in true_class_ids])
@staticmethod
def get_param_groups(network, config, base_lr, additional_parameters=None):
"""Returns the parameters per group with their own learning rate.
:param network: The network whose parameters will be optimized.
:param config: The config defining which parameters are learned, and how much.
:param base_lr: A base learning rate.
:return: A list of dicts like {"params": <parameters>, "lr": <lr>}.
"""
groups = []
parameters_dict = [network.get_group_parameters()]
if additional_parameters is not None:
parameters_dict.append(additional_parameters)
for group_dict in parameters_dict:
for group_name, group_parameters in group_dict.items():
if group_parameters is None or group_name not in config:
continue
group_lr = config.get(group_name, 1.0) * base_lr
logger.info(f"{group_name}, lr: {group_lr}")
groups.append({"params": group_parameters, "lr": group_lr})
return groups
def setup_training(self, config, additional_parameters=None):
groups = self.get_param_groups(
self._network,
config["groupwise_lr"],
config["lr"],
additional_parameters=additional_parameters
)
optimizer = factory.get_optimizer(
groups, config["optimizer"], config["lr"],
config.get("weight_decay", self._weight_decay)
)
scheduler = factory.get_lr_scheduler(
config["scheduling"], optimizer, nb_epochs=config["epochs"], task=self._task
)
return optimizer, scheduler
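# For reference, each step config consumed above is assumed to be a dict of
# roughly this shape (values are illustrative, not taken from the repo):
#   {"lr": 0.1, "optimizer": "sgd", "weight_decay": 5e-4, "epochs": 90,
#    "scheduling": {...}, "groupwise_lr": {"convnet": 1.0, "classifier": 1.0}}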
# ---------------------------
# Crude descriptions of steps
# Remember that resume skips the training step but still does the before and after task.
# ---------------------------
def _before_task(self, train_loader, val_loader):
self._cheat_pixels = None
if self._task > 0 and self._task != self._n_tasks - 1:
logger.info("Setup of the constraints...")
self._n_classes += self._task_size
self._old_ghosts = copy.deepcopy(self._ghosts)
self._setup_constraints()
self._n_classes -= self._task_size
if self._task > 1 and self._new_weights_config["type"] == "ghosts":
self._new_weights_config["ghosts"] = self._old_ghosts
if self._task == 0:
utils.add_new_weights(
self._network, {"type": "basic"}, self._n_classes, self._task_size, self.inc_dataset
)
elif self._task == 1:
utils.add_new_weights(
self._network, {"type": "imprinted"}
if self._network.classifier.classifier_type == "cosine" else {"type": "basic"},
self._n_classes, self._task_size, self.inc_dataset
)
elif self._new_weights_config["type"] == "neg_weights":
# Take the neg weights
logger.info("Promoting Ghost Centroids to fist-class status.")
neg_weights = self._network.classifier._negative_weights
to_promote = neg_weights[:self._task_size].data
self._network.classifier.add_custom_weights(
to_promote, ponderate=self._new_weights_config.get("ponderate")
)
else: # Take mean of ghost per class
utils.add_new_weights(
self._network, self._new_weights_config, self._n_classes, self._task_size,
self.inc_dataset
)
if self._task == self._n_tasks - 1:
# If we are on last task, disable negative weights
self._network.classifier._negative_weights = None
self._n_classes += self._task_size
if "ghosts" in self._new_weights_config:
del self._new_weights_config["ghosts"]
def _train_task(self, train_loader, val_loader):
if self._cheat_pixels is not None:
train_loader.dataset.x = np.concatenate((train_loader.dataset.x, self._cheat_pixels[0]))
train_loader.dataset.y = np.concatenate((train_loader.dataset.y, self._cheat_pixels[1]))
train_loader.dataset.memory_flags = np.concatenate(
(train_loader.dataset.memory_flags, self._cheat_pixels[2])
)
if self.placement_config.get("initial_centroids"):
_, loader = self.inc_dataset.get_custom_loader(
list(range(self._n_classes - self._task_size, self._n_classes))
)
c_features, c_targets = utils.compute_centroids(self._network, loader)
self._c_features = torch.tensor(c_features).float().to(self._device)
self._c_targets = torch.tensor(c_targets).long().to(self._device)
else:
self._c_features, self._c_targets = None, None
if self._task > 0 and self.adv_placement_config:
self._network.create_domain_classifier()
if self.unsupervised_config \
and ((self._task > 0 and not self.unsupervised_config.get("only_first", False)) \
or self._task == 0):
self.train_unsupervised(train_loader, val_loader)
if self.supervised_config:
self.train_supervised(train_loader, val_loader)
def _after_task(self, inc_dataset):
if self._task != self._n_tasks - 1:
if self.gmmn_config:
self.train_gmmn()
elif self.autoencoder_config:
self.train_autoencoder()
if self._task > 0 and self.adv_placement_config:
self._network.del_domain_classifier()
self._old_model = self._network.copy().eval().to(self._device)
self._network.on_task_end()
if self._word_embeddings is not None:
self._old_word_embeddings = copy.deepcopy(self._word_embeddings)
self._old_word_embeddings.eval().to(self._device)
def _eval_task(self, loader):
self.eval()
ypred, ytrue = [], []
if self.fakeclassifier_config and self._task != self._n_tasks - 1:
logger.info("Generating weights for unseen classes.")
real_clf_weights = copy.deepcopy(self._network.classifier._weights)
nb_unseen_classes = self._total_n_classes - self._n_classes
if self.fakeclassifier_config.get("what_post"):
postprocessor = copy.deepcopy(self._network.post_processor)
if isinstance(self.fakeclassifier_config.get("what_post"), float):
self._network.post_processor.factor.data.fill_(
self.fakeclassifier_config.get("what_post", 1.0)
)
if hasattr(
self._network.classifier, "_bias"
) and self._network.classifier._bias is not None:
real_clf_bias = copy.deepcopy(self._network.classifier._bias)
if self.fakeclassifier_config["what"] == "new":
self._network.classifier._weights.append(
nn.Parameter(
torch.randn(
nb_unseen_classes * self._network.classifier.proxy_per_class,
self._network.convnet.out_dim
)
)
)
if hasattr(
self._network.classifier, "_bias"
) and self._network.classifier._bias is not None:
logging.info("Generating also bias.")
self._network.classifier._bias.append(
nn.Parameter(torch.zeros(nb_unseen_classes))
)
elif self.fakeclassifier_config["what"] == "new_scaleOld":
self._network.classifier._weights = nn.ParameterList(
[
nn.Parameter(
self._preprocessing.transform(self._network.classifier.weights.data)
),
nn.Parameter(
torch.randn(
nb_unseen_classes * self._network.classifier.proxy_per_class,
self._network.convnet.out_dim
)
)
]
)
elif self.fakeclassifier_config["what"] == "negative":
params = [
nn.Parameter(
self._preprocessing.transform(self._network.classifier.weights.data)
)
]
if self._task == 0:
params.append(
nn.Parameter(
torch.randn(
nb_unseen_classes * self._network.classifier.proxy_per_class,
self._network.convnet.out_dim
)
)
)
elif isinstance(self._network.classifier._negative_weights, nn.Parameter):
params.append(
nn.Parameter(
self._preprocessing.transform(
self._network.classifier._negative_weights.data
)
)
)
else:
params.append(
nn.Parameter(
self._preprocessing.transform(
self._network.classifier._negative_weights
)
)
)
self._network.classifier._weights = nn.ParameterList(params)
elif self.fakeclassifier_config["what"] == "negative_no_tsrf":
params = [nn.Parameter(self._network.classifier.weights.data)]
if self._task == 0:
params.append(
nn.Parameter(torch.randn(nb_unseen_classes, self._network.convnet.out_dim))
)
elif isinstance(self._network.classifier._negative_weights, nn.Parameter):
params.append(nn.Parameter(self._network.classifier._negative_weights.data))
else:
params.append(nn.Parameter(self._network.classifier._negative_weights))
self._network.classifier._weights = nn.ParameterList(params)
elif self.fakeclassifier_config["what"] == "negative_scaled":
self._network.classifier._weights = nn.ParameterList(
[
nn.Parameter(
self._preprocessing.transform(self._network.classifier.weights.data)
),
nn.Parameter(
self._preprocessing.transform(
self._network.classifier._negative_weights
)
)
]
)
elif self.fakeclassifier_config["what"] == "all":
self._network.classifier._weights = nn.ParameterList(
[
nn.Parameter(
torch.randn(
self._n_classes * self._network.classifier.proxy_per_class,
self._network.convnet.out_dim
)
),
nn.Parameter(
torch.randn(
nb_unseen_classes * self._network.classifier.proxy_per_class,
self._network.convnet.out_dim
)
)
]
)
if hasattr(
self._network.classifier, "_bias"
) and self._network.classifier._bias is not None:
logging.info("Generating also bias.")
self._network.classifier._bias = nn.ParameterList(
[
nn.Parameter(torch.randn(self._n_classes)),
nn.Parameter(torch.randn(nb_unseen_classes))
]
)
nn.init.kaiming_normal_(self._network.classifier._weights[0], nonlinearity="linear")
nn.init.kaiming_normal_(self._network.classifier._weights[1], nonlinearity="linear")
else:
raise ValueError(self.fakeclassifier_config["what"])
self._network.classifier.to(self._device)
if "training" in self.fakeclassifier_config:
if self.fakeclassifier_config.get("only_first_task") and self._task == 0:
self.train_fake_classifier(self.fakeclassifier_config["training"])
has_trained = True
elif not self.fakeclassifier_config.get("only_first_task", False):
self.train_fake_classifier(self.fakeclassifier_config["training"])
has_trained = True
else:
has_trained = False
else:
has_trained = False
if self.fakeclassifier_config.get("postprocessing", "none") == "align_weights":
self._network.classifier.align_weights()
elif self.fakeclassifier_config.get("postprocessing", "none") == "align_inv_weights":
self._network.classifier.align_inv_weights()
elif self.fakeclassifier_config.get(
"postprocessing", "none"
) == "align_inv_weights_unseen":
logger.info("Align unseen to seen.")
self._network.classifier.align_weights_i_to_j(
list(range(self._n_classes)),
list(range(self._n_classes, self._total_n_classes))
)
else:
self._preprocessing = None
self._network.eval()
logger.info("Evaluating model...")
for input_dict in loader:
with torch.no_grad():
if self._preprocessing is None or not has_trained:
logits = self._network(input_dict["inputs"].to(self._device))["logits"]
else:
features = self._network.convnet(
input_dict["inputs"].to(self._device)
)[self.gmmn_config.get("features_key", "raw_features")]
if self.fakeclassifier_config:
features = self._preprocessing.transform(features)
if self._svm is None:
logits = self._network.classifier(features)["logits"]
else:
preds = self._svm.predict(features.cpu().numpy())
nb_classes = self._network.classifier.weights.shape[1]
logits = np.zeros((len(preds), nb_classes))
logits[np.arange(len(logits)), preds] = 1.0
ytrue.append(input_dict["targets"])
ypred.append(logits)
self._network.train()
if self._svm is not None:
ypred = np.concatenate(ypred)
else:
ypred = torch.cat(ypred)
ypred = F.softmax(ypred, dim=1)
ypred = ypred.cpu().numpy()
ytrue = torch.cat(ytrue).numpy()
if self._task != self._n_tasks - 1 and self.fakeclassifier_config:
if self.fakeclassifier_config.get("threshold") is not None:
threshold1, threshold2 = self.fakeclassifier_config.get("threshold")
logger.info(f"Using threshold ({threshold1}, {threshold2}).")
maxes = ypred[..., :self._n_classes].max(axis=1)
logger.info(f"Best confidence mean={maxes.mean()}, max={maxes.max()}.")
ypred[maxes < threshold1, :self._n_classes] = 0.
ypred[maxes > threshold2, self._n_classes:] = 0.
elif self.fakeclassifier_config.get("bias") is not None:
bias = self.fakeclassifier_config.get("bias")
logger.info(f"Using bias {bias}.")
ypred[..., :self._n_classes] += bias
if self.fakeclassifier_config and self._task != self._n_tasks - 1:
if self.fakeclassifier_config.get("keep_weights", "") == "all":
logger.info("Keeping finetuned weights")
self._network.classifier._weights = nn.ParameterList(
[self._network.classifier._weights[0]]
)
if self.fakeclassifier_config.get("keep_weights", "") == "all_not_first":
if self._task == 0:
if self.fakeclassifier_config.get("what_post"):
self._network.post_processor = postprocessor
self._network.classifier._weights = real_clf_weights
else:
logger.info("Keeping finetuned weights")
self._network.classifier._weights = nn.ParameterList(
[self._network.classifier._weights[0]]
)
else:
if self.fakeclassifier_config.get("what_post"):
self._network.post_processor = postprocessor
self._network.classifier._weights = real_clf_weights
if hasattr(
self._network.classifier, "_bias"
) and self._network.classifier._bias is not None:
self._network.classifier._bias = real_clf_bias
self.train()
return ypred, ytrue
# --------------
# Training steps
# --------------
def train_unsupervised(self, train_loader, val_loader):
logger.info("Training ConvNet with rotations prediction.")
optimizer, scheduler = self.setup_training(self.unsupervised_config)
loops.single_loop(
train_loader,
val_loader,
self._multiple_devices,
self._network,
self.unsupervised_config["epochs"],
optimizer,
scheduler=scheduler,
train_function=self.forward_unsupervised,
eval_function=self._accuracy,
task=self._task,
n_tasks=self._n_tasks,
disable_progressbar=self._disable_progressbar
)
def train_supervised(self, train_loader, val_loader):
logger.info("Finetuning ConvNet and classifier")
if not isinstance(self.supervised_config, list):
self.supervised_config = [self.supervised_config]
for config in self.supervised_config:
if not config["first_task"] and self._task == 0:
continue
if config.get("only_first_task") and self._task != 0:
continue
if config.get("min_task", 0) > self._task:
continue
if config.get("sampling", "none") == "undersample":
self._data_memory, self._targets_memory, _, _ = self.build_examplars(
self.inc_dataset, self._herding_indexes
)
loader = self.inc_dataset.get_memory_loader(*self.get_memory())
else:
loader = train_loader
if config.get("class_weights"):
logger.info("Computing class weights")
self._class_weights = self.get_class_weights(loader)
else:
self._class_weights = None
if config.get("update_gmmn", False) and self._task not in (0, self._n_tasks - 1):
self.train_gmmn()
self._setup_constraints()
if config.get("update_constraints", False) and self._task not in (0, self._n_tasks - 1):
self._setup_constraints()
if config.get("update_sdc", False) and self._task not in (0, self._n_tasks - 1):
logger.info("Update SDC")
old_features, _ = utils.extract_features(self._old_model, loader)
new_features, targets = utils.extract_features(self._network, loader)
drift = losses.semantic_drift_compensation(
torch.tensor(old_features).to(self._device),
torch.tensor(new_features).to(self._device),
torch.tensor(targets).to(self._device)
)
with torch.no_grad():
self._ghosts = (self._ghosts[0] + drift, self._ghosts[1])
if self._network.classifier._negative_weights is not None:
self._network.classifier._negative_weights = self._network.classifier._negative_weights + drift
if config.get("del_neg_weights", False):
logger.info("Disabling neg weights & ghosts.")
self._network.classifier.use_neg_weights = False
optimizer, scheduler = self.setup_training(config)
loops.single_loop(
loader,
val_loader,
self._multiple_devices,
self._network,
config["epochs"],
optimizer,
scheduler=scheduler,
train_function=self.forward_supervised,
eval_function=self._accuracy,
task=self._task,
n_tasks=self._n_tasks,
config=config,
disable_progressbar=self._disable_progressbar
)
if config.get("align_weights") and self._task > 0:
self._network.classifier.align_weights()
if config.get("del_neg_weights", False):
logger.info("Re-enabling neg weights & ghosts.")
self._network.classifier.use_neg_weights = True
def train_gmmn(self):
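        # Trains the word-embedding generator (GMMN) to map class embeddings to
        # visual-like features, matching per-class real features with an MMD loss;
        # optionally adds a linear transform on top of the generator.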
logger.info("Training generator GMMN")
config = self.gmmn_config
        # TODO: simplify this to_train selection.
if config.get("only_first") and self._task == 0:
to_train = True
elif config.get("only_first") and self._task > 0:
to_train = False
else:
to_train = True
if to_train:
if config.get("reinit", "always") == "always" \
or (config.get("reinit", "first") and self._task == 0):
logger.info("Reinit GMMN")
self._word_embeddings = Word2vec(
**self._args_we, data_path=self._w2v_path, device=self._device
)
elif config.get("reinit") not in ("always", "first"):
raise NotImplementedError(f"Unknown value for GMMN: {config.get('reinit')}.")
optimizer = factory.get_optimizer(
[{
"params": self._word_embeddings.parameters(),
"lr": config["lr"]
}], config["optimizer"], config["lr"], config.get("weight_decay", self._weight_decay)
)
if config.get("preprocessing"):
if isinstance(config["preprocessing"], list):
self._preprocessing = Scaler(config["preprocessing"])
elif config["preprocessing"] == "robust":
self._preprocessing = Scaler((0, 1), robust=1)
elif config["preprocessing"] == "robust_scaled":
self._preprocessing = Scaler((0, 1), robust=2)
elif config["preprocessing"] == "normalize":
self._preprocessing = Scaler((0, 1), normalize=True)
elif config["preprocessing"] == "normalize_only":
self._preprocessing = Scaler((0, 1), robust=-1, normalize=True)
elif config["preprocessing"] == "normalize_truncate":
self._preprocessing = Scaler((0, 1), robust=-1, normalize=True, truncate=True)
elif config["preprocessing"] == "l2":
self._preprocessing = Normalizer()
else:
raise ValueError(f"Unknown preprocessing: {config['preprocessing']}.")
self._visual_features, self._visual_targets = loops.perclass_loop(
self.inc_dataset,
list(range(0, self._n_classes)), # All seen classes
self._multiple_devices,
config["epochs"] if to_train else 0,
optimizer,
self.forward_gmmn,
self._task,
self._n_tasks,
network=self._network,
word_embeddings=self._word_embeddings,
target_to_word=self.get_class_label,
disable_progressbar=self._disable_progressbar,
scheduler=factory.get_lr_scheduler(
config.get("scheduling"), optimizer, nb_epochs=config["epochs"]
),
batch_size=config.get("batch_size", 128),
preprocessing=self._preprocessing,
memory_class_ids=[]
if self._task == 0 else list(range(self._n_classes - self._task_size)),
memory=self.get_memory(),
features_key=config.get("features_key", "raw_features")
)
if config.get("linear"):
self._word_embeddings.eval()
self._word_embeddings.add_linear_transform(bias=config["linear"]["bias"])
self._word_embeddings.linear_transform.train()
loops.linear_loop(
self._visual_features,
self._visual_targets,
self._multiple_devices,
config["linear"]["epochs"] if to_train else 0,
factory.get_optimizer(
[
{
"params": self._word_embeddings.linear_transform.parameters(),
"lr": config["linear"]["lr"]
}
], config["linear"]["optimizer"], config["linear"]["lr"],
config.get("weight_decay", self._weight_decay)
),
self.forward_gmmn_linear,
self._task,
self._n_tasks,
word_embeddings=self._word_embeddings,
target_to_word=self.get_class_label,
disable_progressbar=self._disable_progressbar,
scheduler=factory.get_lr_scheduler(
config["linear"].get("scheduling"),
optimizer,
nb_epochs=config["linear"]["epochs"]
),
batch_size=config["linear"].get("batch_size", 128),
normalize=config["linear"].get("normalize", False)
)
def train_autoencoder(self):
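        # Same role as train_gmmn, but with an adversarial autoencoder as the feature
        # generator.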
logger.info("Training generator Adverserial AutoEncoder")
config = self.autoencoder_config
        # TODO: simplify this to_train selection.
if config.get("only_first") and self._task == 0:
to_train = True
elif config.get("only_first") and self._task > 0:
to_train = False
else:
to_train = True
if to_train:
if config.get("reinit", "always") == "always" \
or (config.get("reinit", "first") and self._task == 0):
logger.info("Reinit AdvAutoEncoder")
self._autoencoder = AdvAutoEncoder(**self._args_ae, device=self._device)
elif config.get("reinit") not in ("always", "first"):
raise NotImplementedError(f"Unknown value for AdvAE: {config.get('reinit')}.")
if config.get("preprocessing"):
if isinstance(config["preprocessing"], list):
self._preprocessing = Scaler(config["preprocessing"])
elif config["preprocessing"] == "l2":
self._preprocessing = Normalizer()
else:
raise ValueError(f"Unknown preprocessing: {config['preprocessing']}.")
self._visual_features, self._visual_targets = loops.adv_autoencoder_loop(
self.inc_dataset,
list(range(0, self._n_classes)), # All seen classes
self._multiple_devices,
config["epochs"] if to_train else 0,
self._task,
self._n_tasks,
network=self._network,
autoencoder=self._autoencoder,
target_to_word=self.get_class_label,
disable_progressbar=self._disable_progressbar,
batch_size=config.get("batch_size", 128),
preprocessing=self._preprocessing,
memory_class_ids=[]
if self._task == 0 else list(range(self._n_classes - self._task_size)),
memory=self.get_memory()
)
def train_fake_classifier(self, config):
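        # Generates fake features for (a subset of) the unseen classes with the trained
        # generator, concatenates them with the stored real features, and finetunes the
        # classifier (or fits an SVM) on this mixed set.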
logger.info("Finetuning ConvNet and classifier")
if config.get("adversarial_classifier"):
self._domain_classifier = DomainClassifier(
self._network.convnet.out_dim, device=self._device
)
optimizer, scheduler = self.setup_training(
config,
additional_parameters={"domain_classifier": self._domain_classifier.parameters()}
)
else:
self._domain_classifier = None
optimizer, scheduler = self.setup_training(config)
logger.info("Finetuning fake classifier with offline generation.")
if self._word_embeddings is not None:
self._word_embeddings.eval()
else:
self._autoencoder.eval()
if "look_ahead" in config:
max_class = config.get("look_ahead") + self._n_classes
else:
max_class = self._total_n_classes
if isinstance(config["nb_samples"], int):
nb_samples = config["nb_samples"]
else:
nb_samples = int(torch.bincount(self._visual_targets).float().mean().cpu().item())
logger.info(f"Gen {nb_samples} based on mean bincount.")
fake_features, fake_targets = [], []
for class_id in range(self._n_classes, max_class):
targets = [class_id for _ in range(nb_samples)]
cifar_targets = self.get_class_label(targets).to(self._device)
with torch.no_grad():
if self._word_embeddings is not None:
fake_features.append(self._word_embeddings(cifar_targets))
else:
fake_features.append(self._autoencoder.generate(cifar_targets))
fake_targets.append(torch.tensor(targets).long().to(self._device))
fake_features = torch.cat(fake_features)
if self._word_embeddings is not None:
self._word_embeddings.train()
else:
self._autoencoder.train()
if config.get("preprocessing"):
logger.info("fake features preprocessing")
if isinstance(config["preprocessing"], list):
self._gen_preprocessing = Scaler(config["preprocessing"])
fake_features = self._gen_preprocessing.fit_transform(fake_features)
elif config["preprocessing"] == "l2":
self._gen_preprocessing = Normalizer()
fake_features = self._gen_preprocessing.fit_transform(fake_features)
elif config["preprocessing"] == "reuse":
self._gen_preprocessing = self._preprocessing
fake_features = self._preprocessing.transform(fake_features)
else:
raise ValueError(f"Unknown preprocessing: {config['preprocessing']}.")
features = torch.cat([self._visual_features, fake_features])
targets = torch.cat([self._visual_targets.to(self._device), *fake_targets])
flags = torch.cat((torch.ones(len(self._visual_features)), torch.zeros(len(fake_features))))
if not isinstance(config.get("class_weights"), str) and config.get("class_weights") is True:
logger.info("Computing class weights.")
np_targets = targets.cpu().numpy()
unique_targets = np.unique(np_targets)
class_weights = compute_class_weight('balanced', unique_targets, np_targets)
self._class_weights_fake = torch.tensor(class_weights).to(self._device).float()
elif config.get("class_weights", "") == "mean":
logger.info("Computing class weights MEAN mode.")
np_targets = self._visual_targets.cpu().numpy()
unique_targets = np.unique(np_targets)
class_weights = compute_class_weight('balanced', unique_targets, np_targets)
class_weights_unseen = np.ones(max_class - self._n_classes) * class_weights.mean()
class_weights = np.concatenate((class_weights, class_weights_unseen))
self._class_weights_fake = torch.tensor(class_weights).to(self._device).float()
elif config.get("class_weights", "") == "one":
logger.info("Computing class weights ONE mode.")
np_targets = self._visual_targets.cpu().numpy()
unique_targets = np.unique(np_targets)
class_weights = compute_class_weight('balanced', unique_targets, np_targets)
class_weights_unseen = np.ones(max_class - self._n_classes)
class_weights = np.concatenate((class_weights, class_weights_unseen))
self._class_weights_fake = torch.tensor(class_weights).to(self._device).float()
else:
self._class_weights_fake = None
if config.get("svm"):
logger.info("Learning SVM")
from sklearn.svm import SVC
self._svm = SVC(**config.get("svm"))
self._svm.fit(features.cpu().numpy(), targets.cpu().numpy())
else:
if config.get("next_epochs") and self._task > 0:
epochs = config["next_epochs"]
else:
epochs = config["epochs"]
loops.features_to_classifier_loop(
features,
targets,
flags,
epochs,
optimizer,
self._network.classifier,
self.forward_fakeclassifier,
scheduler=scheduler
)
# -----------------
# Losses definition
# -----------------
def forward_unsupervised(
self, training_network, inputs, targets, memory_flags, metrics, **kwargs
):
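        # Rotation-prediction (self-supervised) loss, plus POD distillation terms against
        # the old model when one exists.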
inputs = inputs.to(self._device)
loss, outputs = losses.unsupervised_rotations(inputs, memory_flags, training_network)
metrics["rot"] += loss.item()
for i in range(len(outputs["attention"])):
outputs["attention"][i] = outputs["attention"][i][:len(inputs)]
if self._old_model is not None:
with torch.no_grad():
old_outputs = self._old_model(inputs)
if self._pod_spatial_config:
if self._pod_spatial_config.get("scheduled_factor", False):
factor = self._pod_spatial_config["scheduled_factor"] * math.sqrt(
self._n_classes / self._task_size
)
else:
factor = self._pod_spatial_config.get("factor", 1.)
attention_loss = factor * losses.pod(
old_outputs["attention"],
outputs["attention"],
memory_flags=memory_flags.bool(),
task_percent=(self._task + 1) / self._n_tasks,
**self._pod_spatial_config
)
loss += attention_loss
metrics["att"] += attention_loss.item()
if self._pod_flat_config:
factor = self._pod_flat_config.get("factor", 1.)
if self._pod_flat_config.get("scheduled", False):
factor = factor * math.sqrt(self._n_classes / self._task_size)
distil_loss = factor * losses.embeddings_similarity(
outputs["raw_features"], old_outputs["raw_features"]
)
loss += distil_loss
self._metrics["flat"] += distil_loss.item()
return loss
def forward_supervised(
self, training_network, inputs, targets, memory_flags, metrics, epoch, epochs, config,
**kwargs
):
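        # Supervised loss: AMS/NCA (or cross-entropy) classification, optionally mixing
        # ghost features into the batch, plus ghost-placement, POD distillation, UCIR
        # ranking, hyperplane and semantic regularization terms.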
inputs = inputs.to(self._device)
if config.get("features_process") is not None:
            # TODO: create this Scaler once instead of on every forward pass.
scaler = Scaler(config.get("features_process"))
else:
scaler = None
loss = 0.
if self._task > 0 and self.ghost_config and self.ghost_config.get(
"negative_weights_percent"
) and not config.get("del_neg_weights", False):
percent = self.ghost_config["negative_weights_percent"]
if self._task == self._n_tasks - 1:
percent = self.ghost_config.get("negative_weights_percent_last", percent)
if isinstance(percent, str) and percent == "hardest":
percent = 0.
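            # NOTE: "hardest" was just overridden to 0., so the branch below that picks
            # the most similar ghost per target is effectively disabled.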
if isinstance(percent, str) and percent == "hardest":
ghost_targets = self.get_class_label(
list(range(self._n_classes + self._task_size, self._total_n_classes))
)
ghost_targets = ghost_targets.to(self._device).long()
ghost_similarities = self.similarity_matrix[targets.to(
self._device
)].index_select(dim=1, index=ghost_targets)
if len(ghost_targets) == 0:
additional_features = None
ghost_flags = None
ghost_batch_size = None
else:
most_similars = ghost_targets[ghost_similarities.max(dim=1)[1]]
most_similars = self.get_inv_class_label(most_similars).to(self._device)
additional_features = []
for real_t, ghost_t in zip(targets, most_similars):
indexes = self._ghosts[1] == ghost_t
sub_features = self._ghosts[0][indexes]
rnd_index = torch.randint(low=0, high=len(sub_features), size=(1,))[0]
additional_features.append(sub_features[rnd_index])
additional_features = torch.stack(additional_features)
targets = torch.cat((targets.cpu(), most_similars.cpu()))
ghost_flags = torch.cat(
(torch.ones(len(additional_features)), torch.zeros(len(targets)))
).to(self._device)
ghost_batch_size = len(additional_features)
elif percent == 0.:
additional_features = None
ghost_flags = None
ghost_batch_size = None
else:
batch_size = len(inputs)
ghost_batch_size = max(int(batch_size * percent), 1)
indexes = torch.randperm(len(self._ghosts[0]))[:ghost_batch_size]
additional_features = self._ghosts[0][indexes]
targets = torch.cat((targets, self._ghosts[1][indexes].cpu()), 0)
ghost_flags = torch.cat(
(torch.ones(batch_size), torch.zeros(len(additional_features)))
).to(self._device)
else:
additional_features = None
ghost_flags = None
ghost_batch_size = None
outputs = training_network(
inputs, features_processing=scaler, additional_features=additional_features
)
if self._task >= 2 and self.placement_config and config.get("ghost_placement"):
            # Starting from the third task, we should see the effect of the ghosts
            # on the classes they were mimicking. Here we want to enforce the new
            # classes to place themselves in the empty space left by the ghosts.
features = outputs[self.gmmn_config.get("features_key", "raw_features")]
placement_loss = losses.similarity_per_class(
features,
targets if ghost_batch_size is None else targets[:-ghost_batch_size],
*self._old_ghosts,
epoch=epoch,
epochs=epochs,
memory_flags=memory_flags,
old_centroids_features=self._c_features,
old_centroids_targets=self._c_targets,
**self.placement_config
)
if not isinstance(placement_loss, float):
metrics["plc"] += placement_loss.item()
loss += placement_loss
if config.get("only_ghost_placement", False):
return loss
elif self._task >= 2 and self.adv_placement_config and config.get("ghost_placement"):
real_features = outputs[self.gmmn_config.get("features_key", "raw_features")]
real_features = real_features[~memory_flags.bool()]
if len(real_features) == 0:
# Batch is made only of memory data, rare but can happen.
loss += 0.
else:
ghost_features = self._old_ghosts[0]
real_targets = torch.ones(len(real_features)).float().to(self._device)
ghost_targets = torch.zeros(len(ghost_features)).float().to(self._device)
domain_features = torch.cat((real_features, ghost_features))
domain_targets = torch.cat((real_targets, ghost_targets))
domain_logits = self._network.domain_classifier(domain_features)
adv_plc_loss = F.binary_cross_entropy_with_logits(
domain_logits.squeeze(1), domain_targets
)
adv_plc_loss = self.adv_placement_config["factor"] * adv_plc_loss
if self.adv_placement_config["scheduled"]:
adv_plc_loss = (1 - epoch / epochs) * adv_plc_loss
metrics["advPlc"] += adv_plc_loss.item()
loss += adv_plc_loss
if config.get("only_ghost_placement", False):
return loss
if self._nca_config:
nca_config = copy.deepcopy(self._nca_config)
if self.ghost_config:
nca_config.update(self.ghost_config.get("ams_loss", {}))
if self._network.post_processor:
nca_config["scale"] = self._network.post_processor.factor
loss += losses.nca(
outputs["logits"],
targets,
class_weights=self._class_weights,
memory_flags=ghost_flags,
**nca_config
)
metrics["ams"] += loss.item()
elif self._softmax_ce:
loss += F.cross_entropy(
self._network.post_process(outputs["logits"]), targets.to(outputs["logits"].device)
)
metrics["cce"] += loss.item()
else:
raise ValueError("No classification loss defined!")
if self._task > 0 and self.ucir_ranking_config:
if ghost_batch_size is not None:
r_logits = outputs["logits"][:-ghost_batch_size]
r_targets = targets[:-ghost_batch_size]
else:
r_logits = outputs["logits"]
r_targets = targets
ranking_loss = self.ucir_ranking_config.get("factor", 1.0) * losses.ucir_ranking(
r_logits, r_targets.to(r_logits.device), self._n_classes, self._task_size
)
metrics["rnk"] += ranking_loss.item()
loss += ranking_loss
if self._old_model is not None:
with torch.no_grad():
old_outputs = self._old_model(inputs)
if self._pod_spatial_config:
if self._pod_spatial_config.get("scheduled_factor", False):
factor = self._pod_spatial_config["scheduled_factor"] * math.sqrt(
self._n_classes / self._task_size
)
else:
factor = self._pod_spatial_config.get("factor", 1.)
attention_loss = factor * losses.pod(
old_outputs["attention"],
outputs["attention"],
task_percent=(self._task + 1) / self._n_tasks,
**self._pod_spatial_config
)
loss += attention_loss
metrics["att"] += attention_loss.item()
if self._pod_flat_config:
factor = self._pod_flat_config.get("factor", 1.)
if self._pod_flat_config.get("scheduled", False):
factor = factor * math.sqrt(self._n_classes / self._task_size)
distil_loss = factor * losses.embeddings_similarity(
outputs["raw_features"], old_outputs["raw_features"]
)
loss += distil_loss
metrics["flat"] += distil_loss.item()
if self._task != 0 and self._task != self._n_tasks - 1:
if self.hyperplan_config:
type_ = self.hyperplan_config.get("type", "ortho_abs")
factor = self.hyperplan_config["factor"]
if self.hyperplan_config.get("scheduled_factor", False):
factor = factor * math.sqrt(self._total_n_classes - self._n_classes)
features = outputs[self.gmmn_config.get("features_key", "raw_features")]
if not self.hyperplan_config.get("apply_on_new", True):
# Only applying on memory samples
old_classes = list(range(self._n_classes - self._task_size))
indexes = np.where(np.isin(targets.cpu(), old_classes))[0]
features = features[indexes]
if len(features) == 0:
                    # Can happen if we don't apply the regularization on new classes
                    # and the batch does not contain a single memory sample.
metrics["hyper"] += 0.
else:
if self.hyperplan_config.get("normalize_features", True):
features = F.normalize(features, dim=1, p=2)
if self._svm_weights is None:
simi = torch.mm(features, self._hyperplan.T)
if self.hyperplan_config.get("add_bias", False):
simi = simi + self._hyperplan_bias
simi = simi.view(-1)
else:
simi = []
for sv, gamma, dual_coef, intercept in zip(
self._svm_weights["sv"], self._svm_weights["gamma"],
self._svm_weights["dual_coef"], self._svm_weights["intercept"]
):
diff = sv[None, :, :] - features[:, None, :]
tmp = torch.exp(-gamma * diff.norm(dim=-1)**2)
dec = dual_coef.mm(tmp.T) - intercept
simi.append(dec.view(-1))
simi = torch.cat(simi)
# simi should be in [-1 + b, +1 + b]
if type_ == "anticorrelation":
hinges = torch.clamp(simi + 1., min=0.)
elif type_ == "anticorrelation_neg":
hinges = torch.clamp(simi, min=0.)
elif type_ in ("need_neg", "boundary"):
                        # Check if the point is on the correct side of the hyperplane
hinges = torch.clamp(simi, min=0)
elif type_ == "support_vectors":
# Check if the point is beyond its class support vectors
hinges = torch.clamp(simi + 1, min=0)
else:
raise NotImplementedError(f"Unknow type {type_}.")
if self.hyperplan_config.get("adamine"):
nb_not_null = torch.nonzero(torch.clamp(hinges - 1e-6, min=0.)).shape[0]
nb_not_null = max(nb_not_null, 1)
hyper_loss = torch.sum(hinges) / nb_not_null
else:
hyper_loss = torch.mean(hinges)
hyper_loss = factor * hyper_loss
if self.hyperplan_config.get("scheduled"):
hyper_loss = hyper_loss * math.sqrt(self._n_classes / self._task_size)
metrics["hyper"] += hyper_loss.item()
loss += hyper_loss
elif self.ghost_config and self.ghost_config.get("factor"):
features = outputs[self.gmmn_config.get("features_key", "raw_features")]
ghost_reg = ghost_semantic_regularization(
features, targets, *self._ghosts, self.similarity_matrix, **self.ghost_config
)
if self.ghost_config.get("scheduled_factor", False):
factor = math.sqrt(self._total_n_classes - self._n_classes)
ghost_reg = factor * ghost_reg
metrics["gho"] += ghost_reg.item()
loss += ghost_reg
if self.real_config:
features = outputs[self.gmmn_config.get("features_key", "raw_features")]
real_reg = semantic_regularization(
features, targets, self.similarity_matrix, **self.real_config
)
metrics["rea"] += real_reg.item()
loss += real_reg
if torch.isnan(loss):
raise ValueError(f"Nan loss in {str(metrics)}")
return loss
def forward_gmmn(self, visual_features, semantic_features, class_id, words, metrics):
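        # MMD between real visual features and generated semantic features, plus an
        # optional distillation term towards the previous word embeddings.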
loss = mmd(real=visual_features, fake=semantic_features, **self.gmmn_config["mmd"])
if self.gmmn_config.get("old_mmd") and self._old_word_embeddings is not None:
old_unseen_limit = self._n_classes - self._task_size
if not self.gmmn_config["old_mmd"].get(
"apply_unseen", False
) and class_id >= old_unseen_limit:
return loss
with torch.no_grad():
old_semantic_features = self._old_word_embeddings(words)
factor = self.gmmn_config["old_mmd"]["factor"]
_type = self.gmmn_config["old_mmd"].get("type", "mmd")
if _type == "mmd":
old_loss = factor * mmd(
real=old_semantic_features, fake=semantic_features, **self.gmmn_config["mmd"]
)
elif _type == "kl":
old_loss = factor * F.kl_div(
semantic_features, old_semantic_features, reduction="batchmean"
)
elif _type == "l2":
old_loss = factor * torch.pairwise_distance(
semantic_features, old_semantic_features, p=2
).mean()
elif _type == "cosine":
old_loss = factor * (
1 - torch.cosine_similarity(semantic_features, old_semantic_features)
).mean()
else:
raise ValueError(f"Unknown distillation: {_type}.")
if self.gmmn_config.get("scheduled"):
old_loss = old_loss * math.sqrt(self._n_classes / self._task_size)
metrics["old"] += old_loss.item()
return loss + old_loss
return loss
def forward_gmmn_linear(self, visual_features, semantic_features):
return F.mse_loss(visual_features, semantic_features)
def forward_fakeclassifier(self, logits, targets, flags, metrics):
if self._nca_config:
nca_config = copy.deepcopy(self._nca_config)
nca_config.update(self.fakeclassifier_config.get("loss", {}))
if self._network.post_processor:
nca_config["scale"] = self._network.post_processor.factor
try:
loss = losses.nca(
logits,
targets,
class_weights=self._class_weights_fake,
memory_flags=flags,
**nca_config,
)
            except Exception:
                # Debugging hook left in place: inspect the inputs if the NCA loss raises.
                breakpoint()
elif self._softmax_ce:
loss = F.cross_entropy(self._network.post_process(logits), targets)
else:
raise ValueError("No classification loss defined!")
metrics["clf"] += loss.item()
if self._domain_classifier is not None:
weights = self._network.classifier.weights
domain_logits = self._domain_classifier(weights)
nb_unseen = self._total_n_classes - self._n_classes
domain_targets = torch.ones(self._total_n_classes).float()
domain_targets[-nb_unseen:] = 0
factor = self.fakeclassifier_config["training"]["adversarial_classifier"]["factor"]
domain_loss = factor * F.binary_cross_entropy_with_logits(
domain_logits.view(-1), domain_targets.to(domain_logits.device)
)
metrics["adv"] += domain_loss.item()
loss += domain_loss
return loss
def get_class_weights(self, loader):
targets = []
for input_dict in loader:
targets.append(input_dict["targets"])
targets = torch.cat(targets).cpu().numpy()
unique_targets = np.unique(targets)
class_weights = compute_class_weight('balanced', unique_targets, targets)
return torch.tensor(class_weights).to(self._device).float()
def get_class_weights_raw(self, targets):
unique_targets = np.unique(targets)
class_weights = compute_class_weight('balanced', unique_targets, targets)
return torch.tensor(class_weights).to(self._device).float()
# -----------
# Constraints
# -----------
def _setup_constraints(self):
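        # Builds the class-similarity matrix from the word embeddings, regenerates the
        # ghost features, and optionally fits hyperplane constraints (linear classifier
        # or SVM) separating real from ghost features.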
if self._word_embeddings is None:
return
self.similarity_matrix = generate_similarity_matrix(
self._word_embeddings, self.get_class_label(list(range(self._total_n_classes)))
)
if self.ghost_config:
self._word_embeddings.eval()
if self._ghosts is not None:
self._old_ghosts = copy.deepcopy(self._ghosts)
self._ghosts = self._gen_ghost_features(self.ghost_config)
self._word_embeddings.train()
if self.hyperplan_config:
assert self.ghost_config
if self.hyperplan_config.get("linear_nn"):
self._hyperplan = self._gen_linear_hyperplan_constraints(
self._ghosts[0],
self._ghosts[1],
one_per_ghost=self.hyperplan_config.get("one_per_ghost", False),
epochs=self.hyperplan_config.get("epochs", 10),
class_weights=self.hyperplan_config.get("class_weights", False),
apply_current=self.hyperplan_config.get("apply_current", True),
flip=self.hyperplan_config.get("flip", False)
)
self._hyperplan_bias = None
else:
self._hyperplan, self._hyperplan_bias = self._gen_hyperplan_constraints(
self._ghosts[0],
self._ghosts[1],
C=self.hyperplan_config.get("C", 1.0),
end_normalize=self.hyperplan_config.get("end_normalize", False),
end_normalize_bias=self.hyperplan_config.get(
"end_normalize_bias", self.hyperplan_config.get("end_normalize", False)
),
normalize_features=self.hyperplan_config.get("normalize_features", True),
one_per_ghost=self.hyperplan_config.get("one_per_ghost", False),
apply_current=self.hyperplan_config.get("apply_current", True),
flip=self.hyperplan_config.get("flip", False),
kernel=self.hyperplan_config.get("kernel", "linear")
)
else:
self._hyperplan = None
self._hyperplan_bias = None
def _gen_linear_hyperplan_constraints(
self,
ghost_features,
ghost_targets,
one_per_ghost=False,
epochs=10,
class_weights=False,
apply_current=True,
flip=False
):
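        # Learns binary cosine classifier(s) separating real features (label 0) from
        # ghost features (label 1); returns the L2-normalized weight(s) used as
        # hyperplane constraints.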
logger.info("Generating linear hyperplan constraint.")
if apply_current: # Just previous tasks
classes = list(range(self._n_classes - self._task_size, self._n_classes))
else:
classes = []
_, loader = self.inc_dataset.get_custom_loader(classes, memory=self.get_memory())
real_features = utils.extract_features(self._network, loader)[0]
if flip:
_, loader = self.inc_dataset.get_custom_loader(
classes, memory=self.get_memory(), mode="flip"
)
real_features_flipped = utils.extract_features(self._network, loader)[0]
real_features = np.concatenate((real_features, real_features_flipped))
real_features = torch.tensor(real_features).float().to(self._device)
real_targets = torch.zeros(len(real_features)).long().to(self._device)
if one_per_ghost:
hyperplans = []
for target in torch.unique(ghost_targets):
indexes = ghost_targets == target
sub_f = ghost_features[indexes]
sub_t = torch.ones((len(indexes))).long().to(self._device)
clf = BinaryCosineClassifier(real_features.shape[1]).to(self._device)
opt = torch.optim.Adam(clf.parameters(), lr=0.001)
                # NOTE: do not fill ghost_targets in-place here: it would corrupt the
                # per-class masks of later iterations (sub_t already holds the binary labels).
features = torch.cat((real_features, sub_f))
targets = torch.cat((real_targets, sub_t)).float().to(self._device)
if class_weights:
cw = self.get_class_weights_raw(targets.cpu().numpy())
else:
cw = None
def func_loss(feats, t, fl, m):
if cw is None:
weight = None
else:
weight = cw[t.long()]
return F.binary_cross_entropy_with_logits(feats.squeeze(1), t, weight=weight)
loops.features_to_classifier_loop(
features, targets, None, epochs, opt, clf, func_loss, disable_progressbar=True
)
w = F.normalize(clf.weight.data, dim=1, p=2)
hyperplans.append(w)
return torch.cat(hyperplans)
else:
clf = BinaryCosineClassifier(real_features.shape[1]).to(self._device)
opt = torch.optim.Adam(clf.parameters(), lr=0.001)
ghost_targets.fill_(1).long().to(self._device)
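            # fill_ relabels the ghost targets to 1 in-place for the binary real-vs-ghost
            # task; the trailing .long().to(...) result is discarded and the caller's
            # tensor is mutated.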
features = torch.cat((real_features, ghost_features))
targets = torch.cat((real_targets, ghost_targets)).float().to(self._device)
if class_weights:
cw = self.get_class_weights_raw(targets.cpu().numpy())
else:
cw = None
def func_loss(feats, t, fl, m):
if cw is None:
weight = None
else:
weight = cw[t.long()]
return F.binary_cross_entropy_with_logits(feats.squeeze(1), t, weight=weight)
loops.features_to_classifier_loop(
features, targets, None, epochs, opt, clf, func_loss, disable_progressbar=True
)
return F.normalize(clf.weight.data, dim=1, p=2)
def _gen_hyperplan_constraints(
self,
ghost_features,
ghost_targets,
C=1.0,
end_normalize=False,
end_normalize_bias=False,
normalize_features=True,
one_per_ghost=False,
apply_current=True,
flip=False,
kernel="linear"
):
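        # Fits an SVM separating real features (label 0) from ghost features (label 1);
        # a linear kernel yields explicit hyperplane weights/biases, other kernels store
        # support vectors, gammas, dual coefficients and intercepts for later use.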
self._svm_weights = None
logger.info("Generating hyperplan constraint.")
if apply_current: # Just previous task
classes = list(range(self._n_classes - self._task_size, self._n_classes))
else:
classes = []
_, loader = self.inc_dataset.get_custom_loader(classes, memory=self.get_memory())
real_features = utils.extract_features(self._network, loader)[0]
if flip:
_, loader = self.inc_dataset.get_custom_loader(
classes, memory=self.get_memory(), mode="flip"
)
real_features_flipped = utils.extract_features(self._network, loader)[0]
real_features = np.concatenate((real_features, real_features_flipped))
real_targets = np.zeros(len(real_features))
ghost_features = ghost_features.cpu().numpy()
if one_per_ghost:
ghost_targets = ghost_targets.cpu().numpy()
if kernel == "linear":
hyperplans, biases = [], []
else:
self._svm_weights = collections.defaultdict(list)
for class_id in np.unique(ghost_targets):
tmp_ghost_features = ghost_features[np.where(ghost_targets == class_id)[0]]
tmp_features = np.concatenate((real_features, tmp_ghost_features))
if normalize_features:
tmp_features = tmp_features / np.linalg.norm(
tmp_features, axis=1, keepdims=True
)
tmp_targets = np.concatenate((real_targets, np.ones(len(tmp_ghost_features))))
svm = SVC(C=C, kernel=kernel, gamma="scale")
svm.fit(tmp_features, tmp_targets)
if kernel == "linear":
hyperplans.append(torch.tensor(svm.coef_[0]).float().to(self._device)[None])
biases.append(torch.tensor(svm.intercept_[0]).float().to(self._device))
else:
self._svm_weights["sv"].append(
torch.tensor(svm.support_vectors_).float().to(self._device)
)
self._svm_weights["gamma"].append(
1 / (tmp_features.shape[1] * tmp_features.var())
)
self._svm_weights["dual_coef"].append(
torch.tensor(svm.dual_coef_).float().to(self._device)
)
self._svm_weights["intercept"].append(
torch.tensor(svm.intercept_).float().to(self._device)
)
if kernel == "linear":
hyperplan = torch.cat(hyperplans)
bias = torch.stack(biases)
else:
hyperplan, bias = None, None
else:
ghost_targets = np.ones(len(ghost_features))
features = np.concatenate((real_features, ghost_features))
if normalize_features:
features = features / np.linalg.norm(features, axis=1, keepdims=True)
targets = np.concatenate((real_targets, ghost_targets))
svm = SVC(C=C, kernel=kernel, gamma="scale")
svm.fit(features, targets)
acc = svm.score(features, targets)
logger.info(f"SVM got {acc} on the train set (binary, real vs ghost).")
if kernel == "linear":
hyperplan = torch.tensor(svm.coef_[0]).float().to(self._device)[None]
bias = torch.tensor(svm.intercept_[0]).float().to(self._device)
else:
self._svm_weights = {
"sv":
[torch.tensor(svm.support_vectors_).float().to(self._device)],
"gamma":
[torch.tensor([1 / (features.shape[1] * features.var())]).float().to(self._device)],
"dual_coef":
[torch.tensor(svm.dual_coef_).float().to(self._device)],
"intercept":
[torch.tensor(svm.intercept_).float().to(self._device)]
}
hyperplan, bias = None, None
if end_normalize:
hyperplan = F.normalize(hyperplan, dim=1, p=2)
if end_normalize_bias:
if len(bias.shape) > 1:
bias = F.normalize(bias, dim=1, p=2)
else:
bias = F.normalize(bias, dim=0, p=2)
return hyperplan, bias
def _gen_ghost_features(self, config):
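        # Produces "ghost" features for future classes, either by cheating (real future
        # pixels/features or a checkpoint trained ahead) or by sampling the word-embedding
        # generator; optional alignment, per-class averaging, subsampling and
        # negative-weight registration.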
if config.get("nb_unseen_classes") is not None:
classes_to_create = list(
range(self._n_classes, self._n_classes + config["nb_unseen_classes"])
)
else:
classes_to_create = list(range(self._n_classes, self._total_n_classes))
if config.get("cheat"):
logger.info("Custom cheat for ghosts")
# Test if real future taken in the past are really better than our fakes
# This should be our upperbound
if config["cheat"] == "pixels":
logger.info("Cheat at pixel-level")
_, loader = self.inc_dataset.get_custom_loader(classes_to_create)
x, y, m = loader.dataset.x, loader.dataset.y, loader.dataset.memory_flags
self._cheat_pixels = (x, y, m)
f, t = utils.extract_features(self._network, loader)
features = torch.tensor(f).to(self._device)
targets = torch.tensor(t).long().to(self._device)
else:
if config["cheat"] == "own":
martymcfly = self._network
else:
martymcfly = network.BasicNet(
self.args["convnet"],
convnet_kwargs=self.args.get("convnet_config", {}),
classifier_kwargs=self.args.get("classifier_config"),
postprocessor_kwargs=self.args.get("postprocessor_config", {}),
device=self._device,
extract_no_act=True,
classifier_no_act=self.args.get("classifier_no_act", True),
return_features=True,
attention_hook=True,
rotations_predictor=True
)
state_dict = torch.load(
os.path.join(config["cheat"], f"net_0_task_{self._task + 1}.pth")
)
for key in list(state_dict.keys()):
if "classifier" in key:
del state_dict[key]
martymcfly.load_state_dict(state_dict, strict=True)
features, targets = [], []
for class_id in classes_to_create:
_, loader = self.inc_dataset.get_custom_loader([class_id])
f, t = utils.extract_features(martymcfly, loader)
if config["amount_per_class"] is not None:
indexes = np.arange(len(f))
indexes = np.random.choice(indexes, size=config["amount_per_class"])
f = f[indexes]
t = t[indexes]
features.append(f)
targets.append(t)
features = np.concatenate(features)
targets = np.concatenate(targets)
features = torch.tensor(features).to(self._device)
targets = torch.tensor(targets).long().to(self._device)
else:
features, targets = [], []
for class_id in classes_to_create:
class_ids = [class_id for _ in range(config["amount_per_class"])]
real_class_ids = self.get_class_label(class_ids).to(self._device)
with torch.no_grad():
features.append(self._word_embeddings(real_class_ids))
targets.extend(class_ids)
features = torch.cat(features).to(self._device)
targets = torch.tensor(targets).long().to(self._device)
if config.get("inverse_transform") and self._preprocessing is not None:
logger.info("Inverse transform of ghost features.")
features = self._preprocessing.inverse_transform(features)
if config.get("align_features_per_class"):
logger.info("Aligning features per class")
new_features, new_targets = [], []
for t in torch.unique(targets):
indexes = t == targets
new_features.append(self._network.classifier.align_features(features[indexes]))
new_targets.append(targets[indexes])
features = torch.cat(new_features)
targets = torch.cat(new_targets)
elif config.get("align_features"):
logger.info("Aligning features")
features = self._network.classifier.align_features(features)
if config.get("average_per_class"):
f, t = [], []
for i in classes_to_create:
indexes = targets == i
f.append(features[indexes].mean(dim=0))
t.append(i)
avg_features = torch.stack(f)
if config.get("subsample_per_class"):
f, t = [], []
for i in classes_to_create:
indexes = np.where(targets.cpu().numpy() == i)[0]
indexes = np.random.choice(
indexes, size=config["subsample_per_class"], replace=False
)
f.append(features[indexes])
t.append(targets[indexes])
features = torch.cat(f)
targets = torch.cat(t)
if config.get("negative_weights", False):
self._network.classifier.set_negative_weights(
avg_features, config["negative_weights_ponderation"]
)
return features, targets
def get_fake_weights(
self, real_weights, nb_samples=100, method="word_embeddings", weight_norm=True, **kwargs
):
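        # Generates classifier weights for the unseen classes, either by averaging
        # generator samples per class or randomly, optionally rescaled to the mean norm
        # of the real weights.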
classes_to_create = list(range(self._n_classes, self._total_n_classes))
if method == "word_embeddings":
self._word_embeddings.eval()
weights = []
for class_id in classes_to_create:
class_id = [class_id for _ in range(nb_samples)]
real_class_id = self.get_class_label(class_id).to(self._device)
weights.append(self._word_embeddings(real_class_id).mean(dim=0, keepdims=True))
weights = torch.cat(weights, dim=0)
self._word_embeddings.train()
elif method == "random":
weights = torch.randn(len(classes_to_create), 128).float().to(self._device)
else:
raise NotImplementedError(
"Unknown method {} to generate unseen weights.".format(method)
)
if weight_norm:
avg_weights_norm = torch.mean(real_weights.data.norm(dim=1, keepdim=True))
weights.mul_(avg_weights_norm)
return weights
def _accuracy(self, loader):
ypred, ytrue = self._eval_task(loader)
ypred = ypred.argmax(axis=1)
return 100 * round(np.mean(ypred == ytrue), 3)
def semantic_regularization(
features, targets, similarity_matrix, margin=None, aggreg="mean", factor=1.0, metric="cosine"
):
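    # For each sample, draws one negative of a different class and penalizes their
    # similarity above a margin taken from the class-similarity matrix (or a fixed one),
    # with cosine, GOR or SNR as the underlying metric.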
pair_indexes = []
np_targets = targets.cpu().numpy()
for index, target in enumerate(np_targets):
neg_indexes = np.where(np_targets != target)[0]
neg_index = np.random.choice(neg_indexes)
pair_indexes.append(tuple(sorted((index, neg_index))))
pair_indexes_ = list(set(pair_indexes))
pair_indexes = torch.tensor(pair_indexes_).long()
left = features[pair_indexes[..., 0]]
right = features[pair_indexes[..., 1]]
if metric == "cosine":
similarities = F.cosine_similarity(left, right)
if margin is not None:
margins = torch.ones_like(similarities) * margin
else:
            margins = similarity_matrix[
                targets[pair_indexes[..., 0]], targets[pair_indexes[..., 1]]
            ]
hinges = torch.clamp(similarities - margins, min=0.)
return factor * _aggreg(hinges, aggreg, features_dim=features.shape[1])
elif metric == "gor":
similarities = torch.sum(torch.mul(left, right), 1)
return factor * _aggreg(similarities, aggreg, features_dim=features.shape[1])
elif metric == "snr":
noise = left - right
var_noise = noise.var(axis=1, unbiased=True)
var_anchor = right.var(axis=1, unbiased=True)
dist = torch.mean(var_anchor / var_noise)
return factor * dist
else:
raise NotImplementedError(f"Unknown metric: {metric}.")
def ghost_semantic_regularization(
features,
targets,
ghost_features,
ghost_targets,
similarity_matrix,
margin=None,
aggreg="mean",
factor=1.0,
metric="cosine",
scale_real=False,
scale_ghost=False,
normalize=False,
triplet=False,
against_all=None,
**kwargs
):
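    # Same idea as semantic_regularization, but the negatives are ghost features of
    # unseen classes; supports triplet and "against all ghosts" variants.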
    if scale_real:
        # scale() returns a new tensor, so the result must be assigned back.
        features = scale(features, (0, 1))
    if scale_ghost:
        ghost_features = scale(ghost_features, (0, 1))
if normalize:
features = F.normalize(features, p=2, dim=-1)
ghost_features = F.normalize(ghost_features, p=2, dim=-1)
if triplet:
# Anchor-positive distances
dists = -torch.mm(features, features.T)
indexes_not_equal = ~torch.eye(len(targets)).bool().to(features.device)
labels_equal = targets.unsqueeze(0) == targets.unsqueeze(1)
mask = indexes_not_equal & labels_equal.to(features.device)
ap = (dists.to(features.device) * mask.to(features.device).float()).max(dim=1)[0]
# Anchor-negative distances
an = -torch.mm(features, ghost_features.T)
an = an.min(dim=1)[0]
hinges = torch.clamp(margin + ap - an, min=0.)
return _aggreg(hinges, aggreg, features_dim=features.shape[1])
elif against_all is not None:
assert normalize
if margin is None:
margin = 0.
similarities = torch.mm(features, ghost_features.T)
if isinstance(against_all, int):
similarities = similarities.topk(against_all, dim=1)[0]
hinges = torch.clamp(similarities.view(-1) - margin, min=0.)
return _aggreg(hinges, aggreg, features_dim=features.shape[1])
else:
neg_indexes = []
np_targets = targets.cpu().numpy()
for index, target in enumerate(np_targets):
neg_index = np.random.choice(len(ghost_targets))
neg_indexes.append(neg_index)
selected_ghosts_features = ghost_features[neg_indexes]
selected_ghosts_targets = ghost_targets[neg_indexes]
if metric == "cosine":
similarities = F.cosine_similarity(features, selected_ghosts_features)
if margin is not None:
margins = torch.ones_like(similarities) * margin
else:
margins = similarity_matrix[targets, selected_ghosts_targets]
hinges = torch.clamp(similarities - margins, min=0.)
return factor * _aggreg(hinges, aggreg, features_dim=features.shape[1])
elif metric == "gor":
similarities = torch.sum(torch.mul(features, selected_ghosts_features), 1)
return factor * _aggreg(similarities, aggreg, features_dim=features.shape[1])
elif metric == "snr":
noise = selected_ghosts_features - features
var_noise = noise.var(axis=1, unbiased=True)
var_anchor = features.var(axis=1, unbiased=True)
dist = torch.mean(var_anchor / var_noise)
return factor * dist
else:
raise NotImplementedError(f"Unknown metric: {metric}.")
def _aggreg(hinges, aggreg_method, features_dim):
if isinstance(aggreg_method, int):
return torch.mean(torch.topk(hinges, k=aggreg_method)[0])
elif aggreg_method == "mean":
return torch.mean(hinges)
elif aggreg_method == "adamine":
nb_not_null = (torch.clamp(hinges - 1e-6, min=0.) != 0.).sum()
if nb_not_null == 0.:
nb_not_null = 1.
return torch.sum(hinges) / nb_not_null
elif aggreg_method == "gor":
first_moment = torch.mean(hinges)
second_moment = torch.mean(torch.pow(hinges, 2))
return torch.pow(first_moment, 2) + torch.clamp(second_moment - 1. / features_dim, min=0.)
raise NotImplementedError("Unknown aggreg {}.".format(aggreg_method))
def generate_similarity_matrix(word_embeddings, class_ids):
classes = class_ids.clone().detach().long().to(word_embeddings.device)
with torch.no_grad():
embeddings = word_embeddings.forward(classes, only_word=True)
embeddings = F.normalize(embeddings, dim=1, p=2)
return torch.mm(embeddings, embeddings.t())
def mmd(
fake,
real,
sigmas=[2, 5, 10, 20, 40, 80],
normalize=True,
scale_matrix=False,
bucher=False,
**kwargs
):
"""Maximum Mean Discrepancy with several Gaussian kernels."""
if normalize:
real = F.normalize(real, dim=1, p=2)
#fake = F.normalize(fake, dim=1, p=2)
if bucher:
return moment_loss(fake, real, sigma=sigmas, device=real.device)
xy = torch.cat((fake, real), dim=0)
if scale_matrix:
scale = get_scale_matrix(len(fake), len(real), real.device)
scale = torch.matmul(scale, scale.t())
else:
scale = 1.
xx = torch.mm(xy, xy.t())
x2 = torch.sum(xx**2, dim=1, keepdim=True)
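    # NOTE: the usual Gaussian-kernel exponent uses the samples' squared norms,
    # i.e. torch.sum(xy ** 2, dim=1, keepdim=True); squaring the Gram matrix here is
    # likely a typo (compare with moment_loss below, which uses the standard form).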
exponent = xx - 0.5 * x2 - 0.5 * x2.t()
loss = 0.
for sigma in sigmas:
kernel_val = torch.exp(exponent / sigma)
loss += torch.sum(scale * kernel_val)
if torch.isnan(loss):
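        # Debugging hook: drop into the debugger if the MMD loss becomes NaN.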
breakpoint()
return torch.sqrt(loss)
def get_scale_matrix(M, N, device):
s1 = torch.ones((N, 1)) * 1.0 / N
s2 = torch.ones((M, 1)) * -1.0 / M
s1, s2 = s1.to(device), s2.to(device)
return torch.cat((s1, s2), 0)
def moment_loss(gen_samples, x, sigma, device):
X = torch.cat((gen_samples, x), 0)
XX = torch.matmul(X, X.t())
X2 = torch.sum(X * X, 1, keepdim=True)
exp = XX - 0.5 * X2 - 0.5 * X2.t()
M = gen_samples.size()[0]
N = x.size()[0]
s = get_scale_matrix(M, N, device)
S = torch.matmul(s, s.t())
loss = 0
for v in sigma:
kernel_val = torch.exp(exp / v)
loss += torch.sum(S * kernel_val)
return torch.sqrt(loss)
@functools.lru_cache(maxsize=1, typed=False)
def _get_mmd_sigmas(sigmas, device):
sigmas = torch.tensor(sigmas)[:, None, None].to(device).float()
return -1 / (2 * sigmas)
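# NOTE: this cached definition overrides the get_scale_matrix defined above.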
@functools.lru_cache(maxsize=1, typed=False)
def get_scale_matrix(M, N, device):
s1 = (torch.ones((N, 1)) * 1.0 / N)
s2 = (torch.ones((M, 1)) * -1.0 / M)
return torch.cat((s1, s2), 0).to(device)
def scale(tensor, feature_range=(0, 1)):
data_min = torch.min(tensor, dim=0)[0]
data_max = torch.max(tensor, dim=0)[0]
data_range = data_max - data_min
# Handle null values
data_range[data_range == 0.] = 1.
scale_ = (feature_range[1] - feature_range[0]) / data_range
min_ = feature_range[0] - data_min * scale_
return tensor.mul(scale_).add_(min_)
class Scaler:
"""
Transforms each channel to the range [a, b].
"""
def __init__(self, feature_range, robust=0, normalize=False, truncate=False):
self.feature_range = feature_range
self.robust = robust
self.normalize = normalize
self.truncate = truncate
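        # robust semantics: 0 -> min-max only, 1 -> RobustScaler only,
        # 2 -> RobustScaler followed by min-max, -1 -> neither (use with normalize).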
if self.robust:
self.skprepro = skpreprocessing.RobustScaler()
def fit(self, tensor):
if self.normalize:
self.mu, self.sigma = tensor.mean(dim=0), tensor.std(dim=0)
tensor = (tensor - self.mu.expand_as(tensor)) / self.sigma.expand_as(tensor)
if self.truncate:
tensor = tensor.clamp(min=self.feature_range[0], max=self.feature_range[1])
if self.robust > 0:
device = tensor.device
tensor = tensor.cpu().numpy()
tensor = self.skprepro.fit_transform(tensor)
tensor = torch.tensor(tensor).to(device)
if self.robust == 0 or self.robust == 2:
data_min = torch.min(tensor, dim=0)[0]
data_max = torch.max(tensor, dim=0)[0]
data_range = data_max - data_min
# Handle null values
data_range[data_range == 0.] = 1.
self.scale_ = (self.feature_range[1] - self.feature_range[0]) / data_range
self.min_ = self.feature_range[0] - data_min * self.scale_
self.data_min_ = data_min
self.data_max_ = data_max
self.data_range_ = data_range
return self
def transform(self, tensor):
if self.normalize:
tensor = (tensor - self.mu.expand_as(tensor)) / self.sigma.expand_as(tensor)
if self.robust > 0:
device = tensor.device
tensor = tensor.cpu().numpy()
tensor = self.skprepro.transform(tensor)
tensor = torch.tensor(tensor).to(device)
if self.robust == 0 or self.robust == 2:
return tensor.mul_(self.scale_).add_(self.min_)
return tensor
def inverse_transform(self, tensor):
if self.normalize:
tensor = (tensor * self.sigma.expand_as(tensor)) + self.mu.expand_as(tensor)
if self.robust == 0 or self.robust == 2:
tensor = tensor.sub_(self.min_).div_(self.scale_)
if self.robust > 0:
device = tensor.device
tensor = tensor.cpu().numpy()
tensor = self.skprepro.inverse_transform(tensor)
tensor = torch.tensor(tensor).to(device)
return tensor
def fit_transform(self, tensor):
self.fit(tensor)
return self.transform(tensor)
class Normalizer:
def fit(self, tensor):
return self
def transform(self, tensor):
return F.normalize(tensor, dim=1, p=2)
def fit_transform(self, tensor):
self.fit(tensor)
return self.transform(tensor)
|
koku/api/report/test/azure/test_views.py | rubik-ai/koku | 157 | 11137034 |
#
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Test the Azure Report views."""
from unittest.mock import patch
from django.http import HttpRequest
from django.http import QueryDict
from django.urls import reverse
from faker import Faker
from rest_framework import status
from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework.test import APIClient
from api.iam.test.iam_test_case import IamTestCase
from api.models import User
from api.report.azure.view import AzureCostView
from api.report.view import _convert_units
from api.utils import UnitConverter
FAKE = Faker()
class AzureReportViewTest(IamTestCase):
"""Azure report view test cases."""
def setUp(self):
"""Set up the customer view tests."""
super().setUp()
self.report = {
"group_by": {"subscription_guid": ["*"]},
"filter": {
"resolution": "monthly",
"time_scope_value": -1,
"time_scope_units": "month",
"resource_scope": [],
},
"data": [
{
"date": "2018-07",
"subscription_guids": [
{
"subscription_guid": "00000000-0000-0000-0000-000000000000",
"values": [
{
"date": "2018-07",
"units": "GB-Mo",
"subscription_guid": "00000000-0000-0000-0000-000000000000",
"total": 1826.74238146924,
}
],
},
{
"subscription_guid": "11111111-1111-1111-1111-111111111111",
"values": [
{
"date": "2018-07",
"units": "GB-Mo",
"subscription_guid": "11111111-1111-1111-1111-111111111111",
"total": 1137.74036198065,
}
],
},
{
"subscription_guid": "22222222-2222-2222-2222-222222222222",
"values": [
{
"date": "2018-07",
"units": "GB-Mo",
"subscription_guid": "22222222-2222-2222-2222-222222222222",
"total": 1045.80659412797,
}
],
},
{
"subscription_guid": "33333333-3333-3333-3333-333333333333",
"values": [
{
"date": "2018-07",
"units": "GB-Mo",
"subscription_guid": "33333333-3333-3333-3333-333333333333",
"total": 807.326470618818,
}
],
},
{
"subscription_guid": "44444444-4444-4444-4444-444444444444",
"values": [
{
"date": "2018-07",
"units": "GB-Mo",
"subscription_guid": "44444444-4444-4444-4444-444444444444",
"total": 658.306642830709,
}
],
},
],
}
],
"total": {"value": 5475.922451027388, "units": "GB-Mo"},
}
def test_convert_units_success(self):
"""Test unit conversion succeeds."""
converter = UnitConverter()
to_unit = "byte"
expected_unit = f"{to_unit}-Mo"
report_total = self.report.get("total", {}).get("value")
result = _convert_units(converter, self.report, to_unit)
result_unit = result.get("total", {}).get("units")
result_total = result.get("total", {}).get("value")
self.assertEqual(expected_unit, result_unit)
self.assertEqual(report_total * 1e9, result_total)
def test_convert_units_list(self):
"""Test that the list check is hit."""
converter = UnitConverter()
to_unit = "byte"
expected_unit = f"{to_unit}-Mo"
report_total = self.report.get("total", {}).get("value")
report = [self.report]
result = _convert_units(converter, report, to_unit)
result_unit = result[0].get("total", {}).get("units")
result_total = result[0].get("total", {}).get("value")
self.assertEqual(expected_unit, result_unit)
self.assertEqual(report_total * 1e9, result_total)
def test_convert_units_total_not_dict(self):
"""Test that the total not dict block is hit."""
converter = UnitConverter()
to_unit = "byte"
expected_unit = f"{to_unit}-Mo"
report = self.report["data"][0]["subscription_guids"][0]["values"][0]
report_total = report.get("total")
result = _convert_units(converter, report, to_unit)
result_unit = result.get("units")
result_total = result.get("total")
self.assertEqual(expected_unit, result_unit)
self.assertEqual(report_total * 1e9, result_total)
@patch("api.report.azure.query_handler.AzureReportQueryHandler")
def test_costview_with_units_success(self, mock_handler):
"""Test unit conversion succeeds in AzureCostView."""
mock_handler.return_value.execute_query.return_value = self.report
params = {
"group_by[subscription_guid]": "*",
"filter[resolution]": "monthly",
"filter[time_scope_value]": "-1",
"filter[time_scope_units]": "month",
"units": "byte",
"SERVER_NAME": "",
}
user = User.objects.get(username=self.user_data["username"])
django_request = HttpRequest()
qd = QueryDict(mutable=True)
qd.update(params)
django_request.GET = qd
request = Request(django_request)
request.user = user
response = AzureCostView().get(request)
self.assertIsInstance(response, Response)
def test_execute_query_w_delta_total(self):
"""Test that delta=total returns deltas."""
qs = "delta=cost"
url = reverse("reports-azure-costs") + "?" + qs
client = APIClient()
response = client.get(url, **self.headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_execute_query_w_delta_bad_choice(self):
"""Test invalid delta value."""
bad_delta = "Invalid"
expected = f'"{bad_delta}" is not a valid choice.'
qs = f"group_by[subscription_guid]=*&filter[limit]=2&delta={bad_delta}"
url = reverse("reports-azure-costs") + "?" + qs
client = APIClient()
response = client.get(url, **self.headers)
result = str(response.data.get("delta")[0])
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(result, expected)
|
corehq/apps/app_manager/__init__.py | dimagilg/commcare-hq | 471 | 11137041 |
from django.apps import AppConfig
from django.conf import settings
from corehq.preindex import ExtraPreindexPlugin
class AppManagerAppConfig(AppConfig):
name = 'corehq.apps.app_manager'
def ready(self):
# Also sync this app's design docs to APPS_DB
ExtraPreindexPlugin.register('app_manager', __file__, settings.APPS_DB)
default_app_config = 'corehq.apps.app_manager.AppManagerAppConfig'
|
elkserver/docker/redelk-base/redelkinstalldata/scripts/modules/helpers.py | gitradar/RedELK | 1,863 | 11137046 |
#!/usr/bin/python3
"""
Part of RedELK
Authors:
- <NAME>. / <NAME> (@xychix)
- <NAME> (@fastlorenzo)
"""
import copy
import datetime
import json
import logging
import os
from elasticsearch import Elasticsearch
import urllib3
import config
urllib3.disable_warnings()
es = Elasticsearch(config.es_connection, verify_certs=False)
logger = logging.getLogger('helpers')
def pprint(to_print):
""" Returns a visual representation of an object """
    if isinstance(to_print, str):
return to_print
out_string = json.dumps(to_print, indent=2, sort_keys=True)
return out_string
def get_value(path, source, default_value=None):
""" Gets the value in source based on the provided path, or 'default_value' if not exists (default: None) """
split_path = path.split('.')
if split_path[0] in source:
if len(split_path) > 1:
return get_value('.'.join(split_path[1:]), source[split_path[0]])
if split_path[0] == 'ip':
if isinstance(source[split_path[0]], type([])):
return source[split_path[0]][0]
return source[split_path[0]]
return default_value
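# Illustrative example (added note, not in the original source):
#   get_value('_source.source.ip', {'_source': {'source': {'ip': ['10.0.0.1']}}})
# returns '10.0.0.1' (a final 'ip' field that is a list yields its first entry),
# while a missing key along the path yields the default (note that default_value
# is only honoured for a miss at the top level; deeper misses return None).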
def get_query(query, size=5000, index='redirtraffic-*'):
""" Get results via ES query. Returns [] if nothing found """
es_query = {'query': {'query_string': {'query': query}}}
# pylint: disable=unexpected-keyword-arg
es_result = es.search(index=index, body=es_query, size=size)
if es_result['hits']['total']['value'] == 0:
return []
return es_result['hits']['hits']
def get_hits_count(query, index='redirtraffic-*'):
""" Returns the total number of hits for a given query """
es_query = {'query': {'query_string': {'query': query}}}
# pylint: disable=unexpected-keyword-arg
es_result = es.search(index=index, body=es_query, size=0)
return es_result['hits']['total']['value']
def raw_search(query, size=10000, index='redirtraffic-*'):
""" Execute a raw ES query. Returns the hits or None if no results """
# pylint: disable=unexpected-keyword-arg
es_result = es.search(index=index, body=query, size=size)
if es_result['hits']['total']['value'] == 0:
return None
return es_result
def set_tags(tag, lst):
""" Sets tag to all objects in lst """
for doc in lst:
if 'tags' in doc['_source'] and tag not in doc['_source']['tags']:
doc['_source']['tags'].append(tag)
else:
doc['_source']['tags'] = [tag]
es.update(index=doc['_index'], id=doc['_id'], body={'doc': doc['_source']})
def add_tags_by_query(tags, query, index='redirtraffic-*'):
""" Add tags by DSL query in batch """
tags_string = ','.join(map(repr, tags))
update_q = {
'script': {
'source': f'ctx._source.tags.add([{tags_string}])',
'lang': 'painless'
},
'query': query
}
return es.update_by_query(index=index, body=update_q)
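# Illustrative example (added note, not in the original source): for
# tags=['enrich_greynoise'] the generated painless source is
#   "ctx._source.tags.add(['enrich_greynoise'])"
# so every document matched by `query` gets the tag in a single
# update-by-query call instead of one update per document.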
def add_alarm_data(doc, data, alarm_name, alarmed=True):
""" Adds alarm extra data to the source doc in ES """
now_str = datetime.datetime.utcnow().isoformat()
# Create the alarm field if it doesn't exist yet
if 'alarm' not in doc['_source']:
doc['_source']['alarm'] = {}
# Set the last checked date
data['last_checked'] = now_str
doc['_source']['alarm']['last_checked'] = now_str
# set the last alarmed date (if alarmed)
if alarmed:
doc['_source']['alarm']['last_alarmed'] = now_str
data['last_alarmed'] = now_str
# Add the extra data
doc['_source']['alarm'][alarm_name] = data
es.update(index=doc['_index'], id=doc['_id'], body={'doc': doc['_source']})
return doc
def set_checked_date(doc):
""" Sets the alarm.last_checked date to an ES doc """
if 'alarm' in doc['_source']:
doc['_source']['alarm']['last_checked'] = datetime.datetime.utcnow().isoformat()
else:
doc['_source']['alarm'] = {
'last_checked': datetime.datetime.utcnow().isoformat()
}
es.update(index=doc['_index'], id=doc['_id'], body={'doc': doc['_source']})
return doc
def group_hits(hits, groupby, res=None):
""" Takes a list of hits and a list of field names (dot notation) and returns a grouped list """
if len(groupby) > 0:
hits_list = {}
# First time in the loop
if res is None:
for hit in hits:
value = get_value(f'_source.{groupby[0]}', hit)
if value in hits_list:
hits_list[value].append(hit)
else:
hits_list[value] = [hit]
else:
for key, val in res.items():
for hit in val:
value = get_value(f'_source.{groupby[0]}', hit)
tmp_key = f'{key} / {value}'
if tmp_key in hits_list:
hits_list[tmp_key].append(hit)
else:
hits_list[tmp_key] = [hit]
groupby.pop(0)
return group_hits(hits, groupby, hits_list)
if res is None:
return hits
tmp_hits = []
for key, value in res.items():
tmp_hits.append(value[0])
return tmp_hits
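# Illustrative example (added note, not in the original source; field names are
# hypothetical): with groupby=['source.ip', 'http.headers.useragent'] the hits
# are first bucketed by source IP, then each bucket is split again by user
# agent (intermediate keys look like "10.0.0.1 / curl/7.x"), and only the first
# hit of every final group is returned.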
def get_last_run(module_name):
""" Returns the last time the module did run """
try:
query = {'query': {'term': {'module.name': module_name}}}
es_result = raw_search(query, index='redelk-modules')
if len(es_result['hits']['hits']) > 0:
es_timestamp = get_value('_source.module.last_run.timestamp', es_result['hits']['hits'][0])
es_date = datetime.datetime.strptime(es_timestamp, '%Y-%m-%dT%H:%M:%S.%f')
return es_date
return datetime.datetime.fromtimestamp(0)
# pylint: disable=broad-except
except Exception as error:
logger.debug('Error parsing last run time: %s', error)
return datetime.datetime.fromtimestamp(0)
def module_did_run(module_name, module_type='unknown', status='unknown', message=None, count=0):
""" Returns true if the module already ran, false otherwise """
logger.debug('Module did run: %s:%s [%s] %s', module_type, module_name, status, message)
try:
now_ts = datetime.datetime.utcnow().isoformat()
doc = {
'module': {
'name': module_name,
'type': module_type,
'last_run': {
'timestamp': now_ts,
'status': status,
'count': count
}
}
}
if message:
doc['module']['last_run']['message'] = message
es.index(index='redelk-modules', id=module_name, body=doc)
return True
# pylint: disable=broad-except
except Exception as error:
        logger.error('Error writing last run time for module %s: %s',
module_name, os.path.join(config.TEMP_DIR, module_name))
logger.exception(error)
return False
def module_should_run(module_name, module_type): # pylint: disable=too-many-branches
"""Check if the module is enabled and when is the last time the module ran.
If the last time is before now - interval, the module will be allowed to run"""
if module_type == 'redelk_alarm':
if module_name not in config.alarms:
logger.warning('Missing configuration for alarm [%s]. Will not run!', module_name)
return False
if 'enabled' in config.alarms[module_name] and not config.alarms[module_name]['enabled']:
logger.warning('Alarm module [%s] disabled in configuration file. Will not run!', module_name)
return False
if 'interval' in config.alarms[module_name]:
interval = config.alarms[module_name]['interval']
else:
interval = 360
elif module_type == 'redelk_enrich':
if module_name not in config.enrich:
logger.warning('Missing configuration for enrichment module [%s]. Will not run!', module_name)
return False
if 'enabled' in config.enrich[module_name] and not config.enrich[module_name]['enabled']:
logger.warning('Enrichment module [%s] disabled in configuration file. Will not run!', module_name)
return False
if 'interval' in config.enrich[module_name]:
interval = config.enrich[module_name]['interval']
else:
interval = 360
else:
        logger.warning('Invalid module type for module_should_run(%s, %s)', module_name, module_type)
return False
now = datetime.datetime.utcnow()
last_run = get_last_run(module_name)
interval = datetime.timedelta(seconds=interval)
last_run_max = now - interval
should_run = last_run < last_run_max
if not should_run:
logger.info('Module [%s] already ran within the interval of %s seconds (%s)',
module_name, interval, last_run.isoformat())
else:
logger.info('All checks ok for module [%s]. Module should run.', module_name)
logger.debug('Last run: %s | Last run max: %s', last_run.isoformat(), last_run_max.isoformat())
return should_run
def get_initial_alarm_result():
""" Returns the initial_alarm_result object """
return copy.deepcopy(initial_alarm_result)
initial_alarm_result = {
'info': {
'version': 0.0,
'name': 'unknown',
        'alarmmsg': 'unknown',
'description': 'unknown',
'type': 'redelk_alarm',
'submodule': 'unknown'
},
'hits': {
'hits': [],
'total': 0
},
'mutations': {},
'fields': ['host.name', 'user.name', '@timestamp', 'c2.message'],
'groupby': []
}
|
devtools/scripts/conda_env.py | jhrmnn/QCEngine | 105 | 11137055 |
import argparse
import os
import shutil
import subprocess as sp
# Args
parser = argparse.ArgumentParser(description='Creates a conda environment from file for a given Python version.')
parser.add_argument('-n', '--name', type=str, nargs=1, help='The name of the created Python environment')
parser.add_argument('-p', '--python', type=str, nargs=1, help='The version of the created Python environment')
parser.add_argument('conda_file', nargs='*', help='The file for the created Python environment')
args = parser.parse_args()
with open(args.conda_file[0], "r") as handle:
script = handle.read()
tmp_file = "tmp_env.yaml"
script = script.replace("- python", "- python {}*".format(args.python[0]))
with open(tmp_file, "w") as handle:
handle.write(script)
conda_path = shutil.which("conda")
print("CONDA ENV NAME {}".format(args.name[0]))
print("PYTHON VERSION {}".format(args.python[0]))
print("CONDA FILE NAME {}".format(args.conda_file[0]))
print("CONDA path {}".format(conda_path))
sp.call("{} env create -n {} -f {}".format(conda_path, args.name[0], tmp_file), shell=True)
os.unlink(tmp_file)
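# Example invocation (hypothetical file names, added note):
#   python conda_env.py -n test -p 3.7 devtools/conda-envs/base.yaml
# pins the "- python" entry of the environment file to "- python 3.7*" in a
# temporary copy before running `conda env create -n test -f tmp_env.yaml`.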
|
scripts/speech/head.py | Steven1791/seq2seq | 383 | 11137065 | #!/usr/bin/env python3
import argparse
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument('input')
parser.add_argument('output')
parser.add_argument('-n', type=int, default=10)
args = parser.parse_args()
with open(args.input, 'rb') as input_file, open(args.output, 'wb') as output_file:
n, dim = np.load(input_file)
n = min(args.n, n)
np.save(output_file, (n, dim))
for _ in range(n):
feats = np.load(input_file)
np.save(output_file, feats)
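# Example invocation (hypothetical file names, added note):
#   ./head.py train.feats train.head.feats -n 5
# keeps only the first 5 feature matrices; the input is assumed to start with a
# saved (count, dim) header followed by one np.save'd array per utterance,
# which is the layout the loop above relies on.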
|
mmdet/utils/__init__.py | cameronchoi/r3det-docker | 176 | 11137090 |
from .collect_env import collect_env
from .flops_counter import get_model_complexity_info
from .logger import get_root_logger
from .rbbox_visualization import imshow_det_rbboxes
__all__ = ['get_model_complexity_info', 'get_root_logger', 'collect_env',
'imshow_det_rbboxes']
|
track2/test-densemapnet.py | omshinde/dfc2019 | 123 | 11137098 |
import numpy as np
import os
from keras.models import load_model
from keras.applications import imagenet_utils
from tqdm import tqdm
import tifffile
import cv2
import glob
from densemapnet import densemapnet
import argparse
NO_DATA = -999.0
COMPLETENESS_THRESHOLD_PIXELS = 3.0
#GPU="0" # default GPU
GPU="-1" # no GPU
if __name__=="__main__":
# parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('test_folder', type=str)
parser.add_argument('output_folder', type=str)
parser.add_argument('model_file', type=str)
args = parser.parse_args()
# load the model
height = 1024
width = 1024
bands = 3
print(height, width, bands)
os.environ["CUDA_VISIBLE_DEVICES"]=GPU
settings = densemapnet.Settings()
settings.xdim = width
settings.ydim = height
settings.channels = bands
settings.nopadding = True
settings.dropout_test = False
settings.model_weights = args.model_file
network = densemapnet.DenseMapNet(settings=settings)
model = network.build_model()
# predict disparities for all images in folder
files = glob.glob(args.test_folder + '*LEFT_RGB.tif')
nfiles = len(files)
print('Number of files = ', nfiles)
for i in tqdm(range(nfiles)):
name = files[i]
pos = name.find('LEFT_RGB')
left_name = name
right_name = name[0:pos] + 'RIGHT_RGB.tif'
name = os.path.basename(name)
pos = name.find('LEFT_RGB')
dsp_name = args.output_folder + name[0:pos] + 'LEFT_DSP.tif'
viz_name = args.output_folder + name[0:pos] + 'STEREO_GRAY.tif'
left = tifffile.imread(left_name)
right = tifffile.imread(right_name)
left = np.expand_dims(left,axis=0)
right = np.expand_dims(right,axis=0)
        # center and scale image values to [-0.5, 0.5]
left = (left - 127.5)/255.0
right = (right - 127.5)/255.0
disp = model.predict([left, right])[0,:,:,:]
tifffile.imsave(dsp_name, disp, compress=6)
# save grayscale version of image for visual inspection
disp = disp - disp.min()
disp = ((disp / disp.max()) * 255.0).astype(np.uint8)
disp = cv2.cvtColor(disp,cv2.COLOR_GRAY2RGB)
tifffile.imsave(viz_name, disp, compress=6)
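# Example invocation (hypothetical paths, added note):
#   python test-densemapnet.py ./test-data/ ./out/ weights.densemapnet.h5
# For every '*LEFT_RGB.tif' in the test folder the matching '*RIGHT_RGB.tif' is
# loaded, and a float disparity map ('*LEFT_DSP.tif') plus a grayscale preview
# ('*STEREO_GRAY.tif') are written to the output folder.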
|
tf_agents/networks/nest_map_test.py | anair13/agents | 3,175 | 11137113 |
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tf_agents.networks.nest_map."""
from __future__ import absolute_import
from __future__ import division
# Using Type Annotations.
from __future__ import print_function
import os
from absl import flags
import tensorflow.compat.v2 as tf
from tf_agents.keras_layers import inner_reshape
from tf_agents.networks import nest_map
from tf_agents.networks import sequential
from tf_agents.policies import policy_saver
from tf_agents.policies import tf_policy
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import policy_step
from tf_agents.trajectories import time_step as ts
from tf_agents.utils import common
from tf_agents.utils import nest_utils
from tf_agents.utils import test_utils
FLAGS = flags.FLAGS
class MyPolicy(tf_policy.TFPolicy):
def __init__(self, time_step_spec, net):
super(MyPolicy, self).__init__(
time_step_spec,
action_spec=tf.TensorSpec((None,), tf.float32))
self._net = net
def _action(self, time_step, policy_state=(), seed=None):
out, _ = self._net(time_step.observation)
out = tf.math.add(*tf.nest.flatten(out))
return policy_step.PolicyStep(out, (), ())
class NestFlattenTest(test_utils.TestCase):
def testNestFlatten(self):
layer = nest_map.NestFlatten()
outputs = layer({'a': 1, 'b': 2})
self.assertEqual(self.evaluate(outputs), [1, 2])
class NestMapTest(test_utils.TestCase):
def setUp(self):
if not common.has_eager_been_enabled():
self.skipTest('Only supported in TF2.x.')
super(NestMapTest, self).setUp()
def testCreateAndCall(self):
net = sequential.Sequential([
nest_map.NestMap(
{'inp1': tf.keras.layers.Dense(8),
'inp2': sequential.Sequential([
tf.keras.layers.Conv2D(2, 3),
# Convert 3 inner dimensions to [8] for RNN.
inner_reshape.InnerReshape([None] * 3, [8]),
]),
'inp3': tf.keras.layers.LSTM(
8, return_state=True, return_sequences=True)}),
nest_map.NestFlatten(),
tf.keras.layers.Add()])
self.assertEqual(
net.state_spec,
({
'inp1': (),
'inp2': (),
'inp3': (2 * (tf.TensorSpec(shape=(8,), dtype=tf.float32),),),
},))
output_spec = net.create_variables(
{
'inp1': tf.TensorSpec(shape=(3,), dtype=tf.float32),
'inp2': tf.TensorSpec(shape=(4, 4, 2,), dtype=tf.float32),
'inp3': tf.TensorSpec(shape=(3,), dtype=tf.float32),
})
self.assertEqual(output_spec, tf.TensorSpec(shape=(8,), dtype=tf.float32))
inputs = {
'inp1': tf.ones((8, 10, 3), dtype=tf.float32),
'inp2': tf.ones((8, 10, 4, 4, 2), dtype=tf.float32),
'inp3': tf.ones((8, 10, 3), dtype=tf.float32)
}
output, next_state = net(inputs)
self.assertEqual(output.shape, tf.TensorShape([8, 10, 8]))
self.assertEqual(
tf.nest.map_structure(lambda t: t.shape, next_state),
({
'inp1': (),
'inp2': (),
'inp3': (2 * (tf.TensorShape([8, 8]),),),
},))
# Test passing in a state.
output, next_state = net(inputs, next_state)
self.assertEqual(output.shape, tf.TensorShape([8, 10, 8]))
self.assertEqual(
tf.nest.map_structure(lambda t: t.shape, next_state),
({
'inp1': (),
'inp2': (),
'inp3': (2 * (tf.TensorShape([8, 8]),),),
},))
def testNestedNest(self):
# layer structure: {'a': {'b': .}}
net = nest_map.NestMap(
{'a': nest_map.NestMap(
{'b': tf.keras.layers.Dense(8)})})
net.create_variables({'a': {'b': tf.TensorSpec((1,), dtype=tf.float32)}})
def testNestedNestWithNestedState(self):
# layer structure: (., {'a': {'b': .}})
net = nest_map.NestMap(
(tf.keras.layers.Dense(7),
{'a': nest_map.NestMap(
{'b': tf.keras.layers.LSTM(
8, return_state=True, return_sequences=True)})}))
# TODO(b/177337002): remove the forced tuple wrapping the LSTM
# state once we make a generic KerasWrapper network and clean up
# Sequential and NestMap to use that instead of singleton Sequential.
out, state = net(
(tf.ones((1, 2)), {'a': {'b': tf.ones((1, 2))}}),
network_state=((), {'a': {'b': ((tf.ones((1, 8)), tf.ones((1, 8))),)}}))
nest_utils.assert_matching_dtypes_and_inner_shapes(
out,
(
tf.TensorSpec(dtype=tf.float32, shape=(7,)),
{'a': {'b': tf.TensorSpec(dtype=tf.float32, shape=(8,))}}
),
caller=self, tensors_name='out', specs_name='out_expected')
nest_utils.assert_matching_dtypes_and_inner_shapes(
state,
(
(),
{'a': {'b': ((tf.TensorSpec(dtype=tf.float32, shape=(8,)),
tf.TensorSpec(dtype=tf.float32, shape=(8,))),)}}
),
caller=self, tensors_name='state', specs_name='state_expected')
def testIncompatibleStructureInputs(self):
with self.assertRaisesRegex(
TypeError,
r'`nested_layers` and `input_spec` do not have matching structures'):
nest_map.NestMap(
[tf.keras.layers.Dense(8)],
input_spec={'ick': tf.TensorSpec(8, tf.float32)})
with self.assertRaisesRegex(
TypeError,
r'`self.nested_layers` and `inputs` do not have matching structures'):
net = nest_map.NestMap([tf.keras.layers.Dense(8)])
net.create_variables({'ick': tf.TensorSpec((1,), dtype=tf.float32)})
with self.assertRaisesRegex(
TypeError,
r'`self.nested_layers` and `inputs` do not have matching structures'):
net = nest_map.NestMap([tf.keras.layers.Dense(8)])
net({'ick': tf.constant([[1.0]])})
with self.assertRaisesRegex(
ValueError,
r'`network_state` and `state_spec` do not have matching structures'):
net = nest_map.NestMap(
tf.keras.layers.LSTM(8, return_state=True, return_sequences=True))
net(tf.ones((1, 2)), network_state=(tf.ones((1, 1)), ()))
def testPolicySaverCompatibility(self):
observation_spec = {
'a': tf.TensorSpec(4, tf.float32),
'b': tf.TensorSpec(3, tf.float32)
}
time_step_tensor_spec = ts.time_step_spec(observation_spec)
net = nest_map.NestMap(
{'a': tf.keras.layers.LSTM(8, return_state=True, return_sequences=True),
'b': tf.keras.layers.Dense(8)})
net.create_variables(observation_spec)
policy = MyPolicy(time_step_tensor_spec, net)
sample = tensor_spec.sample_spec_nest(
time_step_tensor_spec, outer_dims=(5,))
step = policy.action(sample)
self.assertEqual(step.action.shape.as_list(), [5, 8])
train_step = common.create_variable('train_step')
saver = policy_saver.PolicySaver(policy, train_step=train_step)
self.initialize_v1_variables()
with self.cached_session():
saver.save(os.path.join(FLAGS.test_tmpdir, 'nest_map_model'))
if __name__ == '__main__':
test_utils.main()
|
tests/test_encoding/test_woe_encoder.py | janrito/feature_engine | 650 | 11137115 |
import pandas as pd
import pytest
from sklearn.exceptions import NotFittedError
from feature_engine.encoding import WoEEncoder
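# Note added for clarity (not part of the original test module): the expected
# values below appear to follow the usual weight-of-evidence definition,
#   WoE(category) = ln( P(category | target=1) / P(category | target=0) )
# computed per column, e.g. ln(7/6) ~ 0.1542, ln(7/12) ~ -0.5390 and
# ln(7/3) ~ 0.8473 for the category frequencies in the df_enc fixture.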
def test_automatically_select_variables(df_enc):
# test case 1: automatically select variables, woe
encoder = WoEEncoder(variables=None)
encoder.fit(df_enc[["var_A", "var_B"]], df_enc["target"])
X = encoder.transform(df_enc[["var_A", "var_B"]])
# transformed dataframe
transf_df = df_enc.copy()
transf_df["var_A"] = [
0.15415067982725836,
0.15415067982725836,
0.15415067982725836,
0.15415067982725836,
0.15415067982725836,
0.15415067982725836,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
0.8472978603872037,
0.8472978603872037,
0.8472978603872037,
0.8472978603872037,
]
transf_df["var_B"] = [
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
0.15415067982725836,
0.15415067982725836,
0.15415067982725836,
0.15415067982725836,
0.15415067982725836,
0.15415067982725836,
0.8472978603872037,
0.8472978603872037,
0.8472978603872037,
0.8472978603872037,
]
# init params
assert encoder.variables is None
# fit params
assert encoder.variables_ == ["var_A", "var_B"]
assert encoder.encoder_dict_ == {
"var_A": {
"A": 0.15415067982725836,
"B": -0.5389965007326869,
"C": 0.8472978603872037,
},
"var_B": {
"A": -0.5389965007326869,
"B": 0.15415067982725836,
"C": 0.8472978603872037,
},
}
assert encoder.n_features_in_ == 2
# transform params
pd.testing.assert_frame_equal(X, transf_df[["var_A", "var_B"]])
def test_error_target_is_not_passed(df_enc):
# test case 2: raises error if target is not passed
encoder = WoEEncoder(variables=None)
with pytest.raises(TypeError):
encoder.fit(df_enc)
def test_warn_if_transform_df_contains_categories_not_seen_in_fit(df_enc, df_enc_rare):
# test case 3: when dataset to be transformed contains categories not present
# in training dataset
encoder = WoEEncoder(variables=None)
with pytest.warns(UserWarning):
encoder.fit(df_enc[["var_A", "var_B"]], df_enc["target"])
encoder.transform(df_enc_rare[["var_A", "var_B"]])
def test_error_if_target_not_binary():
# test case 4: the target is not binary
encoder = WoEEncoder(variables=None)
with pytest.raises(ValueError):
df = {
"var_A": ["A"] * 6 + ["B"] * 10 + ["C"] * 4,
"var_B": ["A"] * 10 + ["B"] * 6 + ["C"] * 4,
"target": [1, 1, 2, 2, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0],
}
df = pd.DataFrame(df)
encoder.fit(df[["var_A", "var_B"]], df["target"])
def test_error_if_denominator_probability_is_zero():
# test case 5: when the denominator probability is zero
encoder = WoEEncoder(variables=None)
with pytest.raises(ValueError):
df = {
"var_A": ["A"] * 6 + ["B"] * 10 + ["C"] * 4,
"var_B": ["A"] * 10 + ["B"] * 6 + ["C"] * 4,
"target": [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0],
}
df = pd.DataFrame(df)
encoder.fit(df[["var_A", "var_B"]], df["target"])
# # # test case 6: when the numerator probability is zero, woe
# # with pytest.raises(ValueError):
# # df = {'var_A': ['A'] * 6 + ['B'] * 10 + ['C'] * 4,
# # 'var_B': ['A'] * 10 + ['B'] * 6 + ['C'] * 4,
# # 'target': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0]}
# # df = pd.DataFrame(df)
# # encoder.fit(df[['var_A', 'var_B']], df['target'])
#
# # # test case 7: when the denominator probability is zero, woe
# # with pytest.raises(ValueError):
# # df = {'var_A': ['A'] * 6 + ['B'] * 10 + ['C'] * 4,
# # 'var_B': ['A'] * 10 + ['B'] * 6 + ['C'] * 4,
# # 'target': [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1,
# 0, 0]}
# # df = pd.DataFrame(df)
# # encoder.fit(df[['var_A', 'var_B']], df['target'])
def test_non_fitted_error(df_enc):
# test case 8: non fitted error
with pytest.raises(NotFittedError):
imputer = WoEEncoder()
imputer.transform(df_enc)
def test_error_if_contains_na_in_fit(df_enc_na):
# test case 9: when dataset contains na, fit method
encoder = WoEEncoder(variables=None)
with pytest.raises(ValueError):
encoder.fit(df_enc_na[["var_A", "var_B"]], df_enc_na["target"])
def test_error_if_df_contains_na_in_transform(df_enc, df_enc_na):
    # test case 10: when dataset contains na, transform method
encoder = WoEEncoder(variables=None)
with pytest.raises(ValueError):
encoder.fit(df_enc[["var_A", "var_B"]], df_enc["target"])
encoder.transform(df_enc_na)
def test_on_numerical_variables(df_enc_numeric):
# ignore_format=True
encoder = WoEEncoder(variables=None, ignore_format=True)
encoder.fit(df_enc_numeric[["var_A", "var_B"]], df_enc_numeric["target"])
X = encoder.transform(df_enc_numeric[["var_A", "var_B"]])
# transformed dataframe
transf_df = df_enc_numeric.copy()
transf_df["var_A"] = [
0.15415067982725836,
0.15415067982725836,
0.15415067982725836,
0.15415067982725836,
0.15415067982725836,
0.15415067982725836,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
0.8472978603872037,
0.8472978603872037,
0.8472978603872037,
0.8472978603872037,
]
transf_df["var_B"] = [
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
0.15415067982725836,
0.15415067982725836,
0.15415067982725836,
0.15415067982725836,
0.15415067982725836,
0.15415067982725836,
0.8472978603872037,
0.8472978603872037,
0.8472978603872037,
0.8472978603872037,
]
# init params
assert encoder.variables is None
# fit params
assert encoder.variables_ == ["var_A", "var_B"]
assert encoder.encoder_dict_ == {
"var_A": {
1: 0.15415067982725836,
2: -0.5389965007326869,
3: 0.8472978603872037,
},
"var_B": {
1: -0.5389965007326869,
2: 0.15415067982725836,
3: 0.8472978603872037,
},
}
assert encoder.n_features_in_ == 2
# transform params
pd.testing.assert_frame_equal(X, transf_df[["var_A", "var_B"]])
def test_variables_cast_as_category(df_enc_category_dtypes):
df = df_enc_category_dtypes.copy()
encoder = WoEEncoder(variables=None)
encoder.fit(df[["var_A", "var_B"]], df["target"])
X = encoder.transform(df[["var_A", "var_B"]])
# transformed dataframe
transf_df = df.copy()
transf_df["var_A"] = [
0.15415067982725836,
0.15415067982725836,
0.15415067982725836,
0.15415067982725836,
0.15415067982725836,
0.15415067982725836,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
0.8472978603872037,
0.8472978603872037,
0.8472978603872037,
0.8472978603872037,
]
transf_df["var_B"] = [
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
-0.5389965007326869,
0.15415067982725836,
0.15415067982725836,
0.15415067982725836,
0.15415067982725836,
0.15415067982725836,
0.15415067982725836,
0.8472978603872037,
0.8472978603872037,
0.8472978603872037,
0.8472978603872037,
]
pd.testing.assert_frame_equal(X, transf_df[["var_A", "var_B"]], check_dtype=False)
assert X["var_A"].dtypes == float
|
aws-blog-campanile/bin/multipartlist.py | securityscorecard/aws-big-data-blog | 305 | 11137117 | #!/usr/bin/python2.7
import sys
import os
import fileinput
import argparse
import math
import uuid
# -----------------------------------------------------------------------------
# Support for Hadoop Streaming Sandbox Env
# -----------------------------------------------------------------------------
sys.path.append(os.environ.get('PWD'))
os.environ["BOTO_PATH"] = '/etc/boto.cfg:~/.boto:./.boto'
import campanile
import boto
from boto.s3.connection import S3Connection
# -----------------------------------------------------------------------------
# Functions
# -----------------------------------------------------------------------------
def main():
## Args
parser = argparse.ArgumentParser()
parser.add_argument('--src-bucket', required=True, dest='src',
help='Source S3 bucket')
parser.add_argument('--dst-bucket', required=True, dest='dst',
help='Destination S3 bucket')
parser.add_argument('--src-endpoint',
default=boto.s3.connection.NoHostProvided,
help='S3 source endpoint')
parser.add_argument('--dst-endpoint',
default=boto.s3.connection.NoHostProvided,
help='S3 destination endpoint')
parser.add_argument('--src-profile',
help='Boto profile used for source connection')
parser.add_argument('--dst-profile',
help='Boto profile used for destination connection')
parser.add_argument('--dry-run', action="store_true",
help='Auto generate multipart-uid')
args = parser.parse_args()
## S3 Bucket Connections
src_bucket = S3Connection(suppress_consec_slashes=False,\
host=args.src_endpoint,is_secure=True,
profile_name=args.src_profile).\
get_bucket(args.src,validate=False)
dst_bucket = S3Connection(suppress_consec_slashes=False,\
host=args.dst_endpoint,is_secure=True,
profile_name=args.dst_profile).\
get_bucket(args.dst,validate=False)
start_index = campanile.stream_index()
for line in fileinput.input("-"):
record = line.rstrip('\n').split('\t')[start_index:]
name, etag, size = record[0:3]
partcount = campanile.partcount(etag)
if partcount == 0:
print '\t'.join(record + [campanile.NULL] * 5)
continue
## Find partsize
partsize = campanile.cli_chunksize(int(size))
if partcount != int(math.ceil(float(size)/partsize)):
campanile.status("Can't calculate partsize for %s/%s\n" %
(args.src, name))
## Add alert
continue
if args.dry_run:
mid = uuid.uuid1()
else:
srckey = src_bucket.get_key(name, validate=True)
metadata = srckey.metadata
headers = {}
## Set Cache and Content Values
if srckey.cache_control is not None:
headers['Cache-Control'] = srckey.cache_control
if srckey.content_type is not None:
headers['Content-Type'] = srckey.content_type
if srckey.content_encoding is not None:
headers['Content-Encoding'] = srckey.content_encoding
if srckey.content_disposition is not None:
headers['Content-Disposition'] = srckey.content_disposition
if srckey.content_language is not None:
headers['Content-Language'] = srckey.content_language
## Initiate Multipart Upload
mid = dst_bucket.initiate_multipart_upload(name,
headers = headers,
metadata = metadata,
encrypt_key = srckey.encrypted).id
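        ## Illustrative example (added note, not in the original script): if
        ## cli_chunksize returned 8 MiB for a 100 MiB source object, the loop
        ## below would emit 13 records, e.g.
        ##   part 1:  offset 0,         byte range 0-8388607
        ##   part 13: offset 100663296, byte range 100663296-104857599
        ## i.e. one mapper input line per part to copy.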
for i in range(partcount):
offset = partsize * i
bytes = min(partsize, int(size) - offset)
print '\t'.join(record) + "\t%s\t%s\t%s\t%s\t%s" % (mid,
(i+1), partcount, offset, (offset + bytes - 1))
# -----------------------------------------------------------------------------
# Main
# -----------------------------------------------------------------------------
if __name__ == "__main__":
main()
|
REST/python/Targets/add-role-to-target.py | gdesai1234/OctopusDeploy-Api | 199 | 11137132 |
import json
import requests
octopus_server_uri = 'https://your.octopus.app/api'
octopus_api_key = 'API-YOURAPIKEY'
headers = {'X-Octopus-ApiKey': octopus_api_key}
def get_octopus_resource(uri):
response = requests.get(uri, headers=headers)
response.raise_for_status()
return json.loads(response.content.decode('utf-8'))
def get_by_name(uri, name):
resources = get_octopus_resource(uri)
return next((x for x in resources if x['Name'] == name), None)
space_name = 'Default'
target_name = 'Your Target Name'
target_role = 'your-product-role'
space = get_by_name('{0}/spaces/all'.format(octopus_server_uri), space_name)
target = get_by_name('{0}/{1}/machines/all'.format(octopus_server_uri, space['Id']), target_name)
target['Roles'].append(target_role)
uri = '{0}/{1}/machines/{2}'.format(octopus_server_uri, space['Id'], target['Id'])
response = requests.put(uri, headers=headers, json=target)
response.raise_for_status() |
src/visitpy/visit_flow/flow/src/core/__init__.py | visit-dav/vis | 226 | 11137144 |
# Copyright (c) Lawrence Livermore National Security, LLC and other VisIt
# Project developers. See the top-level LICENSE file for dates and other
# details. No copyright assignment is required to contribute to VisIt.
"""
file: __init__.py
author: <NAME> <<EMAIL>>
created: 10/14/2010
description:
Init for flow.core.
"""
from .common import *
from .registry import *
from .filter_graph import *
from .workspace import *
from .state_control import *
from .property_tree import *
from . import errors
|
modules/tools/record_parse_save/parse_radar.py | jzjonah/apollo | 22,688 | 11137151 |
#!/usr/bin/env python3
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
function to parse radar data from *.record files created using Apollo-Auto;
parsed data is saved to a *.txt file for each scan
currently implemented for:
* Continental ARS-408 radar
"""
import json
import os
import sys
from cyber.python.cyber_py3 import cyber
from cyber.python.cyber_py3 import record
from modules.drivers.proto.conti_radar_pb2 import ContiRadar
class RadarMessageConti408(object):
def __init__(self):
self.radarDetectionList = []
self.timestamp_sec = None
self.num_of_detections = None
self.radar_module = None
self.sequence_num = None
self.radar_channel = None
self.additional_notes = None
def toJSON(self):
return json.dumps(self, default=lambda o: o.__dict__,
sort_keys=True, indent=4)
class ContiRadarARS408Detection(object):
def __init__(self):
self.clusterortrack = None
self.obstacle_id = None
self.longitude_dist = None
self.lateral_dist = None
self.longitude_vel = None
self.lateral_vel = None
self.rcs = None
self.dynprop = None
self.longitude_dist_rms = None
self.lateral_dist_rms = None
self.longitude_vel_rms = None
self.lateral_vel_rms = None
self.probexist = None
self.meas_state = None
self.longitude_accel = None
self.lateral_accel = None
self.oritation_angle = None
self.longitude_accel_rms = None
self.lateral_accel_rms = None
self.oritation_angle_rms = None
self.length = None
self.width = None
self.obstacle_class = None
def pull_conti_radar_detections(obs):
"""
    convert a detection from the C++ protobuf message structure to the Python class format
"""
dets = ContiRadarARS408Detection()
dets.clusterortrack = obs.clusterortrack
dets.obstacle_id = obs.obstacle_id
dets.longitude_dist = obs.longitude_dist
dets.lateral_dist = obs.lateral_dist
dets.longitude_vel = obs.longitude_vel
dets.lateral_vel = obs.lateral_vel
dets.rcs = obs.rcs
dets.dynprop = obs.dynprop
dets.longitude_dist_rms = obs.longitude_dist_rms
dets.lateral_dist_rms = obs.lateral_dist_rms
dets.longitude_vel_rms = obs.longitude_vel_rms
dets.lateral_vel_rms = obs.lateral_vel_rms
dets.probexist = obs.probexist
dets.meas_state = obs.meas_state
dets.longitude_accel = obs.longitude_accel
dets.lateral_accel = obs.lateral_accel
dets.oritation_angle = obs.oritation_angle
dets.longitude_accel_rms = obs.longitude_accel_rms
dets.lateral_accel_rms = obs.lateral_accel_rms
dets.oritation_angle_rms = obs.oritation_angle_rms
dets.length = obs.length
dets.width = obs.width
dets.obstacle_class = obs.obstacle_class
return dets
def parse_data(channelname, msg, out_folder):
"""
parser for record-file data from continental ars-408 radar
"""
msg_contiradar = ContiRadar()
msg_contiradar.ParseFromString(msg)
n = len(msg_contiradar.contiobs)
detections = []
radar_msg = RadarMessageConti408()
head_msg = msg_contiradar.contiobs[0].header
radar_msg.timestamp_sec = head_msg.timestamp_sec
radar_msg.num_of_detections = n
radar_msg.radar_module = head_msg.module_name
radar_msg.sequence_num = head_msg.sequence_num
radar_msg.radar_channel = channelname
for i in range(len(msg_contiradar.contiobs)):
detections.append(pull_conti_radar_detections(msg_contiradar.contiobs[i]))
radar_msg.radarDetectionList = detections
json_data = radar_msg.toJSON()
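    # Added note: with sort_keys=True the "timestamp_sec" entry is serialized
    # last, so the second-to-last whitespace token of the JSON string is the
    # timestamp value; it is right-padded with zeros to a fixed width of 20
    # characters before being turned into a file name below.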
tstamp = json_data.split()[-2].ljust(20, '0')
# write this scan to file
scan_filename = "radar_scan_" + tstamp.replace('.', '_') + ".txt"
with open(out_folder + scan_filename, 'w') as outfile:
outfile.write(json_data)
return tstamp
|
qiskit/circuit/library/generalized_gates/pauli.py | Roshan-Thomas/qiskit-terra | 1,456 | 11137204 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2020
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Simulator command to perform multiple pauli gates in a single pass
"""
from qiskit.circuit.quantumregister import QuantumRegister
from qiskit.circuit.library.standard_gates.x import XGate
from qiskit.circuit.library.standard_gates.y import YGate
from qiskit.circuit.library.standard_gates.z import ZGate
from qiskit.circuit.gate import Gate
from qiskit.circuit.exceptions import CircuitError
class PauliGate(Gate):
r"""A multi-qubit Pauli gate.
This gate exists for optimization purposes for the
quantum statevector simulation, since applying multiple
pauli gates to different qubits at once can be done via
a single pass on the statevector.
The functionality is equivalent to applying
the pauli gates sequentially using standard Qiskit gates
"""
def __init__(self, label):
super().__init__("pauli", len(label), [label])
def _define(self):
"""
gate pauli (p1 a1,...,pn an) { p1 a1; ... ; pn an; }
"""
# pylint: disable=cyclic-import
from qiskit.circuit.quantumcircuit import QuantumCircuit
gates = {"X": XGate, "Y": YGate, "Z": ZGate}
q = QuantumRegister(len(self.params[0]), "q")
qc = QuantumCircuit(q, name=f"{self.name}({self.params[0]})")
paulis = self.params[0]
rules = [(gates[p](), [q[i]], []) for (i, p) in enumerate(reversed(paulis)) if p != "I"]
qc._data = rules
self.definition = qc
def inverse(self):
r"""Return inverted pauli gate (itself)."""
return PauliGate(self.params[0]) # self-inverse
def __array__(self, dtype=None):
"""Return a Numpy.array for the pauli gate.
i.e. tensor product of the paulis"""
# pylint: disable=cyclic-import
from qiskit.quantum_info.operators import Pauli
return Pauli(self.params[0]).__array__(dtype=dtype)
def validate_parameter(self, parameter):
if isinstance(parameter, str):
if all(c in ["I", "X", "Y", "Z"] for c in parameter):
return parameter
else:
raise CircuitError(
f"Parameter string {parameter} should contain only 'I', 'X', 'Y', 'Z' characters"
)
else:
raise CircuitError(
f"Parameter {parameter} should be a string of 'I', 'X', 'Y', 'Z' characters"
)
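# Illustrative usage sketch (added note, not part of the original file):
#
#   from qiskit import QuantumCircuit
#   qc = QuantumCircuit(3)
#   qc.append(PauliGate("XYZ"), [0, 1, 2])  # Z on qubit 0, Y on qubit 1, X on qubit 2
#
# The label is read in Qiskit's little-endian convention: the rightmost
# character acts on the first qubit passed to append(), matching the
# reversed() loop in _define above.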
|
test/sample.py | vlcinsky/nameko | 3,425 | 11137209 |
import logging
from nameko.rpc import rpc
log = logging.getLogger('test.sample')
class Service(object):
name = "service"
@rpc
def ping(self):
log.info('ping!')
|
venv/Lib/site-packages/scipy/linalg/tests/test_solvers.py | unbun/snake.ai | 6,989 | 11137220 | from __future__ import division, print_function, absolute_import
import os
import numpy as np
from numpy.testing import assert_array_almost_equal
import pytest
from pytest import raises as assert_raises
from scipy.linalg import solve_sylvester
from scipy.linalg import solve_continuous_lyapunov, solve_discrete_lyapunov
from scipy.linalg import solve_continuous_are, solve_discrete_are
from scipy.linalg import block_diag, solve, LinAlgError
def _load_data(name):
"""
Load npz data file under data/
Returns a copy of the data, rather than keeping the npz file open.
"""
filename = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'data', name)
with np.load(filename) as f:
return dict(f.items())
class TestSolveLyapunov(object):
cases = [
(np.array([[1, 2], [3, 4]]),
np.array([[9, 10], [11, 12]])),
# a, q all complex.
(np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
# a real; q complex.
(np.array([[1.0, 2.0], [3.0, 5.0]]),
np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
# a complex; q real.
(np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
np.array([[2.0, 2.0], [-1.0, 2.0]])),
# An example from Kitagawa, 1977
(np.array([[3, 9, 5, 1, 4], [1, 2, 3, 8, 4], [4, 6, 6, 6, 3],
[1, 5, 2, 0, 7], [5, 3, 3, 1, 5]]),
np.array([[2, 4, 1, 0, 1], [4, 1, 0, 2, 0], [1, 0, 3, 0, 3],
[0, 2, 0, 1, 0], [1, 0, 3, 0, 4]])),
# Companion matrix example. a complex; q real; a.shape[0] = 11
(np.array([[0.100+0.j, 0.091+0.j, 0.082+0.j, 0.073+0.j, 0.064+0.j,
0.055+0.j, 0.046+0.j, 0.037+0.j, 0.028+0.j, 0.019+0.j,
0.010+0.j],
[1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j,
0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j,
0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j,
0.000+0.j]]),
np.eye(11)),
# https://github.com/scipy/scipy/issues/4176
(np.matrix([[0, 1], [-1/2, -1]]),
(np.matrix([0, 3]).T * np.matrix([0, 3]).T.T)),
# https://github.com/scipy/scipy/issues/4176
(np.matrix([[0, 1], [-1/2, -1]]),
(np.array(np.matrix([0, 3]).T * np.matrix([0, 3]).T.T))),
]
def test_continuous_squareness_and_shape(self):
nsq = np.ones((3, 2))
sq = np.eye(3)
assert_raises(ValueError, solve_continuous_lyapunov, nsq, sq)
assert_raises(ValueError, solve_continuous_lyapunov, sq, nsq)
assert_raises(ValueError, solve_continuous_lyapunov, sq, np.eye(2))
def check_continuous_case(self, a, q):
x = solve_continuous_lyapunov(a, q)
assert_array_almost_equal(
np.dot(a, x) + np.dot(x, a.conj().transpose()), q)
def check_discrete_case(self, a, q, method=None):
x = solve_discrete_lyapunov(a, q, method=method)
assert_array_almost_equal(
np.dot(np.dot(a, x), a.conj().transpose()) - x, -1.0*q)
def test_cases(self):
for case in self.cases:
self.check_continuous_case(case[0], case[1])
self.check_discrete_case(case[0], case[1])
self.check_discrete_case(case[0], case[1], method='direct')
self.check_discrete_case(case[0], case[1], method='bilinear')
def test_solve_continuous_are():
mat6 = _load_data('carex_6_data.npz')
mat15 = _load_data('carex_15_data.npz')
mat18 = _load_data('carex_18_data.npz')
mat19 = _load_data('carex_19_data.npz')
mat20 = _load_data('carex_20_data.npz')
cases = [
# Carex examples taken from (with default parameters):
# [1] P.BENNER, <NAME>, <NAME>: 'A Collection of Benchmark
# Examples for the Numerical Solution of Algebraic Riccati
# Equations II: Continuous-Time Case', Tech. Report SPC 95_23,
# Fak. f. Mathematik, TU Chemnitz-Zwickau (Germany), 1995.
#
# The format of the data is (a, b, q, r, knownfailure), where
# knownfailure is None if the test passes or a string
# indicating the reason for failure.
#
# Test Case 0: carex #1
(np.diag([1.], 1),
np.array([[0], [1]]),
block_diag(1., 2.),
1,
None),
# Test Case 1: carex #2
(np.array([[4, 3], [-4.5, -3.5]]),
np.array([[1], [-1]]),
np.array([[9, 6], [6, 4.]]),
1,
None),
# Test Case 2: carex #3
(np.array([[0, 1, 0, 0],
[0, -1.89, 0.39, -5.53],
[0, -0.034, -2.98, 2.43],
[0.034, -0.0011, -0.99, -0.21]]),
np.array([[0, 0], [0.36, -1.6], [-0.95, -0.032], [0.03, 0]]),
np.array([[2.313, 2.727, 0.688, 0.023],
[2.727, 4.271, 1.148, 0.323],
[0.688, 1.148, 0.313, 0.102],
[0.023, 0.323, 0.102, 0.083]]),
np.eye(2),
None),
# Test Case 3: carex #4
(np.array([[-0.991, 0.529, 0, 0, 0, 0, 0, 0],
[0.522, -1.051, 0.596, 0, 0, 0, 0, 0],
[0, 0.522, -1.118, 0.596, 0, 0, 0, 0],
[0, 0, 0.522, -1.548, 0.718, 0, 0, 0],
[0, 0, 0, 0.922, -1.64, 0.799, 0, 0],
[0, 0, 0, 0, 0.922, -1.721, 0.901, 0],
[0, 0, 0, 0, 0, 0.922, -1.823, 1.021],
[0, 0, 0, 0, 0, 0, 0.922, -1.943]]),
np.array([[3.84, 4.00, 37.60, 3.08, 2.36, 2.88, 3.08, 3.00],
[-2.88, -3.04, -2.80, -2.32, -3.32, -3.82, -4.12, -3.96]]
).T * 0.001,
np.array([[1.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.1],
[0.0, 1.0, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0, 0.5, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
[0.5, 0.1, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.5, 0.0, 0.0, 0.1, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1, 0.0],
[0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1]]),
np.eye(2),
None),
# Test Case 4: carex #5
(np.array(
[[-4.019, 5.120, 0., 0., -2.082, 0., 0., 0., 0.870],
[-0.346, 0.986, 0., 0., -2.340, 0., 0., 0., 0.970],
[-7.909, 15.407, -4.069, 0., -6.450, 0., 0., 0., 2.680],
[-21.816, 35.606, -0.339, -3.870, -17.800, 0., 0., 0., 7.390],
[-60.196, 98.188, -7.907, 0.340, -53.008, 0., 0., 0., 20.400],
[0, 0, 0, 0, 94.000, -147.200, 0., 53.200, 0.],
[0, 0, 0, 0, 0, 94.000, -147.200, 0, 0],
[0, 0, 0, 0, 0, 12.800, 0.000, -31.600, 0],
[0, 0, 0, 0, 12.800, 0.000, 0.000, 18.800, -31.600]]),
np.array([[0.010, -0.011, -0.151],
[0.003, -0.021, 0.000],
[0.009, -0.059, 0.000],
[0.024, -0.162, 0.000],
[0.068, -0.445, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000],
[0.000, 0.000, 0.000]]),
np.eye(9),
np.eye(3),
None),
# Test Case 5: carex #6
(mat6['A'], mat6['B'], mat6['Q'], mat6['R'], None),
# Test Case 6: carex #7
(np.array([[1, 0], [0, -2.]]),
np.array([[1e-6], [0]]),
np.ones((2, 2)),
1.,
'Bad residual accuracy'),
# Test Case 7: carex #8
(block_diag(-0.1, -0.02),
np.array([[0.100, 0.000], [0.001, 0.010]]),
np.array([[100, 1000], [1000, 10000]]),
np.ones((2, 2)) + block_diag(1e-6, 0),
None),
# Test Case 8: carex #9
(np.array([[0, 1e6], [0, 0]]),
np.array([[0], [1.]]),
np.eye(2),
1.,
None),
# Test Case 9: carex #10
(np.array([[1.0000001, 1], [1., 1.0000001]]),
np.eye(2),
np.eye(2),
np.eye(2),
None),
# Test Case 10: carex #11
(np.array([[3, 1.], [4, 2]]),
np.array([[1], [1]]),
np.array([[-11, -5], [-5, -2.]]),
1.,
None),
# Test Case 11: carex #12
(np.array([[7000000., 2000000., -0.],
[2000000., 6000000., -2000000.],
[0., -2000000., 5000000.]]) / 3,
np.eye(3),
np.array([[1., -2., -2.], [-2., 1., -2.], [-2., -2., 1.]]).dot(
np.diag([1e-6, 1, 1e6])).dot(
np.array([[1., -2., -2.], [-2., 1., -2.], [-2., -2., 1.]])) / 9,
np.eye(3) * 1e6,
'Bad Residual Accuracy'),
# Test Case 12: carex #13
(np.array([[0, 0.4, 0, 0],
[0, 0, 0.345, 0],
[0, -0.524e6, -0.465e6, 0.262e6],
[0, 0, 0, -1e6]]),
np.array([[0, 0, 0, 1e6]]).T,
np.diag([1, 0, 1, 0]),
1.,
None),
# Test Case 13: carex #14
(np.array([[-1e-6, 1, 0, 0],
[-1, -1e-6, 0, 0],
[0, 0, 1e-6, 1],
[0, 0, -1, 1e-6]]),
np.ones((4, 1)),
np.ones((4, 4)),
1.,
None),
# Test Case 14: carex #15
(mat15['A'], mat15['B'], mat15['Q'], mat15['R'], None),
# Test Case 15: carex #16
(np.eye(64, 64, k=-1) + np.eye(64, 64)*(-2.) + np.rot90(
block_diag(1, np.zeros((62, 62)), 1)) + np.eye(64, 64, k=1),
np.eye(64),
np.eye(64),
np.eye(64),
None),
# Test Case 16: carex #17
(np.diag(np.ones((20, )), 1),
np.flipud(np.eye(21, 1)),
np.eye(21, 1) * np.eye(21, 1).T,
1,
'Bad Residual Accuracy'),
# Test Case 17: carex #18
(mat18['A'], mat18['B'], mat18['Q'], mat18['R'], None),
# Test Case 18: carex #19
(mat19['A'], mat19['B'], mat19['Q'], mat19['R'],
'Bad Residual Accuracy'),
# Test Case 19: carex #20
(mat20['A'], mat20['B'], mat20['Q'], mat20['R'],
'Bad Residual Accuracy')
]
# Makes the minimum precision requirements customized to the test.
    # Here numbers represent the number of decimals that agree with the zero
    # matrix when the solution x is plugged into the equation.
#
# res = array([[8e-3,1e-16],[1e-16,1e-20]]) --> min_decimal[k] = 2
#
# If the test is failing use "None" for that entry.
#
min_decimal = (14, 12, 13, 14, 11, 6, None, 5, 7, 14, 14,
None, 9, 14, 13, 14, None, 12, None, None)
def _test_factory(case, dec):
"""Checks if 0 = XA + A'X - XB(R)^{-1} B'X + Q is true"""
a, b, q, r, knownfailure = case
if knownfailure:
pytest.xfail(reason=knownfailure)
x = solve_continuous_are(a, b, q, r)
res = x.dot(a) + a.conj().T.dot(x) + q
out_fact = x.dot(b)
res -= out_fact.dot(solve(np.atleast_2d(r), out_fact.conj().T))
assert_array_almost_equal(res, np.zeros_like(res), decimal=dec)
for ind, case in enumerate(cases):
_test_factory(case, min_decimal[ind])
def test_solve_discrete_are():
cases = [
# Darex examples taken from (with default parameters):
# [1] P.BENNER, <NAME>, <NAME>: 'A Collection of Benchmark
# Examples for the Numerical Solution of Algebraic Riccati
# Equations II: Discrete-Time Case', Tech. Report SPC 95_23,
# <NAME>, TU Chemnitz-Zwickau (Germany), 1995.
# [2] <NAME>, <NAME>, <NAME>: 'Scaling of the
# Discrete-Time Algebraic Riccati Equation to Enhance Stability
# of the Schur Solution Method', IEEE Trans.Aut.Cont., vol.37(4)
#
# The format of the data is (a, b, q, r, knownfailure), where
# knownfailure is None if the test passes or a string
# indicating the reason for failure.
#
# TEST CASE 0 : Complex a; real b, q, r
(np.array([[2, 1-2j], [0, -3j]]),
np.array([[0], [1]]),
np.array([[1, 0], [0, 2]]),
np.array([[1]]),
None),
# TEST CASE 1 :Real a, q, r; complex b
(np.array([[2, 1], [0, -1]]),
np.array([[-2j], [1j]]),
np.array([[1, 0], [0, 2]]),
np.array([[1]]),
None),
# TEST CASE 2 : Real a, b; complex q, r
(np.array([[3, 1], [0, -1]]),
np.array([[1, 2], [1, 3]]),
np.array([[1, 1+1j], [1-1j, 2]]),
np.array([[2, -2j], [2j, 3]]),
None),
# TEST CASE 3 : User-reported gh-2251 (Trac #1732)
(np.array([[0.63399379, 0.54906824, 0.76253406],
[0.5404729, 0.53745766, 0.08731853],
[0.27524045, 0.84922129, 0.4681622]]),
np.array([[0.96861695], [0.05532739], [0.78934047]]),
np.eye(3),
np.eye(1),
None),
# TEST CASE 4 : darex #1
(np.array([[4, 3], [-4.5, -3.5]]),
np.array([[1], [-1]]),
np.array([[9, 6], [6, 4]]),
np.array([[1]]),
None),
# TEST CASE 5 : darex #2
(np.array([[0.9512, 0], [0, 0.9048]]),
np.array([[4.877, 4.877], [-1.1895, 3.569]]),
np.array([[0.005, 0], [0, 0.02]]),
np.array([[1/3, 0], [0, 3]]),
None),
# TEST CASE 6 : darex #3
(np.array([[2, -1], [1, 0]]),
np.array([[1], [0]]),
np.array([[0, 0], [0, 1]]),
np.array([[0]]),
None),
# TEST CASE 7 : darex #4 (skipped the gen. Ric. term S)
(np.array([[0, 1], [0, -1]]),
np.array([[1, 0], [2, 1]]),
np.array([[-4, -4], [-4, 7]]) * (1/11),
np.array([[9, 3], [3, 1]]),
None),
# TEST CASE 8 : darex #5
(np.array([[0, 1], [0, 0]]),
np.array([[0], [1]]),
np.array([[1, 2], [2, 4]]),
np.array([[1]]),
None),
# TEST CASE 9 : darex #6
(np.array([[0.998, 0.067, 0, 0],
[-.067, 0.998, 0, 0],
[0, 0, 0.998, 0.153],
[0, 0, -.153, 0.998]]),
np.array([[0.0033, 0.0200],
[0.1000, -.0007],
[0.0400, 0.0073],
[-.0028, 0.1000]]),
np.array([[1.87, 0, 0, -0.244],
[0, 0.744, 0.205, 0],
[0, 0.205, 0.589, 0],
[-0.244, 0, 0, 1.048]]),
np.eye(2),
None),
# TEST CASE 10 : darex #7
(np.array([[0.984750, -.079903, 0.0009054, -.0010765],
[0.041588, 0.998990, -.0358550, 0.0126840],
[-.546620, 0.044916, -.3299100, 0.1931800],
[2.662400, -.100450, -.9245500, -.2632500]]),
np.array([[0.0037112, 0.0007361],
[-.0870510, 9.3411e-6],
[-1.198440, -4.1378e-4],
[-3.192700, 9.2535e-4]]),
np.eye(4)*1e-2,
np.eye(2),
None),
# TEST CASE 11 : darex #8
(np.array([[-0.6000000, -2.2000000, -3.6000000, -5.4000180],
[1.0000000, 0.6000000, 0.8000000, 3.3999820],
[0.0000000, 1.0000000, 1.8000000, 3.7999820],
[0.0000000, 0.0000000, 0.0000000, -0.9999820]]),
np.array([[1.0, -1.0, -1.0, -1.0],
[0.0, 1.0, -1.0, -1.0],
[0.0, 0.0, 1.0, -1.0],
[0.0, 0.0, 0.0, 1.0]]),
np.array([[2, 1, 3, 6],
[1, 2, 2, 5],
[3, 2, 6, 11],
[6, 5, 11, 22]]),
np.eye(4),
None),
# TEST CASE 12 : darex #9
(np.array([[95.4070, 1.9643, 0.3597, 0.0673, 0.0190],
[40.8490, 41.3170, 16.0840, 4.4679, 1.1971],
[12.2170, 26.3260, 36.1490, 15.9300, 12.3830],
[4.1118, 12.8580, 27.2090, 21.4420, 40.9760],
[0.1305, 0.5808, 1.8750, 3.6162, 94.2800]]) * 0.01,
np.array([[0.0434, -0.0122],
[2.6606, -1.0453],
[3.7530, -5.5100],
[3.6076, -6.6000],
[0.4617, -0.9148]]) * 0.01,
np.eye(5),
np.eye(2),
None),
# TEST CASE 13 : darex #10
(np.kron(np.eye(2), np.diag([1, 1], k=1)),
np.kron(np.eye(2), np.array([[0], [0], [1]])),
np.array([[1, 1, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, -1, 0],
[0, 0, 0, -1, 1, 0],
[0, 0, 0, 0, 0, 0]]),
np.array([[3, 0], [0, 1]]),
None),
# TEST CASE 14 : darex #11
(0.001 * np.array(
[[870.1, 135.0, 11.59, .5014, -37.22, .3484, 0, 4.242, 7.249],
[76.55, 897.4, 12.72, 0.5504, -40.16, .3743, 0, 4.53, 7.499],
[-127.2, 357.5, 817, 1.455, -102.8, .987, 0, 11.85, 18.72],
[-363.5, 633.9, 74.91, 796.6, -273.5, 2.653, 0, 31.72, 48.82],
[-960, 1645.9, -128.9, -5.597, 71.42, 7.108, 0, 84.52, 125.9],
[-664.4, 112.96, -88.89, -3.854, 84.47, 13.6, 0, 144.3, 101.6],
[-410.2, 693, -54.71, -2.371, 66.49, 12.49, .1063, 99.97, 69.67],
[-179.9, 301.7, -23.93, -1.035, 60.59, 22.16, 0, 213.9, 35.54],
[-345.1, 580.4, -45.96, -1.989, 105.6, 19.86, 0, 219.1, 215.2]]),
np.array([[4.7600, -0.5701, -83.6800],
[0.8790, -4.7730, -2.7300],
[1.4820, -13.1200, 8.8760],
[3.8920, -35.1300, 24.8000],
[10.3400, -92.7500, 66.8000],
[7.2030, -61.5900, 38.3400],
[4.4540, -36.8300, 20.2900],
[1.9710, -15.5400, 6.9370],
[3.7730, -30.2800, 14.6900]]) * 0.001,
np.diag([50, 0, 0, 0, 50, 0, 0, 0, 0]),
np.eye(3),
None),
# TEST CASE 15 : darex #12 - numerically least accurate example
(np.array([[0, 1e6], [0, 0]]),
np.array([[0], [1]]),
np.eye(2),
np.array([[1]]),
None),
# TEST CASE 16 : darex #13
(np.array([[16, 10, -2],
[10, 13, -8],
[-2, -8, 7]]) * (1/9),
np.eye(3),
1e6 * np.eye(3),
1e6 * np.eye(3),
None),
# TEST CASE 17 : darex #14
(np.array([[1 - 1/1e8, 0, 0, 0],
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0]]),
np.array([[1e-08], [0], [0], [0]]),
np.diag([0, 0, 0, 1]),
np.array([[0.25]]),
None),
# TEST CASE 18 : darex #15
(np.eye(100, k=1),
np.flipud(np.eye(100, 1)),
np.eye(100),
np.array([[1]]),
None)
]
# Minimum precision requirements customized per test case.
# Each number is the number of decimal places to which the residual
# agrees with the zero matrix when the solution x is plugged into the equation.
#
# res = array([[8e-3,1e-16],[1e-16,1e-20]]) --> min_decimal[k] = 2
#
# If the test is failing use "None" for that entry.
#
min_decimal = (12, 14, 13, 14, 13, 16, 18, 14, 15, 13,
14, 13, 13, 14, 12, 2, 5, 6, 10)
def _test_factory(case, dec):
"""Checks if X = A'XA-(A'XB)(R+B'XB)^-1(B'XA)+Q) is true"""
a, b, q, r, knownfailure = case
if knownfailure:
pytest.xfail(reason=knownfailure)
x = solve_discrete_are(a, b, q, r)
res = a.conj().T.dot(x.dot(a)) - x + q
res -= a.conj().T.dot(x.dot(b)).dot(
solve(r+b.conj().T.dot(x.dot(b)), b.conj().T).dot(x.dot(a))
)
assert_array_almost_equal(res, np.zeros_like(res), decimal=dec)
for ind, case in enumerate(cases):
_test_factory(case, min_decimal[ind])
# An infeasible example taken from https://arxiv.org/abs/1505.04861v1
A = np.triu(np.ones((3, 3)))
A[0, 1] = -1
B = np.array([[1, 1, 0], [0, 0, 1]]).T
Q = -2*np.ones_like(A) + np.diag([8, -1, -1.9])
R = np.diag([-10, 0.1])
assert_raises(LinAlgError, solve_continuous_are, A, B, Q, R)
def test_solve_generalized_continuous_are():
cases = [
# Two random examples differ by s term
# in the absence of any literature for demanding examples.
(np.array([[2.769230e-01, 8.234578e-01, 9.502220e-01],
[4.617139e-02, 6.948286e-01, 3.444608e-02],
[9.713178e-02, 3.170995e-01, 4.387444e-01]]),
np.array([[3.815585e-01, 1.868726e-01],
[7.655168e-01, 4.897644e-01],
[7.951999e-01, 4.455862e-01]]),
np.eye(3),
np.eye(2),
np.array([[6.463130e-01, 2.760251e-01, 1.626117e-01],
[7.093648e-01, 6.797027e-01, 1.189977e-01],
[7.546867e-01, 6.550980e-01, 4.983641e-01]]),
np.zeros((3, 2)),
None),
(np.array([[2.769230e-01, 8.234578e-01, 9.502220e-01],
[4.617139e-02, 6.948286e-01, 3.444608e-02],
[9.713178e-02, 3.170995e-01, 4.387444e-01]]),
np.array([[3.815585e-01, 1.868726e-01],
[7.655168e-01, 4.897644e-01],
[7.951999e-01, 4.455862e-01]]),
np.eye(3),
np.eye(2),
np.array([[6.463130e-01, 2.760251e-01, 1.626117e-01],
[7.093648e-01, 6.797027e-01, 1.189977e-01],
[7.546867e-01, 6.550980e-01, 4.983641e-01]]),
np.ones((3, 2)),
None)
]
min_decimal = (10, 10)
def _test_factory(case, dec):
"""Checks if X = A'XA-(A'XB)(R+B'XB)^-1(B'XA)+Q) is true"""
a, b, q, r, e, s, knownfailure = case
if knownfailure:
pytest.xfail(reason=knownfailure)
x = solve_continuous_are(a, b, q, r, e, s)
res = a.conj().T.dot(x.dot(e)) + e.conj().T.dot(x.dot(a)) + q
out_fact = e.conj().T.dot(x).dot(b) + s
res -= out_fact.dot(solve(np.atleast_2d(r), out_fact.conj().T))
assert_array_almost_equal(res, np.zeros_like(res), decimal=dec)
for ind, case in enumerate(cases):
_test_factory(case, min_decimal[ind])
def test_solve_generalized_discrete_are():
mat20170120 = _load_data('gendare_20170120_data.npz')
cases = [
# Two random examples differ by s term
# in the absence of any literature for demanding examples.
(np.array([[2.769230e-01, 8.234578e-01, 9.502220e-01],
[4.617139e-02, 6.948286e-01, 3.444608e-02],
[9.713178e-02, 3.170995e-01, 4.387444e-01]]),
np.array([[3.815585e-01, 1.868726e-01],
[7.655168e-01, 4.897644e-01],
[7.951999e-01, 4.455862e-01]]),
np.eye(3),
np.eye(2),
np.array([[6.463130e-01, 2.760251e-01, 1.626117e-01],
[7.093648e-01, 6.797027e-01, 1.189977e-01],
[7.546867e-01, 6.550980e-01, 4.983641e-01]]),
np.zeros((3, 2)),
None),
(np.array([[2.769230e-01, 8.234578e-01, 9.502220e-01],
[4.617139e-02, 6.948286e-01, 3.444608e-02],
[9.713178e-02, 3.170995e-01, 4.387444e-01]]),
np.array([[3.815585e-01, 1.868726e-01],
[7.655168e-01, 4.897644e-01],
[7.951999e-01, 4.455862e-01]]),
np.eye(3),
np.eye(2),
np.array([[6.463130e-01, 2.760251e-01, 1.626117e-01],
[7.093648e-01, 6.797027e-01, 1.189977e-01],
[7.546867e-01, 6.550980e-01, 4.983641e-01]]),
np.ones((3, 2)),
None),
# user-reported (under PR-6616) 20-Jan-2017
# tests against the case where E is None but S is provided
(mat20170120['A'],
mat20170120['B'],
mat20170120['Q'],
mat20170120['R'],
None,
mat20170120['S'],
None),
]
min_decimal = (11, 11, 16)
def _test_factory(case, dec):
"""Checks if X = A'XA-(A'XB)(R+B'XB)^-1(B'XA)+Q) is true"""
a, b, q, r, e, s, knownfailure = case
if knownfailure:
pytest.xfail(reason=knownfailure)
x = solve_discrete_are(a, b, q, r, e, s)
if e is None:
e = np.eye(a.shape[0])
if s is None:
s = np.zeros_like(b)
res = a.conj().T.dot(x.dot(a)) - e.conj().T.dot(x.dot(e)) + q
res -= (a.conj().T.dot(x.dot(b)) + s).dot(
solve(r+b.conj().T.dot(x.dot(b)),
(b.conj().T.dot(x.dot(a)) + s.conj().T)
)
)
assert_array_almost_equal(res, np.zeros_like(res), decimal=dec)
for ind, case in enumerate(cases):
_test_factory(case, min_decimal[ind])
def test_are_validate_args():
def test_square_shape():
nsq = np.ones((3, 2))
sq = np.eye(3)
for x in (solve_continuous_are, solve_discrete_are):
assert_raises(ValueError, x, nsq, 1, 1, 1)
assert_raises(ValueError, x, sq, sq, nsq, 1)
assert_raises(ValueError, x, sq, sq, sq, nsq)
assert_raises(ValueError, x, sq, sq, sq, sq, nsq)
def test_compatible_sizes():
nsq = np.ones((3, 2))
sq = np.eye(4)
for x in (solve_continuous_are, solve_discrete_are):
assert_raises(ValueError, x, sq, nsq, 1, 1)
assert_raises(ValueError, x, sq, sq, sq, sq, sq, nsq)
assert_raises(ValueError, x, sq, sq, np.eye(3), sq)
assert_raises(ValueError, x, sq, sq, sq, np.eye(3))
assert_raises(ValueError, x, sq, sq, sq, sq, np.eye(3))
def test_symmetry():
nsym = np.arange(9).reshape(3, 3)
sym = np.eye(3)
for x in (solve_continuous_are, solve_discrete_are):
assert_raises(ValueError, x, sym, sym, nsym, sym)
assert_raises(ValueError, x, sym, sym, sym, nsym)
def test_singularity():
sing = 1e12 * np.ones((3, 3))
sing[2, 2] -= 1
sq = np.eye(3)
for x in (solve_continuous_are, solve_discrete_are):
assert_raises(ValueError, x, sq, sq, sq, sq, sing)
assert_raises(ValueError, solve_continuous_are, sq, sq, sq, sing)
def test_finiteness():
nm = np.ones((2, 2)) * np.nan
sq = np.eye(2)
for x in (solve_continuous_are, solve_discrete_are):
assert_raises(ValueError, x, nm, sq, sq, sq)
assert_raises(ValueError, x, sq, nm, sq, sq)
assert_raises(ValueError, x, sq, sq, nm, sq)
assert_raises(ValueError, x, sq, sq, sq, nm)
assert_raises(ValueError, x, sq, sq, sq, sq, nm)
assert_raises(ValueError, x, sq, sq, sq, sq, sq, nm)
class TestSolveSylvester(object):
cases = [
# a, b, c all real.
(np.array([[1, 2], [0, 4]]),
np.array([[5, 6], [0, 8]]),
np.array([[9, 10], [11, 12]])),
# a, b, c all real, 4x4. a and b have non-trivial 2x2 blocks in their
# quasi-triangular form.
(np.array([[1.0, 0, 0, 0],
[0, 1.0, 2.0, 0.0],
[0, 0, 3.0, -4],
[0, 0, 2, 5]]),
np.array([[2.0, 0, 0, 1.0],
[0, 1.0, 0.0, 0.0],
[0, 0, 1.0, -1],
[0, 0, 1, 1]]),
np.array([[1.0, 0, 0, 0],
[0, 1.0, 0, 0],
[0, 0, 1.0, 0],
[0, 0, 0, 1.0]])),
# a, b, c all complex.
(np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
np.array([[-1.0, 2j], [3.0, 4.0]]),
np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
# a and b real; c complex.
(np.array([[1.0, 2.0], [3.0, 5.0]]),
np.array([[-1.0, 0], [3.0, 4.0]]),
np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
# a and c complex; b real.
(np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
np.array([[-1.0, 0], [3.0, 4.0]]),
np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
# a complex; b and c real.
(np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
np.array([[-1.0, 0], [3.0, 4.0]]),
np.array([[2.0, 2.0], [-1.0, 2.0]])),
# not square matrices, real
(np.array([[8, 1, 6], [3, 5, 7], [4, 9, 2]]),
np.array([[2, 3], [4, 5]]),
np.array([[1, 2], [3, 4], [5, 6]])),
# not square matrices, complex
(np.array([[8, 1j, 6+2j], [3, 5, 7], [4, 9, 2]]),
np.array([[2, 3], [4, 5-1j]]),
np.array([[1, 2j], [3, 4j], [5j, 6+7j]])),
]
def check_case(self, a, b, c):
x = solve_sylvester(a, b, c)
assert_array_almost_equal(np.dot(a, x) + np.dot(x, b), c)
def test_cases(self):
for case in self.cases:
self.check_case(case[0], case[1], case[2])
def test_trivial(self):
a = np.array([[1.0, 0.0], [0.0, 1.0]])
b = np.array([[1.0]])
c = np.array([2.0, 2.0]).reshape(-1, 1)
x = solve_sylvester(a, b, c)
assert_array_almost_equal(x, np.array([1.0, 1.0]).reshape(-1, 1))
|
ote_sdk/ote_sdk/tests/entities/test_id.py | ntyukaev/training_extensions | 775 | 11137263 | # Copyright (C) 2021-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
import pytest
from bson import ObjectId
from ote_sdk.entities.id import ID
from ote_sdk.tests.constants.ote_sdk_components import OteSdkComponent
from ote_sdk.tests.constants.requirements import Requirements
@pytest.mark.components(OteSdkComponent.OTE_SDK)
class TestID:
@pytest.mark.priority_medium
@pytest.mark.unit
@pytest.mark.reqids(Requirements.REQ_1)
def test_id(self):
"""
<b>Description:</b>
Check ID class object initialization
<b>Input data:</b>
ID object with specified representation parameter
<b>Expected results:</b>
Test passes if ID object representation property and __repr__ method return expected values
<b>Steps</b>
1. Check representation property and __repr__ method for ID object with unspecified representation parameter
2. Check representation property and __repr__ method for ID object with ObjectId class representation parameter
3. Check representation property and __repr__ method for ID object with str type representation parameter
"""
# Scenario for ID object with unspecified representation parameter
no_representation_id = ID()
assert no_representation_id.representation == ""
assert repr(no_representation_id) == "ID()"
# Scenario for ID object with ObjectId class representation parameter
expected_oid = "61a8b869fb7665916a39eb95"
oid_representation = ObjectId(expected_oid)
oid_representation_id = ID(oid_representation)
assert oid_representation_id.representation == "61a8b869fb7665916a39eb95"
assert (
repr(oid_representation_id) == "ID(61a8b869fb7665916a39eb95)"
)
# Scenario for ID object with str-type representation parameter
str_representation = " String-type representation ID_1 "
str_representation_id = ID(str_representation)
# Leading and trailing whitespace should be removed, and uppercase letters replaced by lowercase
assert str_representation_id.representation == "string-type representation id_1"
assert repr(str_representation_id) == "ID(string-type representation id_1)"
|
library/src/greppo/layers/tile_layer.py | greppo-io/greppo | 221 | 11137291 | from dataclasses import dataclass
import uuid
@dataclass
class TileLayerComponent:
def __init__(
self,
url: str,
name: str,
description: str = '',
visible: bool = True,
opacity: float = 1.0,
):
self.url = url
self.name = name
self.description = description
self.visible = visible
self.opacity = opacity
def convert_to_dataclass(self):
id = uuid.uuid4().hex
return TileLayer(id=id, url=self.url, name=self.name, description=self.description, visible=self.visible, opacity=self.opacity)
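# Illustrative usage sketch (hypothetical OSM-style tile URL, not part of the library API):
#   component = TileLayerComponent(
#       url="https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png",
#       name="OpenStreetMap base layer",
#   )
#   tile_layer = component.convert_to_dataclass()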
@dataclass()
class TileLayer:
id: str
url: str
name: str
description: str
visible: bool
opacity: float
|
study/vowel_spectrum.py | Kshitiz-Bansal/wavetorch | 470 | 11137302 | """Generate plot of the mean vowel sample spectra
"""
import torch
import wavetorch
from torch.utils.data import TensorDataset, DataLoader
import argparse
import yaml
import time
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.gridspec import GridSpec
import pandas as pd
import librosa
try:
from helpers.plot import mpl_set_latex
mpl_set_latex()
except ImportError:
import warnings
warnings.warn('The helpers package is unavailable', ImportWarning)
n_fft = 2048
sr = 10000
vowels = ['ae', 'ei', 'iy']
colors = ['#fcaf3e', '#ad7fa8', '#ef2929']
# vowels = ['ae', 'eh', 'ih', 'oo', 'ah', 'ei', 'iy', 'uh', 'aw', 'er', 'oa', 'uw']
gender = 'both'
fig, ax = plt.subplots(1, 1, constrained_layout=True, figsize=(3.5, 2.75))
for i, vowel in enumerate(vowels):
X, _, _ = wavetorch.data.load_all_vowels([vowel], gender=gender, sr=sr)
X_ft = [np.abs(librosa.core.stft(Xi.numpy(),n_fft=n_fft)) for Xi in X]
X_ft_int = np.vstack([Xi.sum(axis=1) for Xi in X_ft])
X_ft_mean = np.mean(X_ft_int,axis=0)
X_ft_std = np.std(X_ft_int,axis=0)
ax.fill_between(librosa.core.fft_frequencies(sr=sr, n_fft=n_fft),
X_ft_mean,
alpha=0.30, color=colors[i], edgecolor="none", zorder=i ,lw=0)
ax.plot(librosa.core.fft_frequencies(sr=sr, n_fft=n_fft),
X_ft_mean,
color=colors[i],zorder=i, label=vowel + ' vowel class', lw=1.0)
# ax.plot(librosa.core.fft_frequencies(sr=sr, n_fft=n_fft),
# X_ft_std, '-',
# label=vowel + ' vowel class', color=colors[i], lw=1, zorder=i)
# ax.set_xlim([0,5000])
# ax.set_ylim([0,13])
ax.set_xlabel("Frequency (Hz)")
ax.set_ylabel("Mean energy spectrum (a.u.)")
ax.legend()
plt.show(block=False)
|
example/timestamp-as-a-service/services/web/app.py | fatash89/sanic | 251 | 11137335 | from datetime import datetime
from flask import Flask, render_template
from redis import Redis
app = Flask(__name__)
redis = Redis(host='redis', retry_on_timeout=True)
@app.route('/')
def index():
timestamp_id = redis.get('TIMESTAMP_ID') or 0
return render_template('index.html', timestamps=timestamp_id)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=80)
|
jni-build/jni/include/tensorflow/contrib/learn/python/learn/tests/coordinated_session_test.py | rcelebi/android-elfali | 680 | 11137353 | # pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for CoordinatedSession."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import time
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import coordinated_session
def BusyWaitForCoordStop(coord):
while not coord.should_stop():
time.sleep(0.001)
class CoordinatedSessionTest(tf.test.TestCase):
"""CoordinatedSession tests."""
def test_properties(self):
with self.test_session() as sess:
tf.constant(0.0)
coord = tf.train.Coordinator()
coord_sess = coordinated_session.CoordinatedSession(sess, coord, [])
self.assertEquals(sess.graph, coord_sess.graph)
self.assertEquals(sess.sess_str, coord_sess.sess_str)
def test_run(self):
with self.test_session() as sess:
c = tf.constant(0)
v = tf.identity(c)
coord = tf.train.Coordinator()
coord_sess = coordinated_session.CoordinatedSession(sess, coord, [])
self.assertEqual(42, coord_sess.run(v, feed_dict={c: 42}))
def test_should_stop_on_close(self):
with self.test_session() as sess:
coord = tf.train.Coordinator()
coord_sess = coordinated_session.CoordinatedSession(sess, coord, [])
self.assertFalse(coord_sess.should_stop())
coord_sess.close()
self.assertTrue(coord_sess.should_stop())
def test_should_stop_on_coord_stop(self):
with self.test_session() as sess:
coord = tf.train.Coordinator()
coord_sess = coordinated_session.CoordinatedSession(sess, coord, [])
self.assertFalse(coord_sess.should_stop())
coord.request_stop()
self.assertTrue(coord_sess.should_stop())
def test_request_stop_on_exception(self):
with self.test_session() as sess:
c = tf.constant(0)
v = tf.identity(c)
coord = tf.train.Coordinator()
coord_sess = coordinated_session.CoordinatedSession(sess, coord, [])
self.assertFalse(coord_sess.should_stop())
self.assertEqual(0, coord_sess.run(c))
self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1}))
with self.assertRaisesRegexp(TypeError, 'None has invalid type'):
coord_sess.run([None], feed_dict={c: 2})
self.assertTrue(coord.should_stop())
self.assertTrue(coord_sess.should_stop())
def test_stop_threads_on_exception(self):
with self.test_session() as sess:
c = tf.constant(0)
v = tf.identity(c)
coord = tf.train.Coordinator()
threads = [threading.Thread(target=BusyWaitForCoordStop, args=(coord,))
for _ in range(3)]
for t in threads:
t.start()
coord_sess = coordinated_session.CoordinatedSession(sess, coord, threads)
self.assertFalse(coord_sess.should_stop())
for t in threads:
self.assertTrue(t.is_alive())
self.assertEqual(0, coord_sess.run(c))
for t in threads:
self.assertTrue(t.is_alive())
self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1}))
for t in threads:
self.assertTrue(t.is_alive())
with self.assertRaisesRegexp(TypeError, 'None has invalid type'):
coord_sess.run([None], feed_dict={c: 2})
for t in threads:
self.assertFalse(t.is_alive())
self.assertTrue(coord.should_stop())
self.assertTrue(coord_sess.should_stop())
def test_stop_threads_on_close(self):
with self.test_session() as sess:
coord = tf.train.Coordinator()
threads = [threading.Thread(target=BusyWaitForCoordStop,
args=(coord,)) for _ in range(3)]
for t in threads:
t.start()
coord_sess = coordinated_session.CoordinatedSession(sess, coord, threads)
coord_sess.close()
for t in threads:
self.assertFalse(t.is_alive())
self.assertTrue(coord.should_stop())
self.assertTrue(coord_sess.should_stop())
if __name__ == '__main__':
tf.test.main()
|
pundle.py | ksimmi/pundler | 209 | 11137362 | # encoding: utf-8
"""
Data Model, start here to not get mad
=====================================
The main entity is a distribution, like Flask. Per key
Pundle tracks three state parts:
1. requirement, like Flask>0.12.2.
2. frozen version, like ==0.12.2
3. installed distributions, like [flask==0.12.2, flask==0.10.0]
A requirement usually comes from a file such as requirements.txt, setup.py or Pipfile. These requirements
have a source like `requirements.txt`. Base requirements can also have dependencies; these
dependencies are turned into requirements too, with a source like `Flask-Admin << requirements.txt`.
To track requirements we use the `CustomReq` class. It can work with PyPI and VCS requirements.
A CustomReq holds either `self.req = pkg_resources.Requirement` or a custom VCS line.
A distribution is either a VCSDist or a pkg_resources.DistInfoDistribution. VCSDist tracks
installed VCS packages, while pkg_resources.DistInfoDistribution is for PyPI packages.
All three states of a distribution are tracked inside a `RequirementState` object that includes info
about the requirement, the frozen version and the installed versions.
`Suite` keeps the state of all distributions as a dictionary of RequirementStates.
To populate a Suite and to parse all requirements, frozen versions and installed packages, Pundle
uses a `Parser`. There are several of them: `Parser` works with `requirements.txt`,
`SetupParser` works with `setup.py`, and `PipfileParser` works with Pipfile and Pipfile.lock.
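
A minimal usage sketch (illustrative only, assuming a requirements.txt exists in the
current working directory; names refer to the helpers defined below):

    params = create_parser_parameters()
    suite = create_parser(**params).create_suite()
    if suite.need_freeze() or suite.need_install():
        suite.install()
        suite.save_frozen()
    suite.activate_all()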
"""
from __future__ import print_function
import re
try:
from urllib.parse import urlparse, parse_qsl
except ImportError: # pragma: no cover
from urlparse import urlparse, parse_qsl
from collections import defaultdict
from base64 import b64encode, b64decode
import platform
import os.path as op
import os
from os import makedirs
import stat
import tempfile
import shutil
import subprocess
import sys
import shlex
import json
import hashlib
import pkg_resources
try:
from pip import main as pip_exec
except ImportError:
from pip._internal import main as pip_exec
from types import ModuleType
if isinstance(pip_exec, ModuleType):
pip_exec = pip_exec.main
# TODO bundle own version of distlib. Perhaps
try:
from pip._vendor.distlib import locators
except ImportError: # pragma: no cover
from pip.vendor.distlib import locators
try:
str_types = (basestring,)
except NameError: # pragma: no cover
str_types = (str, bytes)
try:
pkg_resources_parse_version = pkg_resources.SetuptoolsVersion
except AttributeError: # pragma: no cover
pkg_resources_parse_version = pkg_resources.packaging.version.Version
def print_message(*a, **kw):
print(*a, **kw)
class PundleException(Exception):
pass
def python_version_string():
"""We use it to generate per python folder name, where
we will install all packages.
"""
if platform.python_implementation() == 'PyPy':
version_info = sys.pypy_version_info
else:
version_info = sys.version_info
version_string = '{v.major}.{v.minor}.{v.micro}'.format(v=version_info)
build, _ = platform.python_build()
build = build.replace(':', '_') # windows do not understand `:` in path
return '{}-{}-{}'.format(platform.python_implementation(), version_string, build)
def parse_file(filename):
"""Helper to parse requirements.txt or frozen.txt.
"""
res = []
with open(filename) as f:
for line in f:
s = line.strip()
if s and not s.startswith('#'):
if s.startswith('-r'):
continue
if s.startswith('-e '):
s = s[3:].strip()
if parse_vcs_requirement(s):
res.append(s)
else:
req = shlex.split(s, comments=True)
res.append(req[0])
return res
def test_vcs(req):
"""Checks if requirement line is for VCS.
"""
return '+' in req and req.index('+') == 3
def parse_vcs_requirement(req):
"""Parses VCS line to egg name, version etc.
"""
if '+' not in req:
return None
vcs, url = req.split('+', 1)
if vcs not in ('git', 'svn', 'hg'):
return None
parsed_url = urlparse(url)
parsed = dict(parse_qsl(parsed_url.fragment))
if 'egg' not in parsed:
return None
egg = parsed['egg'].rsplit('-', 1)
if len(egg) > 1:
try:
pkg_resources_parse_version(egg[1])
except pkg_resources._vendor.packaging.version.InvalidVersion:
return parsed['egg'].lower(), req, None
return egg[0].lower(), req, egg[1]
else:
return parsed['egg'].lower(), req, None
def parse_frozen_vcs(req):
res = parse_vcs_requirement(req)
if not res:
return
return res[0], res[1]
class VCSDist(object):
""" Represents installed VCS distribution.
"""
def __init__(self, directory):
self.dir = directory
name = op.split(directory)[-1]
key, encoded = name.split('+', 1)
self.key = key.lower()
self.line = b64decode(encoded).decode('utf-8')
egg, req, version = parse_vcs_requirement(self.line)
version = version or '0.0.0'
self.hashcmp = (pkg_resources_parse_version(version), -1, egg, self.dir)
self.version = self.line
self.pkg_resource = next(iter(pkg_resources.find_distributions(self.dir, True)), None)
self.location = self.pkg_resource.location
def requires(self, extras=[]):
return self.pkg_resource.requires(extras=extras)
def activate(self):
return self.pkg_resource.activate()
def __lt__(self, other):
return self.hashcmp.__lt__(other.hashcmp)
class CustomReq(object):
"""Represents PyPi or VCS requirement.
Can locate and install it.
"""
def __init__(self, line, env, source=None):
self.line = line
self.egg = None
if isinstance(line, pkg_resources.Requirement):
self.req = line
elif test_vcs(line):
res = parse_vcs_requirement(line)
if not res:
raise PundleException('Bad url %r' % line)
egg, req, version = res
self.egg = egg
self.req = None # pkg_resources.Requirement.parse(res)
else:
self.req = pkg_resources.Requirement.parse(line)
self.sources = set([source])
self.envs = set()
self.add_env(env)
def __contains__(self, something):
if self.req:
return (something in self.req)
elif self.egg:
return something == self.line
else:
return False
def __repr__(self):
return '<CustomReq %r>' % self.__dict__
def why_str(self):
if len(self.sources) < 2:
return '{} << {}'.format(self.line, self.why_str_one(list(self.sources)[0]))
causes = list(sorted(self.why_str_one(source) for source in self.sources))
return '{} << [{}]'.format(self.line, ' | '.join(causes))
def why_str_one(self, source):
if isinstance(source, str_types):
return source
elif isinstance(source, CustomReq):
return source.why_str()
return '?'
def adjust_with_req(self, req):
if not self.req:
return
raise PundleException('VCS')
versions = ','.join(''.join(t) for t in set(self.req.specs + req.req.specs))
self.req = pkg_resources.Requirement.parse('{} {}'.format(
self.req.project_name, versions
))
self.sources.update(req.sources)
self.add_env(req.envs)
@property
def key(self):
return self.req.key if self.req else self.egg
@property
def extras(self):
return self.req.extras if self.req else []
def locate(self, suite, prereleases=False):
# requirements can have something after `;` symbol that `locate` does not understand
req = str(self.req).split(';', 1)[0]
dist = suite.locate(req, prereleases=prereleases)
if not dist:
# have not find any releases so search for pre
dist = suite.locate(req, prereleases=True)
if not dist:
raise PundleException('%s can not be located' % self.req)
return dist
def locate_and_install(self, suite, installed=None, prereleases=False):
if self.egg:
key = b64encode(self.line.encode('utf-8')).decode()
target_dir = op.join(suite.parser.directory, '{}+{}'.format(self.egg, key))
target_req = self.line
ready = [
installation
for installation in (installed or [])
if getattr(installation, 'line', None) == self.line
]
else:
loc_dist = self.locate(suite, prereleases=prereleases)
ready = [
installation
for installation in (installed or [])
if installation.version == loc_dist.version
]
target_dir = op.join(suite.parser.directory, '{}-{}'.format(loc_dist.key, loc_dist.version))
# DEL? target_req = '%s==%s' % (loc_dist.name, loc_dist.version)
# If we use custom index, then we want not to configure PIP with it
# and just give it URL
target_req = loc_dist.download_url
if ready:
return ready[0]
try:
makedirs(target_dir)
except OSError:
pass
tmp_dir = tempfile.mkdtemp()
print('Use temp dir', tmp_dir)
try:
print('pip install --no-deps -t %s %s' % (tmp_dir, target_req))
pip_exec([
'install',
'--no-deps',
'-t', tmp_dir,
'-v',
target_req
])
for item in os.listdir(tmp_dir):
shutil.move(op.join(tmp_dir, item), op.join(target_dir, item))
except Exception as exc:
raise PundleException('%s was not installed due error %s' % (self.egg or loc_dist.name, exc))
finally:
shutil.rmtree(tmp_dir, ignore_errors=True)
return next(iter(pkg_resources.find_distributions(target_dir, True)), None)
def add_env(self, env):
if isinstance(env, str):
self.envs.add(env)
else:
self.envs.update(env)
class RequirementState(object):
"""Holds requirement state, like what version do we have installed, is
some version frozen or not, what requirement constrains do we have.
"""
def __init__(self, key, req=None, frozen=None, installed=None, hashes=None):
self.key = key
self.requirement = req
self.frozen = frozen
self.hashes = hashes
self.installed = installed or []
self.installed.sort()
self.installed.reverse()
def __repr__(self):
return '<RequirementState %r>' % self.__dict__
def adjust_with_req(self, req):
if self.requirement:
self.requirement.adjust_with_req(req)
else:
self.requirement = req
def has_correct_freeze(self):
return self.requirement and self.frozen and self.frozen in self.requirement
def check_installed_version(self, suite, install=False):
# install version of package if not installed
dist = None
if self.has_correct_freeze():
dist = [
installation
for installation in self.installed
if pkg_resources.parse_version(installation.version) == pkg_resources.parse_version(self.frozen)
]
dist = dist[0] if dist else None
if install and not dist:
dist = self.install_frozen(suite)
if install and not dist:
dist = self.requirement.locate_and_install(suite, installed=self.get_installed())
if dist is None:
raise PundleException('Package %s was not installed due some error' % self.key)
self.frozen = dist.version
self.installed.append(dist)
self.frozen = dist.version
return dist
def get_installed(self):
return [installation for installation in self.installed if installation.version in self.requirement]
def upgrade(self, suite, prereleases=False):
# check if we have fresh packages on PIPY
dists = self.get_installed()
dist = dists[0] if dists else None
latest = self.requirement.locate(suite, prereleases=prereleases)
if not dist or pkg_resources.parse_version(latest.version) > pkg_resources.parse_version(dist.version):
print_message('Upgrade to', latest)
dist = self.requirement.locate_and_install(suite, installed=self.get_installed(), prereleases=prereleases)
# Anyway use latest available dist
self.frozen = dist.version
self.installed.append(dist)
return dist
def reveal_requirements(self, suite, install=False, upgrade=False, already_revealed=None, prereleases=False):
already_revealed = already_revealed.copy() if already_revealed is not None else set()
if self.key in already_revealed:
return
if upgrade:
dist = self.upgrade(suite, prereleases=prereleases)
else:
dist = self.check_installed_version(suite, install=install)
if not dist:
return
already_revealed.add(self.key)
for req in dist.requires(extras=self.requirement.extras):
suite.adjust_with_req(
CustomReq(str(req), self.requirement.envs, source=self.requirement),
install=install,
upgrade=upgrade,
already_revealed=already_revealed,
)
def frozen_dump(self):
if self.requirement.egg:
return self.requirement.line
main = '{}=={}'.format(self.key, self.frozen)
comment = self.requirement.why_str()
return '{:20s} # {}'.format(main, comment)
def frozen_dist(self):
if not self.frozen:
return
for dist in self.installed:
if pkg_resources.parse_version(dist.version) == pkg_resources.parse_version(self.frozen):
return dist
def install_frozen(self, suite):
if self.frozen_dist() or not self.frozen:
return
envs = self.requirement.envs if self.requirement else ''
if test_vcs(self.frozen):
frozen_req = CustomReq(self.frozen, envs)
else:
frozen_req = CustomReq("{}=={}".format(self.key, self.frozen), envs)
dist = frozen_req.locate_and_install(suite)
self.installed.append(dist)
return dist
def activate(self):
dist = self.frozen_dist()
if not dist:
raise PundleException('Distribution is not installed %s' % self.key)
dist.activate()
pkg_resources.working_set.add_entry(dist.location)
# find end execute *.pth
sitedir = dist.location # noqa some PTH search for sitedir
for filename in os.listdir(dist.location):
if not filename.endswith('.pth'):
continue
try:
for line in open(op.join(dist.location, filename)):
if line.startswith('import '):
exec(line.strip())
except Exception as e:
print('Erroneous pth file %r' % op.join(dist.location, filename))
print(e)
class AggregatingLocator(object):
def __init__(self, locators):
self.locators = locators
def locate(self, req, **kw):
for locator in self.locators:
print_message('try', locator, 'for', req)
revealed = locator.locate(req, **kw)
if revealed:
return revealed
class Suite(object):
"""Main object that represents current directory pundle state.
It tracks RequirementStates, envs, urls for package locator.
"""
def __init__(self, parser, envs=[], urls=None):
self.states = {}
self.parser = parser
self.envs = envs
self.urls = urls or ['https://pypi.python.org/simple/']
if 'PIP_EXTRA_INDEX_URL' in os.environ:
self.urls.append(os.environ['PIP_EXTRA_INDEX_URL'])
self.locators = []
for url in self.urls:
self.locators.append(
locators.SimpleScrapingLocator(url, timeout=3.0, scheme='legacy')
)
self.locators.append(locators.JSONLocator(scheme='legacy'))
self.locator = AggregatingLocator(self.locators)
def use(self, key):
"""For single mode
You can call suite.use('arrow') and then `import arrow`
:key: package name
"""
self.adjust_with_req(CustomReq(key, ''))
self.install()
self.activate_all()
def locate(self, *a, **kw):
return self.locator.locate(*a, **kw)
def add(self, key, state):
self.states[key] = state
def __repr__(self):
return '<Suite %r>' % self.states
def required_states(self):
return [state for state in self.states.values() if state.requirement]
def need_freeze(self, verbose=False):
self.install(install=False)
not_correct = not all(state.has_correct_freeze() for state in self.required_states())
if not_correct and verbose:
for state in self.required_states():
if not state.has_correct_freeze():
print(
state.key,
'Need',
state.requirement,
'have not been frozen',
state.frozen,
', installed',
state.installed
)
# TODO
# unneeded = any(state.frozen for state in self.states.values() if not state.requirement)
# if unneeded:
# print('!!! Unneeded', [state.key for state in self.states.values() if not state.requirement])
return not_correct # or unneeded
def adjust_with_req(self, req, install=False, upgrade=False, already_revealed=None):
state = self.states.get(req.key)
if not state:
state = RequirementState(req.key, req=req)
self.add(req.key, state)
else:
state.adjust_with_req(req)
state.reveal_requirements(self, install=install, upgrade=upgrade, already_revealed=already_revealed or set())
def install(self, install=True):
for state in self.required_states():
state.reveal_requirements(self, install=install)
def upgrade(self, key=None, prereleases=False):
states = [self.states[key]] if key else self.required_states()
for state in states:
print('Check', state.requirement.req)
state.reveal_requirements(self, upgrade=True, prereleases=prereleases)
def get_frozen_states(self, env):
return [
state
for state in self.required_states()
if state.requirement and env in state.requirement.envs
]
def need_install(self):
return not all(state.frozen_dist() for state in self.states.values() if state.frozen)
def install_frozen(self):
for state in self.states.values():
state.install_frozen(self)
def activate_all(self, envs=('',)):
for state in self.required_states():
if '' in state.requirement.envs or any(env in state.requirement.envs for env in envs):
state.activate()
def save_frozen(self):
"Saves frozen files to disk"
states_by_env = dict(
(env, self.get_frozen_states(env))
for env in self.parser.envs()
)
self.parser.save_frozen(states_by_env)
class Parser(object):
"""Gather environment info, requirements,
frozen packages and create Suite object
"""
def __init__(
self,
base_path=None,
directory='Pundledir',
requirements_files=None,
frozen_files=None,
package=None,
):
self.base_path = base_path or '.'
self.directory = directory
self.requirements_files = requirements_files
if frozen_files is None:
self.frozen_files = {'': 'frozen.txt'}
else:
self.frozen_files = frozen_files
self.package = package
self.package_envs = set([''])
def envs(self):
if self.requirements_files:
return list(self.requirements_files.keys()) or ['']
elif self.package:
return self.package_envs
return ['']
def get_frozen_file(self, env):
if env in self.frozen_files:
return self.frozen_files[env]
else:
return os.path.join(self.base_path, 'frozen_%s.txt' % env)
def create_suite(self):
reqs = self.parse_requirements()
freezy = self.parse_frozen()
hashes = self.parse_frozen_hashes()
diry = self.parse_directory()
state_keys = set(list(reqs.keys()) + list(freezy.keys()) + list(diry.keys()))
suite = Suite(self, envs=self.envs())
for key in state_keys:
suite.add(
key,
RequirementState(
key,
req=reqs.get(key),
frozen=freezy.get(key),
installed=diry.get(key, []),
hashes=hashes.get(key),
),
)
return suite
def parse_directory(self):
if not op.exists(self.directory):
return {}
dists = [
# this magic takes first element or None
next(iter(
pkg_resources.find_distributions(op.join(self.directory, item), True)
), None)
for item in os.listdir(self.directory) if '-' in item
]
dists.extend(
VCSDist(op.join(self.directory, item))
for item in os.listdir(self.directory) if '+' in item
)
dists = filter(None, dists)
result = defaultdict(list)
for dist in dists:
result[dist.key].append(dist)
return result
def parse_frozen(self):
frozen_versions = {}
for env in self.envs():
frozen_file = self.get_frozen_file(env)
if op.exists(frozen_file):
frozen = [
(parse_frozen_vcs(line) or line.split('=='))
for line in parse_file(frozen_file)
]
else:
frozen = []
for name, version in frozen:
frozen_versions[name.lower()] = version
return frozen_versions
def parse_frozen_hashes(self):
"""This implementation does not support hashes yet
"""
return {}
def parse_requirements(self):
all_requirements = {}
for env, req_file in self.requirements_files.items():
requirements = parse_file(req_file)
if env:
source = 'requirements %s file' % env
else:
source = 'requirements file'
for line in requirements:
req = CustomReq(line, env, source=source)
if req.key in all_requirements:
# if requirements exists in other env, then add this env too
all_requirements[req.key].add_env(env)
else:
all_requirements[req.key] = req
return all_requirements
def save_frozen(self, states_by_env):
for env, states in states_by_env.items():
data = '\n'.join(sorted(
state.frozen_dump()
for state in states
)) + '\n'
frozen_file = self.get_frozen_file(env)
with open(frozen_file, 'w') as f:
f.write(data)
class SingleParser(Parser):
"""Parser for console mode.
"""
def parse_requirements(self):
return {}
class SetupParser(Parser):
"""Parser for `setup.py`. Because it mostly used to develop package, we
do not freeze packages to setup.py. We use `frozen.txt`.
"""
def parse_requirements(self):
setup_info = get_info_from_setup(self.package)
if setup_info is None:
raise PundleException('There is no requirements.txt nor setup.py')
install_requires = setup_info.get('install_requires') or []
reqs = [
CustomReq(str(req), '', source='setup.py')
for req in install_requires
]
requirements = dict((req.key, req) for req in reqs)
# we use `feature` as environment for pundle
extras_require = (setup_info.get('extras_require') or {})
for feature, feature_requires in extras_require.items():
for req_line in feature_requires:
req = CustomReq(req_line, feature, source='setup.py')
# if this req already in dict, then add our feature as env
if req.key in requirements:
requirements[req.key].add_env(feature)
else:
requirements[req.key] = req
self.package_envs.add(feature)
return requirements
class PipfileParser(Parser):
"""Parser for Pipfile and Pipfile.lock.
"""
DEFAULT_PIPFILE_SOURCES = [
{
'name': 'pypi',
'url': 'https://pypi.python.org/simple',
'verify_ssl': True,
},
]
def __init__(self, **kw):
self.pipfile = kw.pop('pipfile')
self.pipfile_envs = set([''])
super(PipfileParser, self).__init__(**kw)
# cache
self.loaded_pipfile = None
self.loaded_pipfile_lock = None
def envs(self):
return self.pipfile_envs
def pipfile_content(self):
import toml
if self.loaded_pipfile:
return self.loaded_pipfile
self.loaded_pipfile = toml.load(open(self.pipfile))
return self.loaded_pipfile
def pipfile_lock_content(self):
if self.loaded_pipfile_lock:
return self.loaded_pipfile_lock
try:
self.loaded_pipfile_lock = json.load(open(self.pipfile + '.lock'))
except Exception:
pass
return self.loaded_pipfile_lock
def parse_requirements(self):
info = self.pipfile_content()
all_requirements = {}
for info_key in info:
if not info_key.endswith('packages'):
continue
if '-' in info_key:
env, _ = info_key.split('-', 1)
else:
env = ''
self.pipfile_envs.add(env)
for key, details in info[info_key].items():
if isinstance(details, str_types):
if details != '*':
key = key + details # details is a version requirement
req = CustomReq(key, env, source='Pipfile')
else:
# a dict
if 'file' in details or 'path' in details:
raise PundleException('Unsupported Pipfile feature yet %s: %r' % (key, details))
if 'git' in details:
# wow, this as a git package!
req = CustomReq('git+%s#egg=%s' % (details['git'], key), env, source='Pipfile')
else:
# else just simple requirement
req = CustomReq(key + details['version'], env, source='Pipfile')
if req.key in all_requirements:
# if requirements exists in other env, then add this env too
all_requirements[req.key].add_env(env)
else:
all_requirements[req.key] = req
return all_requirements
def parse_frozen(self):
parsed_frozen = self.pipfile_lock_content()
if parsed_frozen is None:
return {}
frozen_versions = {}
for env in parsed_frozen:
if env.startswith('_'):
# this is not an env
continue
for key, details in parsed_frozen[env].items():
if 'vcs' in details:
frozen_versions[key] = details['vcs']
else:
frozen_versions[key] = details.get('version', '0.0.0').lstrip('=')
return frozen_versions
def parse_frozen_hashes(self):
parsed_frozen = self.pipfile_lock_content()
if parsed_frozen is None:
return {}
frozen_versions = {}
for env in parsed_frozen:
if env.startswith('_'):
# this is not an env
continue
for key, details in parsed_frozen[env].items():
frozen_versions[key] = details.get('hashes', [])
return frozen_versions
def hash(self):
"""Returns the SHA256 of the pipfile's data.
From pipfile.
"""
pipfile_content = self.pipfile_content()
data = {
'_meta': {
'sources': pipfile_content.get('sources') or self.DEFAULT_PIPFILE_SOURCES,
'requires': pipfile_content.get('requires') or {},
},
'default': pipfile_content.get('packages') or {},
'develop': pipfile_content.get('dev-packages') or {},
}
content = json.dumps(data, sort_keys=True, separators=(",", ":"))
return hashlib.sha256(content.encode("utf8")).hexdigest()
def save_frozen(self, states_by_env):
"""Implementation is not complete.
"""
data = self.pipfile_lock_content() or {}
data.setdefault('_meta', {
'pipfile-spec': 5,
'requires': {},
'sources': self.DEFAULT_PIPFILE_SOURCES,
})
data.setdefault('_meta', {}).setdefault('hash', {})['sha256'] = self.hash()
for env, states in states_by_env.items():
if env == '':
env_key = 'default'
elif env == 'dev':
env_key = 'develop'
else:
env_key = env
reqs = data.setdefault(env_key, {})
for state in states:
if state.requirement.egg:
egg, url, version = parse_vcs_requirement(state.requirement.line)
reqs[state.key] = {
'vcs': url,
}
else:
reqs[state.key] = {
'version': '==' + state.frozen,
'hashes': state.hashes or [],
}
with open(self.pipfile + '.lock', 'w') as f:
f.write(json.dumps(data, sort_keys=True, indent=4))
def create_parser(**parser_args):
"""Utility function that tried to figure out what Parser to use
in current directory.
"""
if parser_args.get('requirements_files'):
return Parser(**parser_args)
elif parser_args.get('package'):
return SetupParser(**parser_args)
elif parser_args.get('pipfile'):
return PipfileParser(**parser_args)
return SingleParser(**parser_args)
# Utilities
def get_info_from_setup(path):
"""Mock setuptools.setup(**kargs) to get
package information about requirements and extras
"""
preserve = {}
def _save_info(**setup_args):
preserve['args'] = setup_args
import setuptools
original_setup = setuptools.setup
setuptools.setup = _save_info
import runpy
runpy.run_path(os.path.join(path, 'setup.py'), run_name='__main__')
setuptools.setup = original_setup
return preserve.get('args')
def search_files_upward(start_path=None):
"Search for requirements.txt, setup.py or Pipfile upward"
if not start_path:
start_path = op.abspath(op.curdir)
if any(
op.exists(op.join(start_path, filename))
for filename in ('requirements.txt', 'setup.py', 'Pipfile')
):
return start_path
up_path = op.abspath(op.join(start_path, '..'))
if op.samefile(start_path, up_path):
return None
return search_files_upward(start_path=up_path)
def find_all_prefixed_files(directory, prefix):
"find all requirements_*.txt files"
envs = {}
for entry in os.listdir(directory):
if not entry.startswith(prefix):
continue
name, ext = op.splitext(entry)
env = name[len(prefix):].lstrip('_')
envs[env] = op.join(directory, entry)
return envs
def create_parser_parameters():
base_path = search_files_upward()
if not base_path:
raise PundleException('Can not find requirements.txt nor setup.py nor Pipfile')
py_version_path = python_version_string()
pundledir_base = os.environ.get('PUNDLEDIR') or op.join(op.expanduser('~'), '.pundledir')
if op.exists(op.join(base_path, 'requirements.txt')):
requirements_files = find_all_prefixed_files(base_path, 'requirements')
else:
requirements_files = {}
envs = list(requirements_files.keys()) or ['']
params = {
'base_path': base_path,
'frozen_files': {
env: op.join(base_path, 'frozen_%s.txt' % env if env else 'frozen.txt')
for env in envs
},
'directory': op.join(pundledir_base, py_version_path),
}
if requirements_files:
params['requirements_files'] = requirements_files
elif op.exists(op.join(base_path, 'setup.py')):
params['package'] = base_path
elif op.exists(op.join(base_path, 'Pipfile')):
params['pipfile'] = op.join(base_path, 'Pipfile')
else:
return
return params
def create_parser_or_exit():
parser_kw = create_parser_parameters()
if not parser_kw:
print_message('You have not requirements.txt. Create it and run again.')
exit(1)
return parser_kw
# Commands
def upgrade_all(**kw):
key = kw.pop('key')
prereleases = kw.pop('prereleases')
suite = create_parser(**kw).create_suite()
suite.need_freeze()
suite.upgrade(key=key, prereleases=prereleases)
suite.install()
suite.save_frozen()
def install_all(**kw):
suite = create_parser(**kw).create_suite()
if suite.need_freeze() or suite.need_install():
print_message('Install some packages')
suite.install()
else:
print_message('Nothing to do, all packages installed')
suite.save_frozen()
return suite
def activate():
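"""Activate the frozen distributions of the current project so they become importable."""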
parser_kw = create_parser_parameters()
if not parser_kw:
raise PundleException('Can`t create parser parameters')
suite = create_parser(**parser_kw).create_suite()
if suite.need_freeze(verbose=True):
raise PundleException('frozen file is outdated')
if suite.need_install():
raise PundleException('Some dependencies not installed')
envs = (os.environ.get('PUNDLEENV') or '').split(',')
suite.activate_all(envs=envs)
return suite
FIXATE_TEMPLATE = """
# pundle user customization start
import pundle; pundle.activate()
# pundle user customization end
"""
def fixate():
"puts activation code to usercustomize.py for user"
print_message('Fixate')
import site
userdir = site.getusersitepackages()
if not userdir:
raise PundleException('Can`t fixate because user has no site-packages directory')
try:
makedirs(userdir)
except OSError:
pass
template = FIXATE_TEMPLATE.replace('op.dirname(__file__)', "'%s'" % op.abspath(op.dirname(__file__)))
usercustomize_file = op.join(userdir, 'usercustomize.py')
print_message('Will edit %s file' % usercustomize_file)
if op.exists(usercustomize_file):
content = open(usercustomize_file).read()
if '# pundle user customization start' in content:
regex = re.compile(r'\n# pundle user customization start.*# pundle user customization end\n', re.DOTALL)
content, res = regex.subn(template, content)
open(usercustomize_file, 'w').write(content)
else:
open(usercustomize_file, 'a').write(template)
else:
open(usercustomize_file, 'w').write(template)
link_file = op.join(userdir, 'pundle.py')
if op.lexists(link_file):
print_message('Remove exist link to pundle')
os.unlink(link_file)
print_message('Create link to pundle %s' % link_file)
os.symlink(op.abspath(__file__), link_file)
print_message('Complete')
def entry_points():
suite = activate()
entries = {}
for r in suite.states.values():
d = r.frozen_dist()
if not d:
continue
if isinstance(d, VCSDist):
continue
scripts = d.get_entry_map().get('console_scripts', {})
for name in scripts:
entries[name] = d
return entries
class CmdRegister:
commands = {}
ordered = []
@classmethod
def cmdline(cls, *cmd_aliases):
def wrap(func):
for alias in cmd_aliases:
cls.commands[alias] = func
cls.ordered.append(alias)
return wrap
@classmethod
def help(cls):
for alias in cls.ordered:
if not alias:
continue
print("{:15s} {}".format(alias, cls.commands[alias].__doc__))
@classmethod
def main(cls):
alias = '' if len(sys.argv) == 1 else sys.argv[1]
if alias == 'help':
cls.help()
return
if alias not in cls.commands:
print('Unknown command\nTry this:')
cls.help()
sys.exit(1)
cls.commands[alias]()
@CmdRegister.cmdline('', 'install')
def cmd_install():
"Install packages by frozen.txt and resolve ones that was not frozen"
install_all(**create_parser_or_exit())
@CmdRegister.cmdline('upgrade')
def cmd_upgrade():
"""
[package [pre]] if package provided will upgrade it and dependencies or all packages from PyPI.
If `pre` provided will look for prereleases.
"""
key = sys.argv[2] if len(sys.argv) > 2 else None
prereleases = sys.argv[3] == 'pre' if len(sys.argv) > 3 else False
upgrade_all(key=key, prereleases=prereleases, **create_parser_or_exit())
CmdRegister.cmdline('fixate')(fixate)
@CmdRegister.cmdline('exec')
def cmd_exec():
"executes setuptools entry"
cmd = sys.argv[2]
args = sys.argv[3:]
entries = entry_points()
if cmd not in entries:
print_message('Script is not found. Check if package is installed, or look at the `pundle entry_points`')
sys.exit(1)
exc = entries[cmd].get_entry_info('console_scripts', cmd).load()
sys.path.insert(0, '')
sys.argv = [cmd] + args
exc()
@CmdRegister.cmdline('entry_points')
def cmd_entry_points():
"prints available setuptools entries"
for entry, package in entry_points().items():
print('%s (%s)' % (entry, package))
@CmdRegister.cmdline('edit')
def cmd_edit():
"prints directory path to package"
parser_kw = create_parser_parameters()
suite = create_parser(**parser_kw).create_suite()
if suite.need_freeze():
raise PundleException('frozen file is outdated')
print(suite.states[sys.argv[2]].frozen_dist().location)
@CmdRegister.cmdline('info')
def cmd_info():
"prints info about Pundle state"
parser_kw = create_parser_parameters()
suite = create_parser(**parser_kw).create_suite()
if suite.need_freeze():
print('frozen.txt is outdated')
else:
print('frozen.txt is up to date')
for state in suite.required_states():
print(
'Requirement "{}", frozen {}, {}'.format(
state.key,
state.frozen,
state.requirement.line if state.requirement else 'None'
)
)
print('Installed versions:')
for dist in state.installed:
print(' ', repr(dist))
if not state.installed:
print(' None')
def run_console(glob):
import readline
import rlcompleter
import atexit
import code
history_path = os.path.expanduser("~/.python_history")
def save_history(history_path=history_path):
readline.write_history_file(history_path)
if os.path.exists(history_path):
readline.read_history_file(history_path)
atexit.register(save_history)
readline.set_completer(rlcompleter.Completer(glob).complete)
readline.parse_and_bind("tab: complete")
code.InteractiveConsole(locals=glob).interact()
@CmdRegister.cmdline('console')
def cmd_console():
"[ipython|bpython|ptpython] starts python console with activated pundle environment"
suite = activate()
glob = {
'pundle_suite': suite,
}
interpreter = sys.argv[2] if len(sys.argv) > 2 else None
if not interpreter:
run_console(glob)
elif interpreter == 'ipython':
from IPython import embed
embed()
elif interpreter == 'ptpython':
from ptpython.repl import embed
embed(glob, {})
elif interpreter == 'bpython':
from bpython import embed
embed(glob)
else:
raise PundleException('Unknown interpreter: {}. Choose one of None, ipython, bpython, ptpython.'.format(interpreter))
@CmdRegister.cmdline('run')
def cmd_run():
"executes given script"
activate()
import runpy
sys.path.insert(0, '')
script = sys.argv[2]
sys.argv = [sys.argv[2]] + sys.argv[3:]
runpy.run_path(script, run_name='__main__')
@CmdRegister.cmdline('module')
def cmd_module():
"executes module like `python -m`"
activate()
import runpy
sys.path.insert(0, '')
module = sys.argv[2]
sys.argv = [sys.argv[2]] + sys.argv[3:]
runpy.run_module(module, run_name='__main__')
@CmdRegister.cmdline('env')
def cmd_env():
"populates PYTHONPATH with packages paths and executes command line in subprocess"
activate()
aug_env = os.environ.copy()
aug_env['PYTHONPATH'] = ':'.join(sys.path)
subprocess.call(sys.argv[2:], env=aug_env)
@CmdRegister.cmdline('print_env')
def cmd_print_env():
"Prints PYTHONPATH. For usage with mypy and MYPYPATH"
suite = activate()
path = ':'.join(
state.frozen_dist().location
for state in suite.states.values()
if state.frozen_dist()
)
print(path)
ENTRY_POINT_TEMPLATE = '''#! /usr/bin/env python
import pundle; pundle.activate()
pundle.entry_points()['{entry_point}'].get_entry_info('console_scripts', '{entry_point}').load(require=False)()
'''
@CmdRegister.cmdline('linkall')
def link_all():
"links all packages to `.pundle_local` dir"
local_dir = '.pundle_local'
suite = activate()
try:
makedirs(local_dir)
except OSError:
pass
local_dir_info = {de.name: de for de in os.scandir(local_dir)}
for r in suite.states.values():
d = r.frozen_dist()
if not d:
continue
for dir_entry in os.scandir(d.location):
if dir_entry.name.startswith('__') or dir_entry.name.startswith('.') or dir_entry.name == 'bin':
continue
dest_path = os.path.join(local_dir, dir_entry.name)
if dir_entry.name in local_dir_info:
sym = local_dir_info.pop(dir_entry.name)
existed = op.realpath(sym.path)
if existed == dir_entry.path:
continue
os.remove(sym.path)
os.symlink(dir_entry.path, dest_path)
# create entry_points binaries
try:
makedirs(os.path.join(local_dir, 'bin'))
except OSError:
pass
for bin_name, entry_point in entry_points().items():
bin_filename = os.path.join(local_dir, 'bin', bin_name)
open(bin_filename, 'w').write(ENTRY_POINT_TEMPLATE.format(entry_point=bin_name))
file_stat = os.stat(bin_filename)
os.chmod(bin_filename, file_stat.st_mode | stat.S_IEXEC)
local_dir_info.pop('bin', None)  # 'bin' may be absent on the first run
# remove extra links
for de in local_dir_info.values():
os.remove(de.path)
@CmdRegister.cmdline('show_requirements')
def show_requirements():
"shows details requirements info"
suite = activate()
for name, state in suite.states.items():
if state.requirement:
print(
name,
'frozen:',
state.frozen,
'required:',
state.requirement.req if state.requirement.req else 'VCS',
state.requirement.envs,
)
# Single mode that you can use in console
_single_mode_suite = {} # cache variable to keep current suite for single_mode
def single_mode():
""" Create, cache and return Suite instance for single_mode.
"""
if not _single_mode_suite:
py_version_path = python_version_string()
pundledir_base = os.environ.get('PUNDLEDIR') or op.join(op.expanduser('~'), '.pundledir')
directory = op.join(pundledir_base, py_version_path)
_single_mode_suite['cache'] = create_parser(directory=directory).create_suite()
return _single_mode_suite['cache']
def use(key):
""" Installs `key` requirement, like `django==1.11` or just `django`
"""
suite = single_mode()
suite.use(key)
if __name__ == '__main__':
CmdRegister.main()
|
kolla_ansible/kolla_address.py | okleinschmidt/kolla-ansible | 531 | 11137370 | # -*- coding: utf-8 -*-
#
# Copyright 2019 <NAME> (yoctozepto)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from jinja2.filters import contextfilter
from jinja2.runtime import Undefined
from kolla_ansible.exception import FilterError
from kolla_ansible.helpers import _call_bool_filter
@contextfilter
def kolla_address(context, network_name, hostname=None):
"""returns IP address on the requested network
The output is affected by '<network_name>_*' variables:
'<network_name>_interface' sets the interface to obtain address for.
'<network_name>_address_family' controls the address family (ipv4/ipv6).
:param context: Jinja2 Context
:param network_name: string denoting the name of the network to get IP
address for, e.g. 'api'
:param hostname: to override host which address is retrieved for
:returns: string with IP address
"""
# NOTE(yoctozepto): watch out as Jinja2 'context' behaves not exactly like
# the python 'dict' (but mimics it most of the time)
# for example it returns a special object of type 'Undefined' instead of
# 'None' or value specified as default for 'get' method
# 'HostVars' shares this behavior
if hostname is None:
hostname = context.get('inventory_hostname')
if isinstance(hostname, Undefined):
raise FilterError("'inventory_hostname' variable is unavailable")
hostvars = context.get('hostvars')
if isinstance(hostvars, Undefined):
raise FilterError("'hostvars' variable is unavailable")
host = hostvars.get(hostname)
if isinstance(host, Undefined):
raise FilterError("'{hostname}' not in 'hostvars'"
.format(hostname=hostname))
del hostvars # remove for clarity (no need for other hosts)
# NOTE(yoctozepto): variable "host" will *not* return Undefined
# same applies to all its children (act like plain dictionary)
interface_name = host.get(network_name + '_interface')
if interface_name is None:
raise FilterError("Interface name undefined "
"for network '{network_name}' "
"(set '{network_name}_interface')"
.format(network_name=network_name))
address_family = host.get(network_name + '_address_family')
if address_family is None:
raise FilterError("Address family undefined "
"for network '{network_name}' "
"(set '{network_name}_address_family')"
.format(network_name=network_name))
address_family = address_family.lower()
if address_family not in ['ipv4', 'ipv6']:
raise FilterError("Unknown address family '{address_family}' "
"for network '{network_name}'"
.format(address_family=address_family,
network_name=network_name))
ansible_interface_name = interface_name.replace('-', '_')
interface = host['ansible_facts'].get(ansible_interface_name)
if interface is None:
raise FilterError("Interface '{interface_name}' "
"not present "
"on host '{hostname}'"
.format(interface_name=interface_name,
hostname=hostname))
af_interface = interface.get(address_family)
if af_interface is None:
raise FilterError("Address family '{address_family}' undefined "
"on interface '{interface_name}' "
"for host: '{hostname}'"
.format(address_family=address_family,
interface_name=interface_name,
hostname=hostname))
if address_family == 'ipv4':
address = af_interface.get('address')
elif address_family == 'ipv6':
# ipv6 has no concept of a secondary address
# explicitly exclude the vip addresses
# to avoid excluding all /128
haproxy_enabled = host.get('enable_haproxy')
if haproxy_enabled is None:
raise FilterError("'enable_haproxy' variable is unavailable")
haproxy_enabled = _call_bool_filter(context, haproxy_enabled)
if haproxy_enabled:
vip_addresses = [
host.get('kolla_internal_vip_address'),
host.get('kolla_external_vip_address'),
]
else:
# no addresses are virtual (kolla-wise)
vip_addresses = []
global_ipv6_addresses = [x for x in af_interface if
x['scope'] == 'global' and
x['address'] not in vip_addresses]
if global_ipv6_addresses:
address = global_ipv6_addresses[0]['address']
else:
address = None
if address is None:
raise FilterError("{address_family} address missing "
"on interface '{interface_name}' "
"for host '{hostname}'"
.format(address_family=address_family,
interface_name=interface_name,
hostname=hostname))
return address
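# --- Editor's usage note (not part of the original file) ---
# Inside kolla-ansible templates this filter is typically applied to a
# network name, e.g. "{{ 'api' | kolla_address }}", relying on
# 'api_interface' and 'api_address_family' being defined for the current
# host exactly as the docstring above describes; 'api' is only an example.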
|
tools/app_reg/build.py | kokosing/hue | 5,079 | 11137382 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Helpers to build the apps
"""
import logging
import os
import subprocess
import common
LOG = logging.getLogger(__name__)
def runcmd(cmdv, additional_env=None):
"""
runcmd(cmdv, additional_env=None) -> status code
"""
env = os.environ.copy()
if additional_env is not None:
env.update(additional_env)
env['PATH'] = os.path.join(common.INSTALL_ROOT, 'build', 'env', 'bin') + os.path.pathsep + env['PATH']
shell_command = ' '.join(cmdv)
LOG.info("Running '%s' with %r" % (shell_command, additional_env))
popen = subprocess.Popen(shell_command, env=env, shell=True)
return popen.wait()
def make_app(app):
"""
make_app() -> True/False
Call `make egg-info ext-eggs' on the app.
"""
cmdv = [ 'make', '-C', app.abs_path, 'egg-info', 'ext-eggs' ]
return runcmd(cmdv, dict(ROOT=common.INSTALL_ROOT)) == 0
def make_syncdb():
"""
make_syncdb() -> True/False
"""
statuses = []
hue_exec = os.path.join(common.INSTALL_ROOT, 'build', 'env', 'bin', 'hue')
if os.path.exists(hue_exec):
statuses.append( runcmd([ hue_exec, 'makemigrations', '--noinput' ]) )
statuses.append( runcmd([ hue_exec, 'migrate', '--fake-initial' ]) )
return not any(statuses)
def make_collectstatic():
"""
make_collectstatic() -> True/False
"""
statuses = []
hue_exec = os.path.join(common.INSTALL_ROOT, 'build', 'env', 'bin', 'hue')
if os.path.exists(hue_exec):
statuses.append( runcmd([ hue_exec, 'collectstatic', '--noinput' ]) )
return not any(statuses)
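# --- Editor's usage sketch (not part of the original file) ---
# Rough call order implied by the helpers above; `app` stands for an app
# descriptor object exposing `abs_path`, constructed elsewhere in the
# app_reg tooling and not shown in this module.
#
#   if make_app(app) and make_syncdb() and make_collectstatic():
#       LOG.info('app built, migrations applied, static assets collected')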
|
src/beanmachine/ppl/compiler/fix_problems.py | facebookresearch/beanmachine | 177 | 11137389 | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, List, Set
import beanmachine.ppl.compiler.profiler as prof
from beanmachine.ppl.compiler.bm_graph_builder import BMGraphBuilder
from beanmachine.ppl.compiler.error_report import ErrorReport
from beanmachine.ppl.compiler.fix_additions import addition_fixer, sum_fixer
from beanmachine.ppl.compiler.fix_arithmetic import negative_real_multiplication_fixer
from beanmachine.ppl.compiler.fix_beta_conjugate_prior import (
beta_bernoulli_conjugate_fixer,
beta_binomial_conjugate_fixer,
)
from beanmachine.ppl.compiler.fix_bool_arithmetic import bool_arithmetic_fixer
from beanmachine.ppl.compiler.fix_bool_comparisons import bool_comparison_fixer
from beanmachine.ppl.compiler.fix_logsumexp import logsumexp_fixer
from beanmachine.ppl.compiler.fix_matrix_scale import trivial_matmul_fixer
from beanmachine.ppl.compiler.fix_multiary_ops import (
multiary_addition_fixer,
multiary_multiplication_fixer,
)
from beanmachine.ppl.compiler.fix_normal_conjugate_prior import (
normal_normal_conjugate_fixer,
)
from beanmachine.ppl.compiler.fix_observations import observations_fixer
from beanmachine.ppl.compiler.fix_observe_true import observe_true_fixer
from beanmachine.ppl.compiler.fix_problem import (
ancestors_first_graph_fixer,
conditional_graph_fixer,
fixpoint_graph_fixer,
GraphFixer,
node_fixer_first_match,
NodeFixer,
sequential_graph_fixer,
)
from beanmachine.ppl.compiler.fix_requirements import requirements_fixer
from beanmachine.ppl.compiler.fix_unsupported import (
bad_matmul_reporter,
unsupported_node_fixer,
unsupported_node_reporter,
untypable_node_reporter,
)
from beanmachine.ppl.compiler.fix_vectorized_models import vectorized_model_fixer
from beanmachine.ppl.compiler.lattice_typer import LatticeTyper
default_skip_optimizations: Set[str] = {
"beta_bernoulli_conjugate_fixer",
"beta_binomial_conjugate_fixer",
"normal_normal_conjugate_fixer",
}
_arithmetic_fixer_factories: List[
Callable[[BMGraphBuilder, LatticeTyper], NodeFixer]
] = [
addition_fixer,
bool_arithmetic_fixer,
bool_comparison_fixer,
logsumexp_fixer,
multiary_addition_fixer,
multiary_multiplication_fixer,
negative_real_multiplication_fixer,
sum_fixer,
trivial_matmul_fixer,
unsupported_node_fixer,
]
def arithmetic_graph_fixer(skip: Set[str], bmg: BMGraphBuilder) -> GraphFixer:
typer = LatticeTyper()
node_fixers = [
f(bmg, typer) for f in _arithmetic_fixer_factories if f.__name__ not in skip
]
node_fixer = node_fixer_first_match(node_fixers)
arith = ancestors_first_graph_fixer(bmg, typer, node_fixer)
return fixpoint_graph_fixer(arith)
_conjugacy_fixer_factories: List[Callable[[BMGraphBuilder], NodeFixer]] = [
beta_bernoulli_conjugate_fixer,
beta_binomial_conjugate_fixer,
normal_normal_conjugate_fixer,
]
def conjugacy_graph_fixer(skip: Set[str], bmg: BMGraphBuilder) -> GraphFixer:
node_fixers = [f(bmg) for f in _conjugacy_fixer_factories if f.__name__ not in skip]
node_fixer = node_fixer_first_match(node_fixers)
# TODO: Make the typer optional
return ancestors_first_graph_fixer(bmg, LatticeTyper(), node_fixer)
def fix_problems(
bmg: BMGraphBuilder, skip_optimizations: Set[str] = default_skip_optimizations
) -> ErrorReport:
bmg._begin(prof.fix_problems)
all_fixers = sequential_graph_fixer(
[
vectorized_model_fixer(bmg),
arithmetic_graph_fixer(skip_optimizations, bmg),
unsupported_node_reporter(bmg),
bad_matmul_reporter(bmg),
untypable_node_reporter(bmg),
conjugacy_graph_fixer(skip_optimizations, bmg),
requirements_fixer(bmg),
observations_fixer(bmg),
conditional_graph_fixer(bmg._fix_observe_true, observe_true_fixer(bmg)),
]
)
_, errors = all_fixers()
bmg._finish(prof.fix_problems)
return errors
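# --- Editor's usage sketch (not part of the original file) ---
# Minimal illustration of the entry point above; `bmg` stands for a
# BMGraphBuilder that already holds an accumulated model graph, built
# elsewhere in the compiler.
#
#   errors = fix_problems(bmg, skip_optimizations=default_skip_optimizations)
#   # `errors` is the ErrorReport returned above; how it is surfaced to the
#   # user is left to the caller.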
|
Trakttv.bundle/Contents/Libraries/Shared/trakt/interfaces/users/__init__.py | disrupted/Trakttv.bundle | 1,346 | 11137433 | from trakt.core.helpers import popitems
from trakt.interfaces.base import Interface, authenticated
from trakt.mapper import CommentMapper, ListMapper
# Import child interfaces
from trakt.interfaces.users.lists import UsersListInterface, UsersListsInterface
from trakt.interfaces.users.settings import UsersSettingsInterface
import logging
log = logging.getLogger(__name__)
__all__ = [
'UsersInterface',
'UsersListsInterface',
'UsersListInterface',
'UsersSettingsInterface'
]
class UsersInterface(Interface):
path = 'users'
@authenticated
def likes(self, type=None, **kwargs):
if type and type not in ['comments', 'lists']:
raise ValueError('Unknown type specified: %r' % type)
if kwargs.get('parse') is False:
raise ValueError('Parse can\'t be disabled on this method')
# Send request
response = self.http.get(
'likes',
params=[type],
**popitems(kwargs, [
'authenticated',
'validate_token'
])
)
# Parse response
items = self.get_data(response, **kwargs)
if not items:
return
# Map items to comment/list objects
for item in items:
item_type = item.get('type')
if item_type == 'comment':
yield CommentMapper.comment(
self.client, item
)
elif item_type == 'list':
yield ListMapper.custom_list(
self.client, item
)
else:
                log.warning('Unknown item returned, type: %r', item_type)
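# --- Editor's usage sketch (not part of the original file) ---
# Illustrative only; it assumes a configured and authenticated client,
# whose setup lives outside this module.
#
#   from trakt import Trakt
#
#   for liked in Trakt['users'].likes(type='lists'):
#       print(liked)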
|
aminator/plugins/manager.py | vijay-khanna/Netflix-aminator | 721 | 11137434 | # -*- coding: utf-8 -*-
#
#
# Copyright 2013 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
"""
aminator.plugins.manager
========================
Base plugin manager(s) and utils
"""
import abc
import logging
from stevedore.dispatch import NameDispatchExtensionManager
log = logging.getLogger(__name__)
class BasePluginManager(NameDispatchExtensionManager):
"""
Base plugin manager from which all managers *should* inherit
    Descendants *must* define an _entry_point class attribute
    Descendants *may* define a _check_func class attribute holding a function that determines whether a
given plugin should or should not be enabled
"""
__metaclass__ = abc.ABCMeta
_entry_point = None
_check_func = None
def __init__(self, check_func=None, invoke_on_load=True, invoke_args=None, invoke_kwds=None):
invoke_args = invoke_args or ()
invoke_kwds = invoke_kwds or {}
if self._entry_point is None:
raise AttributeError('Plugin managers must declare their entry point in a class attribute _entry_point')
check_func = check_func or self._check_func
if check_func is None:
check_func = lambda x: True
super(BasePluginManager, self).__init__(namespace=self.entry_point, check_func=check_func, invoke_on_load=invoke_on_load, invoke_args=invoke_args, invoke_kwds=invoke_kwds)
@property
def entry_point(self):
"""
        Base plugins for each plugin type must set an _entry_point class attribute to the entry point they
are responsible for
"""
return self._entry_point
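# --- Editor's sketch (not part of the original file) ---
# Minimal illustration of the subclassing contract documented above; the
# entry point namespace below is a made-up placeholder, not one actually
# registered by aminator.
class ExamplePluginManager(BasePluginManager):
    _entry_point = 'aminator.plugins.example'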
|
examples/optimistic_lock.py | rspadim/aiocache | 459 | 11137438 | import asyncio
import logging
import random
from aiocache import Cache
from aiocache.lock import OptimisticLock, OptimisticLockError
logger = logging.getLogger(__name__)
cache = Cache(Cache.REDIS, endpoint='127.0.0.1', port=6379, namespace='main')
async def expensive_function():
logger.warning('Expensive is being executed...')
await asyncio.sleep(random.uniform(0, 2))
return 'result'
async def my_view():
async with OptimisticLock(cache, 'key') as lock:
result = await expensive_function()
try:
await lock.cas(result)
except OptimisticLockError:
logger.warning(
'I failed setting the value because it is different since the lock started!')
return result
async def concurrent():
await cache.set('key', 'initial_value')
# All three calls will read 'initial_value' as the value to check and only
# the first one finishing will succeed because the others, when trying to set
# the value, will see that the value is not the same as when the lock started
await asyncio.gather(my_view(), my_view(), my_view())
def test_redis():
loop = asyncio.get_event_loop()
loop.run_until_complete(concurrent())
loop.run_until_complete(cache.delete('key'))
loop.run_until_complete(cache.close())
if __name__ == '__main__':
test_redis()
|
development_playgrounds/TSexperiments/Oscillatory/result_loader.py | ai-di/Brancher | 208 | 11137474 | import pickle
import numpy as np
filename = 'Full_os_results.pickle'
with open(filename, 'rb') as f:
x = pickle.load(f)
MSE = {"ASVI": (np.mean([np.sqrt(error) for error in x["PE"]["MSE"]]), np.std([np.sqrt(error) for error in x["PE"]["MSE"]])/np.sqrt(len(x["PE"]["MSE"]))),
"ADVI (MF)": (np.mean([np.sqrt(error) for error in x["ADVI (MF)"]["MSE"]]), np.std([np.sqrt(error) for error in x["ADVI (MF)"]["MSE"]])/np.sqrt(len(x["ADVI (MF)"]["MSE"]))),
"ADVI (MN)": (np.mean([np.sqrt(error) for error in x["ADVI (MN)"]["MSE"]]), np.std([np.sqrt(error) for error in x["ADVI (MN)"]["MSE"]])/np.sqrt(len(x["ADVI (MN)"]["MSE"]))),
"NN": (np.mean([np.sqrt(error) for error in x["NN"]["MSE"]]), np.std([np.sqrt(error) for error in x["NN"]["MSE"]])/np.sqrt(len(x["NN"]["MSE"])))}
for key, val in MSE.items():
print(key + ": {} +- {}".format(val[0], val[1])) |
maskrcnn_benchmark/modeling/backbone/mixer.py | microsoft/GLIP | 295 | 11137501 | import torch
from torch import nn
class MixedOperationRandom(nn.Module):
def __init__(self, search_ops):
super(MixedOperationRandom, self).__init__()
self.ops = nn.ModuleList(search_ops)
self.num_ops = len(search_ops)
def forward(self, x, x_path=None):
if x_path is None:
output = sum(op(x) for op in self.ops) / self.num_ops
else:
assert isinstance(x_path, (int, float)) and 0 <= x_path < self.num_ops or isinstance(x_path, torch.Tensor)
if isinstance(x_path, (int, float)):
x_path = int(x_path)
assert 0 <= x_path < self.num_ops
output = self.ops[x_path](x)
elif isinstance(x_path, torch.Tensor):
assert x_path.size(0) == x.size(0), 'batch_size should match length of y_idx'
output = torch.cat([self.ops[int(x_path[i].item())](x.narrow(0, i, 1))
for i in range(x.size(0))], dim=0)
return output |
atest/testdata/running/binary_list.py | rdagum/robotframework | 7,073 | 11137508 | LIST__illegal_values = ('illegal:\x00\x08\x0B\x0C\x0E\x1F',
'more:\uFFFE\uFFFF')
|
oncogemini/mendelianerror.py | fakedrtom/cancer_gemini | 221 | 11137520 | """
Calculate the probability of a mendelian error given the genotype likelihoods
from a trio."""
from __future__ import print_function
import sys
from math import log10
import gzip
nan = float('nan')
class LowGenotypeException(Exception):
pass
def rescale(li):
s = float(sum(li))
if s < 1e-40:
raise LowGenotypeException
return [v / s for v in li]
def mendelian_error(mother, father, child, pls=False):
"""
Return the probability of a mendelian error given the log10 genotype
likelihoods. A large value indicates a high probability of a mendelian
error. Low values mean that the genotype-likelihoods indicate enough
uncertainty that it could be a genotyping error.
# everyone is het:
>>> het = (-2.0, -0.1, -2.0)
>>> mendelian_error(het, het, het)
0.047...
# parents are hom, child is het.
>>> father = mother = [-0.6, -2.5, -2.5]
>>> child = [-2.5, -0.6, -2.5]
>>> mendelian_error(mother, father, child)
0.987...
# same as above, but more certainty in the called genotypes:
>>> child[1] = 1e-6
>>> mother[0] = father[0] = 1e-6
>>> mendelian_error(mother, father, child)
0.996...
# everyone is confidently homozygous alt
>>> child = father = mother = [-11.0, -11.0, -0.1]
>>> mendelian_error(mother, father, child)
7.55...e-11
# everyone is less confidently homozygous refs:
>>> child = father = mother = [-0.1, -2.0, -2.0]
>>> mendelian_error(mother, father, child)
0.071...
    mother and father are homozygous alts
>>> mother = father = [-3.0, -3.0, -0.1]
# child is het
>>> child = [-3., -0.1, -3.]
>>> mendelian_error(mother, father, child)
0.998...
# but when the hom-alt call is close...
>>> child = [-3., -0.1, -0.15]
>>> mendelian_error(mother, father, child)
0.53...
# mother is hom_ref, dad is het, child is hom_alt
>>> mother, father, child = (-0.1, -2, -2), (-2, -0.1, -2), (-2, -2, -0.1)
>>> mendelian_error(mother, father, child)
0.976...
# mother is hom_ref, dad is hom_alt, child is hom_ref
>>> mother, father, child = (-0.1, -2.5, -2.5), (-2.5, -2.5, -0.1), (-0.1, -2.5, -2.5)
>>> mendelian_error(mother, father, child)
0.993...
# same, but child is hom_alt
>>> mendelian_error(mother, father, (-5, -5, -0.01))
0.994...
# child should be het:
>>> mendelian_error(mother, father, (-3, 0, -3))
0.75...
# NOTE: does oddish things if all have very low, equal values.
>>> mendelian_error([-16.2, -16.2, -16.2], [-14.4, -15.0, -22.6], [-24.9, -21.2, -20.9])
0.8629...
>>> mendelian_error([-15.5, -15.8, -19.7], [-11.8, -9.9, -22.9], [-69.7, -55.9, -58.3])
>>> mendelian_error([-3.4, -0, -2.9], [-0, -1.8, -23.0], [-6.7, 0.0, -10.7])
0.742...
>>> mendelian_error([34, 0, 29], [0, 18, 23], [67, 0, 107], pls=True)
0.74...
"""
if pls:
mother = [m / -10.0 for m in mother]
father = [f / -10.0 for f in father]
child = [c / -10.0 for c in child]
try:
M = rescale([10.**m for m in mother])
F = rescale([10.**f for f in father])
C = rescale([10.**c for c in child])
except LowGenotypeException:
return None
# by ref, and alt, we mean hom_ref, hom_alt
p_two_ref = M[0] * F[0]
p_two_het = M[1] * F[1]
p_two_alt = M[2] * F[2]
# only 1 of the parents is ...
p_one_ref = (M[0] + F[0])/2 - p_two_ref
p_one_het = (M[1] + F[1])/2 - p_two_het
p_one_alt = (M[2] + F[2])/2 - p_two_alt
# divide by 2 because parents independent.
# all options covered because, e.g. p(two_ref) == p(zero_alt)
assert abs(sum((p_one_ref, p_one_het, p_one_alt, p_two_ref, p_two_het, p_two_alt)) - 1) < 1e-4, \
abs(sum((p_one_ref, p_one_het, p_one_alt, p_two_ref, p_two_het, p_two_alt)) - 1)
##################
# Non-violations #
##################
# a. everyone is reference
a = p_two_ref * C[0]
# b. everyone is hom alt
b = p_two_alt * C[2]
# c. 1 het and 1 ref parent. child matches
c = p_one_het * p_one_ref * (C[0] + C[1])
# d. 1 het and 1 alt parent. child matches
d = p_one_het * p_one_alt * (C[1] + C[2])
# e. both parents hets. (child can be anything)
e = p_two_het
# f. one hom ref, one home alt. child is het
f = p_one_ref * p_one_alt * C[1]
#print a, b, c, d, e, f
p_not_error = a + b + c + d + e + f
return 1.0 - p_not_error
def xopen(f):
return gzip.open(f) if f.endswith(".gz") else sys.stdin if "-" == f else open(f)
def main(fh, father, mother, child):
for line in fh:
if line.startswith("##"):
print(line, end="")
continue
elif line.startswith("#CHROM"):
print("##INFO=<ID=MEP,Number=1,Type=Float,Description=\"probability of mendelian error\">")
print("##INFO=<ID=MER,Number=1,Type=Float,Description=\"log10 ratio of mendelian error\">")
fields = line.rstrip().split("\t")
samples = fields[9:]
idxs = [9 + samples.index(s) for s in (father, mother, child)]
print(line, end="")
continue
fields = line.rstrip().split("\t")
samples = [fields[i].split(":") for i in idxs]
fmt = fields[8].split(":")
if "PL" in fmt:
gli = fmt.index("PL")
opls = [s[gli].split(",") for s in samples]
gls = [[int(p)/-10. for p in pl] for pl in opls]
else:
gli = fmt.index("GL")
ogls = [s[gli].split(",") for s in samples]
            gls = [[float(g) for g in gl] for gl in ogls]
for i, gl in enumerate(gls):
while sum(gls[i]) < -50:
gls[i] = [p / 10. for p in gls[i]]
p = mendelian_error(gls[0], gls[1], gls[2])
if p == 1:
mer = 100
elif p == 0:
mer = 0
elif p is None:
mer = None
else:
mer = log10(p / (1.0 - p))
        if p is None or p < 1 - 1e-5:
continue
fields[7] += ";MEP=%.8g" % (nan if p is None else p)
fields[7] += ";MER=%.8g" % (nan if p is None else mer)
print("\t".join(fields))
def test():
from random import randint
def gen3():
return [randint(-70, 1) / 10. for i in range(3)]
ps = []
for i in range(100000):
a, b, c = gen3(), gen3(), gen3()
ps.append(mendelian_error(a, b, c))
if ps[-1] > 0.999999:
print("mendelian error:", tuple(a), tuple(b), tuple(c))
elif ps[-1] < 0.00001:
print("expected :", tuple(a), tuple(b), tuple(c))
try:
import pylab as pl
pl.hist(ps, 50)
pl.show()
except ImportError:
pass
def _main():
if len(sys.argv) > 1 and sys.argv[1] == "test":
sys.exit(test())
elif len(sys.argv) != 5:
print(__doc__)
print("\nUsage: %s some.vcf father_id mother_id child_id > new.vcf\n" %
sys.argv[0])
sys.exit()
father, mother, child = sys.argv[2:]
main(xopen(sys.argv[1]), father, mother, child)
if __name__ == "__main__":
import doctest
sys.stderr.write(str(doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS | doctest.REPORT_ONLY_FIRST_FAILURE, verbose=0)) + "\n")
_main()
|
python/jupyter_flex/tests/test_pkg.py | ocefpaf/jupyter-flex | 245 | 11137534 | import os
import pytest
import jupyter_flex
from jupyter_flex.config import settings
pytestmark = [pytest.mark.nondestructive, pytest.mark.pkg]
def test_import():
assert jupyter_flex.__version__ is not None
assert jupyter_flex.__version__ != "0.0.0"
assert len(jupyter_flex.__version__) > 0
def test_assets_included():
nbconvert = os.path.join(settings.templates_dir, "nbconvert", "flex")
assert os.path.exists(os.path.join(nbconvert, "conf.json"))
assert os.path.exists(os.path.join(nbconvert, "flex.j2"))
assert os.path.exists(os.path.join(nbconvert, "index.html.j2"))
static = os.path.join(nbconvert, "static")
assert os.path.exists(os.path.join(static, "favicon.png"))
assert os.path.exists(os.path.join(static, "jupyter-flex-embed.css"))
assert os.path.exists(os.path.join(static, "jupyter-flex-embed.js"))
assert os.path.exists(os.path.join(static, "jupyter-flex-embed.js.map"))
assert os.path.exists(os.path.join(static, "qgrid.js"))
assert os.path.exists(os.path.join(static, "require.min.js"))
voila = os.path.join(settings.templates_dir, "voila", "flex")
assert os.path.exists(os.path.join(voila, "404.html"))
assert os.path.exists(os.path.join(voila, "browser-open.html"))
assert os.path.exists(os.path.join(voila, "error.html"))
assert os.path.exists(os.path.join(voila, "index.html.j2"))
assert os.path.exists(os.path.join(voila, "page.html"))
assert os.path.exists(os.path.join(voila, "tree.html"))
|
scripts/test/test_import_point_cloud.py | sparsebase/facebook360_dep | 221 | 11137538 | #!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""Unit test class for ImportPointCloud.
This class subclasses the DepTest class, which provides some utility functions around
the base unittest.TestCase class. This script can be run as part of the overall test suite
via run_tests.py or standalone.
Example:
To run the test independently (which will produce the standard Python unittest success
output), simply run:
$ python test_import_point_cloud.py \
--binary_dir=/path/to/facebook360_dep/build/bin \
--dataset_root=s3://example/dataset
"""
import os
from .test_master_class import DepTest, generic_main
class ImportPointCloudTest(DepTest):
"""Unit test class for ImportPointCloud.
Attributes:
name (str): String representation of the class name.
"""
def test_run(self):
"""Run test for ImportPointCloud.
Raises:
AssertionError: If incorrect results are produced.
"""
point_cloud_fn = "point_cloud.xyz"
projected_disparity_dir = "projected_disparity"
self.io_args.point_cloud = os.path.join(
self.io_args.testing_dir, point_cloud_fn
)
self.io_args.output = os.path.join(
self.io_args.testing_dir, projected_disparity_dir
)
self.run_app("ImportPointCloud")
self.check_against_truth(
truth=os.path.join(self.io_args.truth_dir, projected_disparity_dir),
output=self.io_args.output,
)
if __name__ == "__main__":
generic_main([ImportPointCloudTest])
|
tools/codegen-diff-revisions.py | 82marbag/smithy-rs | 125 | 11137566 | #!/usr/bin/env python3
#
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.
#
# This script can be run and tested locally. To do so, you should check out
# a second smithy-rs repository so that you can work on the script and still
# run it without it immediately bailing for an unclean working tree.
#
# Example:
# `smithy-rs/` - the main repo you're working out of
# `test/smithy-rs/` - the repo you're testing against
#
# ```
# $ cd test/smithy-rs
# $ ../../smithy-rs/tools/codegen-diff-revisions.py . <some commit hash to diff against>
# ```
#
# It will diff the generated code from HEAD against any commit hash you feed it. If you want to test
# a specific range, change the HEAD of the test repository.
#
# This script requires `diff2html-cli` to be installed from NPM:
# ```
# $ npm install -g [email protected]
# ```
# Make sure the local version matches the version referenced from the GitHub Actions workflow.
import os
import sys
import subprocess
import tempfile
import shlex
HEAD_BRANCH_NAME = "__tmp-localonly-head"
BASE_BRANCH_NAME = "__tmp-localonly-base"
OUTPUT_PATH = "tmp-codegen-diff/"
COMMIT_AUTHOR_NAME = "GitHub Action (generated code preview)"
COMMIT_AUTHOR_EMAIL = "<EMAIL>"
CDN_URL = "https://d2luzm2xt3nokh.cloudfront.net"
def running_in_github_actions():
return os.environ.get("GITHUB_ACTIONS") == "true"
def main():
if len(sys.argv) != 3:
eprint("Usage: codegen-diff-revisions.py <repository root> <base commit sha>")
sys.exit(1)
repository_root = sys.argv[1]
base_commit_sha = sys.argv[2]
os.chdir(repository_root)
head_commit_sha = get_cmd_output("git rev-parse HEAD")
# Make sure the working tree is clean
if get_cmd_status("git diff --quiet") != 0:
eprint("working tree is not clean. aborting")
sys.exit(1)
if running_in_github_actions():
eprint(f"Fetching base revision {base_commit_sha} from GitHub...")
run(f"git fetch --no-tags --progress --no-recurse-submodules --depth=1 origin {base_commit_sha}")
# Generate code for HEAD
eprint(f"Creating temporary branch with generated code for the HEAD revision {head_commit_sha}")
run(f"git checkout {head_commit_sha} -b {HEAD_BRANCH_NAME}")
generate_and_commit_generated_code(head_commit_sha)
# Generate code for base
eprint(f"Creating temporary branch with generated code for the base revision {base_commit_sha}")
run(f"git checkout {base_commit_sha} -b {BASE_BRANCH_NAME}")
generate_and_commit_generated_code(base_commit_sha)
bot_message = make_diffs(base_commit_sha, head_commit_sha)
write_to_file(f"{OUTPUT_PATH}/bot-message", bot_message)
# Clean-up that's only really useful when testing the script in local-dev
if not running_in_github_actions():
run("git checkout main")
run(f"git branch -D {BASE_BRANCH_NAME}")
run(f"git branch -D {HEAD_BRANCH_NAME}")
def generate_and_commit_generated_code(revision_sha):
# Clean the build artifacts before continuing
run("rm -rf aws/sdk/build")
# Generate code
run("./gradlew --rerun-tasks :aws:sdk:assemble")
run("./gradlew --rerun-tasks :codegen-server-test:assemble")
# Move generated code into codegen-diff/ directory
run(f"rm -rf {OUTPUT_PATH}")
run(f"mkdir {OUTPUT_PATH}")
run(f"mv aws/sdk/build/aws-sdk {OUTPUT_PATH}")
run(f"mv codegen-server-test/build/smithyprojections/codegen-server-test {OUTPUT_PATH}")
# Clean up the server-test folder
run(f"rm -rf {OUTPUT_PATH}/codegen-server-test/source")
run(f"find {OUTPUT_PATH}/codegen-server-test | "
f"grep -E 'smithy-build-info.json|sources/manifest|model.json' | "
f"xargs rm -f", shell=True)
run(f"git add -f {OUTPUT_PATH}")
run(f"git -c 'user.name=GitHub Action (generated code preview)' "
f"-c 'user.name={COMMIT_AUTHOR_NAME}' "
f"-c 'user.email={COMMIT_AUTHOR_EMAIL}' "
f"commit --no-verify -m 'Generated code for {revision_sha}' --allow-empty")
# Writes an HTML template for diff2html so that we can add contextual information
def write_html_template(title, subtitle, tmp_file):
tmp_file.writelines(map(lambda line: line.encode(), [
"<!doctype html>",
"<html>",
"<head>",
' <metadata charset="utf-8">',
f' <title>Codegen diff for the {title}: {subtitle}</title>',
' <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/9.9.0/styles/github.min.css" / >',
' <!--diff2html-css-->',
' <!--diff2html-js-ui-->',
' <script>',
' document.addEventListener("DOMContentLoaded", () => {',
' const targetElement = document.getElementById("diff");',
' const diff2htmlUi = new Diff2HtmlUI(targetElement);',
' //diff2html-fileListToggle',
' //diff2html-synchronisedScroll',
' //diff2html-highlightCode',
' });',
' </script>',
"</head>",
"<body>",
f" <h1>Codegen diff for the {title}</h1>",
f" <p>{subtitle}</p>",
' <div id="diff">',
' <!--diff2html-diff-->',
' </div>',
"</body>",
"</html>",
]))
tmp_file.flush()
def make_diff(title, path_to_diff, base_commit_sha, head_commit_sha, suffix, whitespace):
whitespace_flag = "" if whitespace else "-b"
diff_exists = get_cmd_status(f"git diff --quiet {whitespace_flag} "
f"{BASE_BRANCH_NAME} {HEAD_BRANCH_NAME} -- {path_to_diff}")
if diff_exists == 0:
eprint(f"No diff output for {base_commit_sha}..{head_commit_sha}")
return None
else:
run(f"mkdir -p {OUTPUT_PATH}/{base_commit_sha}/{head_commit_sha}")
dest_path = f"{base_commit_sha}/{head_commit_sha}/diff-{suffix}.html"
whitespace_context = "" if whitespace else "(ignoring whitespace)"
with tempfile.NamedTemporaryFile() as tmp_file:
write_html_template(title, f"rev. {head_commit_sha} {whitespace_context}", tmp_file)
# Generate HTML diff. This uses the diff2html-cli, which defers to `git diff` under the hood.
# All arguments after the first `--` go to the `git diff` command.
diff_cmd = f"diff2html -s line -f html -d word -i command --hwt "\
f"{tmp_file.name} -F {OUTPUT_PATH}/{dest_path} -- "\
f"-U20 {whitespace_flag} {BASE_BRANCH_NAME} {HEAD_BRANCH_NAME} -- {path_to_diff}"
eprint(f"Running diff cmd: {diff_cmd}")
run(diff_cmd)
return dest_path
def diff_link(diff_text, empty_diff_text, diff_location, alternate_text, alternate_location):
if diff_location is None:
return empty_diff_text
else:
return f"[{diff_text}]({CDN_URL}/codegen-diff/{diff_location}) ([{alternate_text}]({CDN_URL}/codegen-diff/{alternate_location}))"
def make_diffs(base_commit_sha, head_commit_sha):
sdk_ws = make_diff("AWS SDK", f"{OUTPUT_PATH}/aws-sdk", base_commit_sha,
head_commit_sha, "aws-sdk", whitespace=True)
sdk_nows = make_diff("AWS SDK", f"{OUTPUT_PATH}/aws-sdk", base_commit_sha, head_commit_sha,
"aws-sdk-ignore-whitespace", whitespace=False)
server_ws = make_diff("Server Test", f"{OUTPUT_PATH}/codegen-server-test", base_commit_sha,
head_commit_sha, "server-test", whitespace=True)
server_nows = make_diff("Server Test", f"{OUTPUT_PATH}/codegen-server-test", base_commit_sha,
head_commit_sha, "server-test-ignore-whitespace", whitespace=False)
sdk_links = diff_link('AWS SDK', 'No codegen difference in the AWS SDK',
sdk_ws, 'ignoring whitespace', sdk_nows)
server_links = diff_link('Server Test', 'No codegen difference in the Server Test',
server_ws, 'ignoring whitespace', server_nows)
# Save escaped newlines so that the GitHub Action script gets the whole message
return "A new generated diff is ready to view.\\n"\
f"- {sdk_links}\\n"\
f"- {server_links}\\n"
def write_to_file(path, text):
with open(path, "w") as file:
file.write(text)
# Prints to stderr
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
# Runs a shell command
def run(command, shell=False):
if not shell:
command = shlex.split(command)
subprocess.run(command, stdout=sys.stderr, stderr=sys.stderr, shell=shell, check=True)
# Returns the output from a shell command. Bails if the command failed
def get_cmd_output(command):
result = subprocess.run(shlex.split(command), capture_output=True, check=True)
return result.stdout.decode("utf-8").strip()
# Runs a shell command and returns its exit status
def get_cmd_status(command):
return subprocess.run(command, capture_output=True, shell=True).returncode
if __name__ == "__main__":
main()
|
test_integration/test_skill_tellina.py | cohmoti/clai | 391 | 11137577 | #
# Copyright (C) 2020 IBM. All Rights Reserved.
#
# See LICENSE.txt file in the root directory
# of this source tree for licensing information.
#
from test_integration.contract_skills import ContractSkills
class TestSkillTellina(ContractSkills):
def get_skill_name(self):
return 'tellina'
def get_commands_to_execute(self):
return ['pwd',
'clai "tellina" exit terminal',
'clai "tellina" show me all files']
def get_commands_expected(self):
return ['/opt/IBM/clai',
'exit',
'find .']
|
sagemaker-python-sdk/tensorflow_serving_container/sample_utils.py | pollyrolly/amazon-sagemaker-examples | 2,610 | 11137581 | import cv2
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
def tfhub_to_savedmodel(
model_name, export_path, uri_pattern="https://tfhub.dev/google/imagenet/{}/classification/2"
):
"""Download a model from TensorFlow Hub, add inputs and outputs
suitable for serving inference requests, and export the resulting
graph as a SavedModel. This function should work for most
    image classification models on TensorFlow Hub.
Args:
model_name (str): The model name (e.g. mobilenet_v2_140_224)
export_path (str): The exported model will be saved at <export_path>/<model_name>
uri_pattern (str): Optional. The model name is combined with this
pattern to form a TensorFlow Hub uri. The default value works for MobileNetV2,
but a different pattern may be needed for other models.
Returns:
str: The path to the exported SavedModel (including model_name and version).
"""
# the model will output the topk predicted classes and probabilities
topk = 3
model_path = "{}/{}/00000001".format(export_path, model_name)
tfhub_uri = uri_pattern.format(model_name)
with tf.compat.v1.Session(graph=tf.Graph()) as sess:
module = hub.Module(tfhub_uri)
input_params = module.get_input_info_dict()
dtype = input_params["images"].dtype
shape = input_params["images"].get_shape()
# define the model inputs
inputs = {"images": tf.compat.v1.placeholder(dtype, shape, "images")}
# define the model outputs
# we want the class ids and probabilities for the top 3 classes
logits = module(inputs["images"])
softmax = tf.nn.softmax(logits, name=None)
probs, classes = tf.nn.top_k(softmax, k=topk, sorted=True, name=None)
outputs = {"classes": classes, "probabilities": probs}
# export the model
sess.run([tf.compat.v1.global_variables_initializer(), tf.compat.v1.tables_initializer()])
tf.compat.v1.saved_model.simple_save(sess, model_path, inputs=inputs, outputs=outputs)
return model_path
def image_file_to_tensor(path):
"""Reads an image file and coverts it to a tensor (ndarray).
Resizing of input is done (224x224 for the mobilenet_v2_140_224 model).
Args:
path (str): The file name or path to the image file.
"""
image = cv2.imread(path)
image = cv2.resize(image, dsize=(224, 224), interpolation=cv2.INTER_CUBIC)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = np.asarray(image)
image = cv2.normalize(image.astype("float"), None, 0, 1, cv2.NORM_MINMAX)
image = np.expand_dims(image, axis=0)
return image
def add_imagenet_labels(prediction_result):
"""Add imagenet class labels to the prediction result. The
prediction_result argument will be modified in place.
"""
# read the labels from a file
labels = []
with open("labels.txt", "r") as f:
labels = [l.strip() for l in f]
# add labels to the result dict
for pred in prediction_result["predictions"]:
prediction_labels = [labels[x - 1] for x in pred["classes"]]
pred["labels"] = prediction_labels
def print_probabilities_and_labels(labelled_result):
"""Print the labelled results." """
for pred in labelled_result["predictions"]:
for i in range(0, len(pred["labels"])):
print(
"{:1.7f} {}".format(
pred["probabilities"][i],
pred["labels"][i],
)
)
print()
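# --- Editor's usage sketch (not part of the original file) ---
# Shows the result structure expected by the two printing helpers above;
# the class ids, labels and probabilities are invented for illustration.
if __name__ == "__main__":
    example_result = {
        "predictions": [
            {
                "classes": [283, 282, 286],
                "probabilities": [0.68, 0.21, 0.05],
                "labels": ["tiger cat", "tabby", "Egyptian cat"],
            }
        ]
    }
    print_probabilities_and_labels(example_result)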
|
Tools/heatcalculator.py | zhyinty/HeliumRain | 646 | 11137607 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import configparser
import math
import sys
OVERHEAT_TEMPERATURE = 1200
BURN_TEMPERATURE = 1500
SOLAR_POWER = 3.094
def print_u(str):
bytes_str = (str + "\n").encode('utf8')
sys.stdout.buffer.write(bytes_str)
class Ship:
name = "Unnamed"
min_heatsink_ratio = 0.0
max_heatsink = 0.0
min_heatsink = 0.0
passive_power = 0.0
active_power = 0.0
boosting_power = 0.0
firing_power = 0.0
heat_capacity = 0.0
max_passive_equilibrium = 0.0
max_active_equilibrium = 0.0
max_boosting_equilibrium = 0.0
max_firing_equilibrium = 0.0
max_all_equilibrium = 0.0
min_passive_equilibrium = 0.0
min_active_equilibrium = 0.0
min_boosting_equilibrium = 0.0
min_firing_equilibrium = 0.0
min_all_equilibrium = 0.0
def __init__(self, path):
config = configparser.ConfigParser()
config.read(path)
for section in config.sections():
count = 1.
if "count" in config[section]:
count = float(config[section]["count"])
for key in config[section]:
value = config[section][key]
if key == "shipname":
self.name = value
elif key == "minheatsinkratio":
self.min_heatsink_ratio = float(value)
elif key == "maxheatsink":
self.max_heatsink += count * float(value)
self.min_heatsink += count * float(value) * self.min_heatsink_ratio
elif key == "heatcapacity":
self.heat_capacity += count * float(value)
elif key == "passivepower":
self.passive_power += count * float(value)
elif key == "activepower":
self.active_power += count * float(value)
elif key == "boostingpower":
self.boosting_power += count * float(value)
elif key == "firingpower":
self.firing_power += count * float(value)
elif key == "count":
# Already treated
pass
else:
print("unknown key "+key)
def compute_equilibrium(self, power, surface):
# Radiation in KJ = surface * 5.670373e-8 * FMath::Pow(Temperature, 4) / 1000
# Production in KJ = power
# Equilibrium when production equals radiation
        return math.pow(1000 * power / (surface * 5.670373e-8), 1/4)
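    # --- Editor's worked example (not part of the original file) ---
    # With an assumed heat production of 50 KW over a 10 m2 heatsink surface,
    # the equilibrium above gives (1000 * 50 / (10 * 5.670373e-8)) ** 0.25,
    # i.e. roughly 545 °K.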
def compute_boost_duration(self, initial_temperature, final_temperature, power, surface, heat_capacity):
# Radiation in KJ = surface * 5.670373e-8 * FMath::Pow(Temperature, 4) / 1000
# Production in KJ = power
# temperature variation is : dT/dt = (1000 * power - surface * 5.670373e-8 * FMath::Pow(T, 4)) / heat_capacity
# T(t) = 1000 * power t - (surface * k * FMath::Pow(T, 5)/(5 * heat_capacity)
if self.compute_equilibrium(power, surface) < final_temperature:
# The final temperature will never be reach
return -1;
delta_seconds = 0.001
time = 0.0;
heat = initial_temperature * heat_capacity
temperature = heat / heat_capacity
while temperature < final_temperature:
heat = heat + (power - surface * 5.670373e-8 * math.pow(temperature, 4) / 1000) * delta_seconds
time = time + delta_seconds
temperature = heat / heat_capacity
return time
def compute_cooling_duration(self, initial_temperature, final_temperature, power, surface, heat_capacity):
# Radiation in KJ = surface * 5.670373e-8 * FMath::Pow(Temperature, 4) / 1000
# Production in KJ = power
# temperature variation is : dT/dt = (1000 * power - surface * 5.670373e-8 * FMath::Pow(T, 4)) / heat_capacity
# T(t) = 1000 * power t - (surface * k * FMath::Pow(T, 5)/(5 * heat_capacity)
if self.compute_equilibrium(power, surface) > final_temperature:
# The final temperature will never be reach
return -1;
delta_seconds = 0.001
time = 0.0;
heat = initial_temperature * heat_capacity
temperature = heat / heat_capacity
while temperature > final_temperature:
heat = heat + (power - surface * 5.670373e-8 * math.pow(temperature, 4) / 1000) * delta_seconds
time = time + delta_seconds
temperature = heat / heat_capacity
return time
def compute(self):
max_solar_power = self.max_heatsink * SOLAR_POWER * 0.5
active_max_usage = 0.26
self.max_passive_equilibrium = self.compute_equilibrium(self.passive_power + max_solar_power, self.max_heatsink)
self.max_active_equilibrium = self.compute_equilibrium(self.passive_power + self.active_power * active_max_usage + max_solar_power, self.max_heatsink)
self.max_boosting_equilibrium = self.compute_equilibrium(self.passive_power + self.active_power * active_max_usage + self.boosting_power + max_solar_power, self.max_heatsink)
self.max_firing_equilibrium = self.compute_equilibrium(self.passive_power + self.firing_power + max_solar_power, self.max_heatsink)
self.max_all_equilibrium = self.compute_equilibrium(self.passive_power + self.active_power * active_max_usage + self.boosting_power + self.firing_power + max_solar_power, self.max_heatsink)
min_solar_power = self.min_heatsink * SOLAR_POWER * 0.5
self.min_passive_equilibrium = self.compute_equilibrium(self.passive_power + min_solar_power, self.min_heatsink)
self.min_active_equilibrium = self.compute_equilibrium(self.passive_power + self.active_power * active_max_usage + min_solar_power, self.min_heatsink)
self.min_boosting_equilibrium = self.compute_equilibrium(self.passive_power + self.active_power * active_max_usage + self.boosting_power + min_solar_power, self.min_heatsink)
self.min_firing_equilibrium = self.compute_equilibrium(self.passive_power + self.firing_power + min_solar_power, self.min_heatsink)
self.min_all_equilibrium = self.compute_equilibrium(self.passive_power + self.active_power * active_max_usage + self.boosting_power + self.firing_power + min_solar_power, self.min_heatsink)
self.passive_boost_duration = self.compute_boost_duration(self.max_passive_equilibrium, OVERHEAT_TEMPERATURE, self.passive_power + self.active_power * active_max_usage + self.boosting_power + max_solar_power, self.max_heatsink, self.heat_capacity)
self.active_boost_duration = self.compute_boost_duration(self.max_active_equilibrium, OVERHEAT_TEMPERATURE, self.passive_power + self.active_power * active_max_usage + self.boosting_power + max_solar_power, self.max_heatsink, self.heat_capacity)
self.passive_firing_duration = self.compute_boost_duration(self.max_passive_equilibrium, OVERHEAT_TEMPERATURE, self.passive_power + self.firing_power + max_solar_power, self.max_heatsink, self.heat_capacity)
self.active_firing_duration = self.compute_boost_duration(self.max_active_equilibrium, OVERHEAT_TEMPERATURE, self.passive_power + self.active_power * active_max_usage + self.firing_power + max_solar_power, self.max_heatsink, self.heat_capacity)
self.burning_to_overheat_cooling = self.compute_cooling_duration(BURN_TEMPERATURE, OVERHEAT_TEMPERATURE, self.passive_power + max_solar_power, self.max_heatsink, self.heat_capacity)
self.boosting_to_active_cooling = self.compute_cooling_duration(self.max_boosting_equilibrium, self.max_active_equilibrium, self.passive_power + max_solar_power, self.max_heatsink, self.heat_capacity)
def dump(self):
print("-------------------")
print("Ship " + self.name)
print("-------------------")
print_u("Heat capacity: "+ str(self.heat_capacity) + " KJ/°K")
print_u("Solar power: "+ str(SOLAR_POWER) + " KW/m²")
print_u("Min heatsink ratio: "+ str(self.min_heatsink_ratio))
print("Heatsink")
print_u(" - Maximum: "+ str(self.max_heatsink) + " m²")
print_u(" - Minimum: "+ str(self.min_heatsink) + " m²")
print("Heat production")
print(" - Passive: "+ str(self.passive_power) + " KW")
print(" - Active: "+ str(self.active_power) + " KW")
print(" - Boosting: "+ str(self.boosting_power) + " KW")
print(" - Firing: "+ str(self.firing_power) + " KW")
print("Equilibium at max heatsink")
print_u(" - Passive: "+ str(self.max_passive_equilibrium) + " °K")
print_u(" - Active: "+ str(self.max_active_equilibrium) + " °K")
print_u(" - Boosting: "+ str(self.max_boosting_equilibrium) + " °K")
print_u(" - Firing: "+ str(self.max_firing_equilibrium) + " °K")
print_u(" - All: "+ str(self.max_all_equilibrium) + " °K")
print("Equilibium at min heatsink")
print_u(" - Passive: "+ str(self.min_passive_equilibrium) + " °K")
print_u(" - Active: "+ str(self.min_active_equilibrium) + " °K")
print_u(" - Boosting: "+ str(self.min_boosting_equilibrium) + " °K")
print_u(" - Firing: "+ str(self.min_firing_equilibrium) + " °K")
print_u(" - All: "+ str(self.min_all_equilibrium) + " °K")
print("Usage duration")
print(" - Boosting from passive: "+ (str(self.passive_boost_duration) + " s" if self.passive_boost_duration > 0 else "No overheat"))
print(" - Boosting from active: "+ (str(self.active_boost_duration) + " s" if self.active_boost_duration > 0 else "No overheat"))
print(" - Firing from passive: "+ (str(self.passive_firing_duration) + " s" if self.passive_firing_duration > 0 else "No overheat"))
print(" - Firing from active: "+ (str(self.active_firing_duration) + " s" if self.active_firing_duration > 0 else "No overheat"))
print("Cooling duration")
print(" - Burning to Overheat: "+ str(self.burning_to_overheat_cooling) + " s")
print(" - Boosting to active: "+ str(self.boosting_to_active_cooling) + " s")
ship = Ship("ghoul.ship")
ship.compute()
ship.dump()
ship = Ship("orca.ship")
ship.compute()
ship.dump()
ship = Ship("omen.ship")
ship.compute()
ship.dump()
ship = Ship("invader.ship")
ship.compute()
ship.dump()
|
uberduck_ml_dev/_nbdev.py | Cris140/uberduck-ml-dev | 167 | 11137618 | # AUTOGENERATED BY NBDEV! DO NOT EDIT!
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
index = {"ensure_speaker_table": "data.cache.ipynb",
"STANDARD_MULTISPEAKER": "data.parse.ipynb",
"STANDARD_SINGLESPEAKER": "data.parse.ipynb",
"VCTK": "data.parse.ipynb",
"word_frequencies": "data.statistics.ipynb",
"create_wordcloud": "data.statistics.ipynb",
"count_frequency": "data.statistics.ipynb",
"pace_character": "data.statistics.ipynb",
"pace_phoneme": "data.statistics.ipynb",
"get_sample_format": "data.statistics.ipynb",
"AbsoluteMetrics": "data.statistics.ipynb",
"pad_sequences": "data_loader.ipynb",
"prepare_input_sequence": "data_loader.ipynb",
"oversample": "data_loader.ipynb",
"TextMelDataset": "data_loader.ipynb",
"TextMelCollate": "data_loader.ipynb",
"TextAudioSpeakerLoader": "data_loader.ipynb",
"TextAudioSpeakerCollate": "data_loader.ipynb",
"DistributedBucketSampler": "data_loader.ipynb",
"tts": "e2e.ipynb",
"rhythm_transfer": "e2e.ipynb",
"get_summary_statistics": "exec.dataset_statistics.ipynb",
"calculate_statistics": "exec.dataset_statistics.ipynb",
"generate_markdown": "exec.dataset_statistics.ipynb",
"parse_args": "utils.exec.ipynb",
"run": "exec.train_vits.ipynb",
"FORMATS": "exec.parse_data.ipynb",
"batch": "exec.preprocess_vits.ipynb",
"flatten": "exec.preprocess_vits.ipynb",
"write_filenames": "exec.split_train_val.ipynb",
"VITSEncoder": "models.attentions.ipynb",
"Decoder": "models.tacotron2.ipynb",
"MultiHeadAttention": "models.gradtts.ipynb",
"FFN": "models.gradtts.ipynb",
"TTSModel": "models.base.ipynb",
"DEFAULTS": "trainer.tacotron2.ipynb",
"Conv1d": "models.common.ipynb",
"LinearNorm": "models.common.ipynb",
"LocationLayer": "models.common.ipynb",
"Attention": "models.common.ipynb",
"STFT": "models.common.ipynb",
"MelSTFT": "models.common.ipynb",
"ReferenceEncoder": "models.common.ipynb",
"STL": "models.common.ipynb",
"GST": "models.common.ipynb",
"LayerNorm": "models.gradtts.ipynb",
"Flip": "models.common.ipynb",
"Log": "models.common.ipynb",
"ElementwiseAffine": "models.common.ipynb",
"DDSConv": "models.common.ipynb",
"ConvFlow": "models.common.ipynb",
"WN": "models.common.ipynb",
"ResidualCouplingLayer": "models.common.ipynb",
"ResBlock1": "vocoders.hifigan.ipynb",
"ResBlock2": "vocoders.hifigan.ipynb",
"LRELU_SLOPE": "vocoders.hifigan.ipynb",
"fix_len_compatibility_text_edit": "models.editts.ipynb",
"EdiTTS": "models.editts.ipynb",
"BaseModule": "models.gradtts.ipynb",
"Mish": "models.gradtts.ipynb",
"Upsample": "models.gradtts.ipynb",
"Downsample": "models.gradtts.ipynb",
"Rezero": "models.gradtts.ipynb",
"Block": "models.gradtts.ipynb",
"ResnetBlock": "models.gradtts.ipynb",
"LinearAttention": "models.gradtts.ipynb",
"Residual": "models.gradtts.ipynb",
"SinusoidalPosEmb": "models.gradtts.ipynb",
"GradLogPEstimator2d": "models.gradtts.ipynb",
"get_noise": "models.gradtts.ipynb",
"Diffusion": "models.gradtts.ipynb",
"sequence_mask": "utils.utils.ipynb",
"fix_len_compatibility": "models.gradtts.ipynb",
"convert_pad_shape": "utils.utils.ipynb",
"generate_path": "utils.utils.ipynb",
"duration_loss": "models.gradtts.ipynb",
"ConvReluNorm": "models.gradtts.ipynb",
"DurationPredictor": "models.vits.ipynb",
"Encoder": "models.tacotron2.ipynb",
"TextEncoder": "models.vits.ipynb",
"GradTTS": "models.gradtts.ipynb",
"Postnet": "models.tacotron2.ipynb",
"Prenet": "models.tacotron2.ipynb",
"Mellotron": "models.mellotron.ipynb",
"config": "trainer.tacotron2.ipynb",
"Tacotron2": "models.tacotron2.ipynb",
"SPECIAL_PREFIX": "models.torchmoji.ipynb",
"SPECIAL_TOKENS": "models.torchmoji.ipynb",
"NB_TOKENS": "models.torchmoji.ipynb",
"NB_EMOJI_CLASSES": "models.torchmoji.ipynb",
"FINETUNING_METHODS": "models.torchmoji.ipynb",
"FINETUNING_METRICS": "models.torchmoji.ipynb",
"EMOJIS": "models.torchmoji.ipynb",
"LSTMHardSigmoid": "models.torchmoji.ipynb",
"AutogradRNN": "models.torchmoji.ipynb",
"Recurrent": "models.torchmoji.ipynb",
"variable_recurrent_factory": "models.torchmoji.ipynb",
"VariableRecurrent": "models.torchmoji.ipynb",
"VariableRecurrentReverse": "models.torchmoji.ipynb",
"StackedRNN": "models.torchmoji.ipynb",
"LSTMCell": "models.torchmoji.ipynb",
"hard_sigmoid": "models.torchmoji.ipynb",
"tokenize": "models.torchmoji.ipynb",
"RE_NUM": "models.torchmoji.ipynb",
"RE_WORD": "models.torchmoji.ipynb",
"RE_WHITESPACE": "models.torchmoji.ipynb",
"RE_ANY": "models.torchmoji.ipynb",
"RE_COMB": "models.torchmoji.ipynb",
"RE_CONTRACTIONS": "models.torchmoji.ipynb",
"TITLES": "models.torchmoji.ipynb",
"RE_TITLES": "models.torchmoji.ipynb",
"SYMBOLS": "models.torchmoji.ipynb",
"RE_SYMBOL": "models.torchmoji.ipynb",
"SPECIAL_SYMBOLS": "models.torchmoji.ipynb",
"RE_ABBREVIATIONS": "models.torchmoji.ipynb",
"RE_HASHTAG": "models.torchmoji.ipynb",
"RE_MENTION": "models.torchmoji.ipynb",
"RE_URL": "models.torchmoji.ipynb",
"RE_EMAIL": "models.torchmoji.ipynb",
"RE_HEART": "models.torchmoji.ipynb",
"EMOTICONS_START": "models.torchmoji.ipynb",
"EMOTICONS_MID": "models.torchmoji.ipynb",
"EMOTICONS_END": "models.torchmoji.ipynb",
"EMOTICONS_EXTRA": "models.torchmoji.ipynb",
"RE_EMOTICON": "models.torchmoji.ipynb",
"RE_EMOJI": "models.torchmoji.ipynb",
"TOKENS": "models.torchmoji.ipynb",
"IGNORED": "models.torchmoji.ipynb",
"RE_PATTERN": "models.torchmoji.ipynb",
"TorchmojiAttention": "models.torchmoji.ipynb",
"VocabBuilder": "models.torchmoji.ipynb",
"MasterVocab": "models.torchmoji.ipynb",
"all_words_in_sentences": "models.torchmoji.ipynb",
"extend_vocab_in_file": "models.torchmoji.ipynb",
"extend_vocab": "models.torchmoji.ipynb",
"SentenceTokenizer": "models.torchmoji.ipynb",
"coverage": "models.torchmoji.ipynb",
"torchmoji_feature_encoding": "models.torchmoji.ipynb",
"torchmoji_emojis": "models.torchmoji.ipynb",
"torchmoji_transfer": "models.torchmoji.ipynb",
"TorchMoji": "models.torchmoji.ipynb",
"load_specific_weights": "models.torchmoji.ipynb",
"load_benchmark": "models.torchmoji.ipynb",
"calculate_batchsize_maxlen": "models.torchmoji.ipynb",
"freeze_layers": "models.torchmoji.ipynb",
"change_trainable": "models.torchmoji.ipynb",
"find_f1_threshold": "models.torchmoji.ipynb",
"finetune": "models.torchmoji.ipynb",
"tune_trainable": "models.torchmoji.ipynb",
"evaluate_using_weighted_f1": "models.torchmoji.ipynb",
"evaluate_using_acc": "models.torchmoji.ipynb",
"chain_thaw": "models.torchmoji.ipynb",
"train_by_chain_thaw": "models.torchmoji.ipynb",
"calc_loss": "models.torchmoji.ipynb",
"fit_model": "models.torchmoji.ipynb",
"get_data_loader": "models.torchmoji.ipynb",
"DeepMojiDataset": "models.torchmoji.ipynb",
"DeepMojiBatchSampler": "models.torchmoji.ipynb",
"relabel": "models.torchmoji.ipynb",
"class_avg_finetune": "models.torchmoji.ipynb",
"prepare_labels": "models.torchmoji.ipynb",
"prepare_generators": "models.torchmoji.ipynb",
"class_avg_tune_trainable": "models.torchmoji.ipynb",
"class_avg_chainthaw": "models.torchmoji.ipynb",
"read_english": "models.torchmoji.ipynb",
"read_wanted_emojis": "models.torchmoji.ipynb",
"read_non_english_users": "models.torchmoji.ipynb",
"is_special_token": "models.torchmoji.ipynb",
"mostly_english": "models.torchmoji.ipynb",
"correct_length": "models.torchmoji.ipynb",
"punct_word": "models.torchmoji.ipynb",
"load_non_english_user_set": "models.torchmoji.ipynb",
"non_english_user": "models.torchmoji.ipynb",
"separate_emojis_and_text": "models.torchmoji.ipynb",
"extract_emojis": "models.torchmoji.ipynb",
"remove_variation_selectors": "models.torchmoji.ipynb",
"shorten_word": "models.torchmoji.ipynb",
"detect_special_tokens": "models.torchmoji.ipynb",
"process_word": "models.torchmoji.ipynb",
"remove_control_chars": "models.torchmoji.ipynb",
"convert_nonbreaking_space": "models.torchmoji.ipynb",
"convert_linebreaks": "models.torchmoji.ipynb",
"AtMentionRegex": "models.torchmoji.ipynb",
"urlRegex": "models.torchmoji.ipynb",
"VARIATION_SELECTORS": "models.torchmoji.ipynb",
"ALL_CHARS": "models.torchmoji.ipynb",
"CONTROL_CHARS": "models.torchmoji.ipynb",
"CONTROL_CHAR_REGEX": "models.torchmoji.ipynb",
"WordGenerator": "models.torchmoji.ipynb",
"TweetWordGenerator": "models.torchmoji.ipynb",
"RETWEETS_RE": "models.torchmoji.ipynb",
"URLS_RE": "models.torchmoji.ipynb",
"MENTION_RE": "models.torchmoji.ipynb",
"ALLOWED_CONVERTED_UNICODE_PUNCTUATION": "models.torchmoji.ipynb",
"TorchMojiInterface": "models.torchmoji.ipynb",
"piecewise_rational_quadratic_transform": "models.transforms.ipynb",
"searchsorted": "models.transforms.ipynb",
"unconstrained_rational_quadratic_spline": "models.transforms.ipynb",
"rational_quadratic_spline": "models.transforms.ipynb",
"DEFAULT_MIN_BIN_WIDTH": "models.transforms.ipynb",
"DEFAULT_MIN_BIN_HEIGHT": "models.transforms.ipynb",
"DEFAULT_MIN_DERIVATIVE": "models.transforms.ipynb",
"StochasticDurationPredictor": "models.vits.ipynb",
"ResidualCouplingBlock": "models.vits.ipynb",
"PosteriorEncoder": "models.vits.ipynb",
"Generator": "vocoders.hifigan.ipynb",
"DiscriminatorP": "vocoders.hifigan.ipynb",
"DiscriminatorS": "vocoders.hifigan.ipynb",
"MultiPeriodDiscriminator": "vocoders.hifigan.ipynb",
"SynthesizerTrn": "models.vits.ipynb",
"get_alignment_metrics": "monitoring.statistics.ipynb",
"CMUDict": "text.cmudict.ipynb",
"valid_symbols": "text.cmudict.ipynb",
"symbols_portuguese": "text.symbols.ipynb",
"PORTUGUESE_SYMBOLS": "text.symbols.ipynb",
"symbols_polish": "text.symbols.ipynb",
"POLISH_SYMBOLS": "text.symbols.ipynb",
"symbols_dutch": "text.symbols.ipynb",
"DUTCH_SYMBOLS": "text.symbols.ipynb",
"symbols_spanish": "text.symbols.ipynb",
"SPANISH_SYMBOLS": "text.symbols.ipynb",
"symbols": "text.symbols.ipynb",
"symbols_nvidia_taco2": "text.symbols.ipynb",
"symbols_with_ipa": "text.symbols.ipynb",
"grad_tts_symbols": "text.symbols.ipynb",
"DEFAULT_SYMBOLS": "text.symbols.ipynb",
"IPA_SYMBOLS": "text.symbols.ipynb",
"NVIDIA_TACO2_SYMBOLS": "text.symbols.ipynb",
"GRAD_TTS_SYMBOLS": "text.symbols.ipynb",
"SYMBOL_SETS": "text.symbols.ipynb",
"symbols_to_sequence": "text.symbols.ipynb",
"arpabet_to_sequence": "text.symbols.ipynb",
"should_keep_symbol": "text.symbols.ipynb",
"symbol_to_id": "text.symbols.ipynb",
"id_to_symbol": "text.symbols.ipynb",
"curly_re": "text.symbols.ipynb",
"words_re": "text.symbols.ipynb",
"normalize_numbers": "text.util.ipynb",
"expand_abbreviations": "text.util.ipynb",
"expand_numbers": "text.util.ipynb",
"lowercase": "text.util.ipynb",
"collapse_whitespace": "text.util.ipynb",
"convert_to_ascii": "text.util.ipynb",
"convert_to_arpabet": "text.util.ipynb",
"basic_cleaners": "text.util.ipynb",
"transliteration_cleaners": "text.util.ipynb",
"english_cleaners": "text.util.ipynb",
"english_cleaners_phonemizer": "text.util.ipynb",
"batch_english_cleaners_phonemizer": "text.util.ipynb",
"g2p": "text.util.ipynb",
"batch_clean_text": "text.util.ipynb",
"clean_text": "text.util.ipynb",
"english_to_arpabet": "text.util.ipynb",
"cleaned_text_to_sequence": "text.util.ipynb",
"text_to_sequence": "text.util.ipynb",
"sequence_to_text": "text.util.ipynb",
"BATCH_CLEANERS": "text.util.ipynb",
"CLEANERS": "text.util.ipynb",
"text_to_sequence_for_editts": "text.util.ipynb",
"random_utterance": "text.util.ipynb",
"utterances": "text.util.ipynb",
"TTSTrainer": "trainer.base.ipynb",
"GradTTSTrainer": "trainer.gradtts.ipynb",
"MellotronTrainer": "trainer.mellotron.ipynb",
"Tacotron2Loss": "trainer.tacotron2.ipynb",
"Tacotron2Trainer": "trainer.tacotron2.ipynb",
"feature_loss": "vocoders.hifigan.ipynb",
"discriminator_loss": "vocoders.hifigan.ipynb",
"generator_loss": "vocoders.hifigan.ipynb",
"kl_loss": "trainer.vits.ipynb",
"VITSTrainer": "trainer.vits.ipynb",
"mel_to_audio": "utils.audio.ipynb",
"differenceFunction": "utils.audio.ipynb",
"cumulativeMeanNormalizedDifferenceFunction": "utils.audio.ipynb",
"getPitch": "utils.audio.ipynb",
"compute_yin": "utils.audio.ipynb",
"convert_to_wav": "utils.audio.ipynb",
"match_target_amplitude": "utils.audio.ipynb",
"modify_leading_silence": "utils.audio.ipynb",
"normalize_audio_segment": "utils.audio.ipynb",
"normalize_audio": "utils.audio.ipynb",
"trim_audio": "utils.audio.ipynb",
"MAX_WAV_INT16": "utils.audio.ipynb",
"load_wav_to_torch": "utils.audio.ipynb",
"overlay_mono": "utils.audio.ipynb",
"overlay_stereo": "utils.audio.ipynb",
"mono_to_stereo": "utils.audio.ipynb",
"stereo_to_mono": "utils.audio.ipynb",
"resample": "utils.audio.ipynb",
"get_audio_max": "utils.audio.ipynb",
"to_int16": "utils.audio.ipynb",
"save_figure_to_numpy": "utils.plot.ipynb",
"plot_tensor": "utils.plot.ipynb",
"plot_spectrogram": "utils.plot.ipynb",
"plot_attention": "utils.plot.ipynb",
"plot_attention_phonemes": "utils.plot.ipynb",
"plot_gate_outputs": "utils.plot.ipynb",
"load_filepaths_and_text": "utils.utils.ipynb",
"window_sumsquare": "utils.utils.ipynb",
"griffin_lim": "utils.utils.ipynb",
"dynamic_range_compression": "utils.utils.ipynb",
"dynamic_range_decompression": "utils.utils.ipynb",
"to_gpu": "utils.utils.ipynb",
"get_mask_from_lengths": "utils.utils.ipynb",
"reduce_tensor": "utils.utils.ipynb",
"subsequent_mask": "utils.utils.ipynb",
"slice_segments": "utils.utils.ipynb",
"rand_slice_segments": "utils.utils.ipynb",
"init_weights": "vocoders.hifigan.ipynb",
"get_padding": "vocoders.hifigan.ipynb",
"fused_add_tanh_sigmoid_multiply": "utils.utils.ipynb",
"clip_grad_value_": "utils.utils.ipynb",
"intersperse": "utils.utils.ipynb",
"intersperse_emphases": "utils.utils.ipynb",
"parse_values": "vendor.tfcompat.hparam.ipynb",
"HParams": "vendor.tfcompat.hparam.ipynb",
"PARAM_RE": "vendor.tfcompat.hparam.ipynb",
"HiFiGanGenerator": "vocoders.hifigan.ipynb",
"MultiScaleDiscriminator": "vocoders.hifigan.ipynb",
"AttrDict": "vocoders.hifigan.ipynb",
"build_env": "vocoders.hifigan.ipynb",
"apply_weight_norm": "vocoders.hifigan.ipynb"}
modules = ["data/cache.py",
"data/parse.py",
"data/statistics.py",
"data_loader.py",
"e2e.py",
"exec/dataset_statistics.py",
"exec/gather_dataset.py",
"exec/generate_filelist.py",
"exec/normalize_audio.py",
"exec/parse_data.py",
"exec/preprocess_vits.py",
"exec/split_train_val.py",
"exec/train_gradtts.py",
"exec/train_mellotron.py",
"exec/train_tacotron2.py",
"exec/train_vits.py",
"models/attentions.py",
"models/base.py",
"models/common.py",
"models/editts.py",
"models/gradtts.py",
"models/mellotron.py",
"models/tacotron2.py",
"models/torchmoji.py",
"models/transforms.py",
"models/vits.py",
"monitoring/generate.py",
"monitoring/statistics.py",
"text/cmudict.py",
"text/symbols.py",
"text/util.py",
"trainer/base.py",
"trainer/gradtts.py",
"trainer/mellotron.py",
"trainer/tacotron2.py",
"trainer/vits.py",
"utils/argparse.py",
"utils/audio.py",
"utils/plot.py",
"utils/utils.py",
"vendor/tfcompat/hparam.py",
"vocoders/hifigan.py"]
doc_url = "https://uberduck-ai.github.io/uberduck_ml_dev/"
git_url = "https://github.com/uberduck-ai/uberduck_ml_dev/tree/master/"
def custom_doc_links(name):
return None
|
rl_agents/agents/tree_search/mdp_gape.py | RockWenJJ/rl-agents | 342 | 11137619 | import logging
import numpy as np
from rl_agents.agents.common.factory import safe_deepcopy_env
from rl_agents.agents.tree_search.olop import OLOP, OLOPAgent, OLOPNode
from rl_agents.utils import max_expectation_under_constraint, kl_upper_bound
logger = logging.getLogger(__name__)
class MDPGapE(OLOP):
"""
Best-Arm Identification MCTS.
"""
def __init__(self, env, config=None):
super().__init__(env, config)
self.next_observation = None
self.budget_used = 0
@classmethod
def default_config(cls):
cfg = super().default_config()
cfg.update(
{
"accuracy": 1.0,
"confidence": 0.9,
"continuation_type": "uniform",
"horizon_from_accuracy": False,
"max_next_states_count": 1,
"upper_bound": {
"type": "kullback-leibler",
"time": "global",
"threshold": "3*np.log(1 + np.log(count))"
"+ horizon*np.log(actions)"
"+ np.log(1/(1-confidence))",
"transition_threshold": "0.1*np.log(time)"
},
}
)
return cfg
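# Note: the "threshold" and "transition_threshold" strings in the config above
# are evaluated with eval() inside DecisionNode.compute_reward_ucb() and
# ChanceNode.transition_threshold(), with the local variables horizon, actions,
# confidence, count and time in scope; they define the exploration radii of the
# confidence intervals on rewards and transition probabilities.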
def reset(self):
if "horizon" not in self.config:
self.allocate_budget()
self.root = DecisionNode(parent=None, planner=self)
def allocate_budget(self):
"""
Allocate the computational budget into tau episodes of fixed horizon H.
"""
if self.config["horizon_from_accuracy"]:
self.config["horizon"] = int(np.ceil(np.log(self.config["accuracy"] * (1 - self.config["gamma"]) / 2) \
/ np.log(self.config["gamma"])))
self.config["episodes"] = self.config["budget"] // self.config["horizon"]
assert self.config["episodes"] > 1
logger.debug("Planning at depth H={}".format(self.config["horizon"]))
else:
super().allocate_budget()
def run(self, state):
"""
Run an MDP-GapE episode.
:param state: the initial environment state
"""
# We need randomness
state.seed(self.np_random.randint(2**30))
best, challenger = None, None
if self.root.children:
logger.debug(" / ".join(["a{} ({}): [{:.3f}, {:.3f}]".format(k, n.count, n.value_lower, n.value_upper)
for k, n in self.root.children.items()]))
else:
self.root.expand(state)
# Follow selection policy, expand tree if needed, collect rewards and update confidence bounds.
decision_node = self.root
for h in range(self.config["horizon"]):
action = decision_node.sampling_rule(n_actions=state.action_space.n)
# Perform transition
chance_node, action = decision_node.get_child(action, state)
observation, reward, done, _ = self.step(state, action)
decision_node = chance_node.get_child(observation)
# Update local statistics
chance_node.update(np.nan, False)
decision_node.update(reward, done)
# Backup global statistics
decision_node.backup_to_root()
_, best, challenger = self.root.best_arm_identification_selection()
return best, challenger
def plan(self, state, observation):
done = False
episode = 0
while not done:
best, challenger = self.run(safe_deepcopy_env(state))
# Stopping rule
done = challenger.value_upper - best.value_lower < self.config["accuracy"] if best is not None else False
done = done or episode > self.config["episodes"]
episode += 1
if episode % 10 == 0:
logger.debug('Episode {}: delta = {}/{}'.format(episode,
challenger.value_upper - best.value_lower,
self.config["accuracy"]))
self.budget_used = episode * self.config["horizon"]
return self.get_plan()
def step_tree(self, actions):
"""
Update the planner tree when the agent performs an action and observes the next state
:param actions: a sequence of actions to follow from the root node
"""
if self.config["step_strategy"] == "reset":
self.step_by_reset()
elif self.config["step_strategy"] == "subtree":
if actions:
self.step_by_subtree(actions[0])
self.step_by_subtree(str(self.next_observation)) # Step to the observed next state
else:
self.step_by_reset()
else:
logger.warning("Unknown step strategy: {}".format(self.config["step_strategy"]))
self.step_by_reset()
def get_plan(self):
"""Only return the first action, the rest is conditioned on observations"""
return [self.root.selection_rule()]
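# A minimal usage sketch for the planner (illustrative only: the environment and
# the config values below are assumptions, not taken from this file):
#
#   env = ...  # a gym-style environment with a discrete action space and a seed() method
#   planner = MDPGapE(env, config={"budget": 200, "gamma": 0.9})
#   actions = planner.plan(state=env, observation=None)  # [best first action at the root]
#
# plan() repeatedly runs episodes on deep copies of `state` until either the gap
# between the best arm and its challenger drops below `accuracy`, or the episode
# budget is exhausted.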
class DecisionNode(OLOPNode):
def __init__(self, parent, planner):
super().__init__(parent, planner)
self.depth = 0 if parent is None else parent.depth + 1
self.mu_lcb = -np.infty
""" Lower bound of the node mean reward. """
if self.planner.config["upper_bound"]["type"] == "kullback-leibler":
self.mu_lcb = 0
gamma = self.planner.config["gamma"]
H = self.planner.config["horizon"]
self.value_upper = (1 - gamma ** (H - self.depth)) / (1 - gamma)
""" Upper bound on the node optimal reward-to-go """
self.value_lower = 0
""" Lower bound on the node optimal reward-to-go """
self.gap = -np.infty
""" Maximum possible gap from this node to its neighbours, based on their value confidence intervals """
def get_child(self, action, state):
if not self.children:
self.expand(state)
if action not in self.children: # Default action may not be available
action = list(self.children.keys())[0] # Pick first available action instead
return self.children[action], action
def expand(self, state):
if state is None:
raise Exception("The state should be set before expanding a node")
try:
actions = state.get_available_actions()
except AttributeError:
actions = range(state.action_space.n)
for action in actions:
self.children[action] = ChanceNode(self, self.planner)
def selection_rule(self):
# Best arm identification at the root
if self.planner.root == self:
_, best_node, _ = self.best_arm_identification_selection()
return next(best_node.path())
# Then follow the conservative values
actions = list(self.children.keys())
index = self.random_argmax([self.children[a].value_lower for a in actions])
return actions[index]
def sampling_rule(self, n_actions):
# Best arm identification at the root
if self == self.planner.root: # Run BAI at the root
selected_child, _, _ = self.best_arm_identification_selection()
action = next(selected_child.path())
# Elsewhere, follow the optimistic values
elif self.children:
actions = list(self.children.keys())
index = self.random_argmax([self.children[a].value_upper for a in actions])
action = actions[index]
# Break ties at leaves
else:
action = self.planner.np_random.randint(n_actions) \
if self.planner.config["continuation_type"] == "uniform" else 0
return action
def compute_reward_ucb(self):
if self.planner.config["upper_bound"]["type"] == "kullback-leibler":
# Variables available for threshold evaluation
horizon = self.planner.config["horizon"]
actions = self.planner.env.action_space.n
confidence = self.planner.config["confidence"]
count = self.count
time = self.planner.config["episodes"]
threshold = eval(self.planner.config["upper_bound"]["threshold"])
self.mu_ucb = kl_upper_bound(self.cumulative_reward, self.count, threshold)
self.mu_lcb = kl_upper_bound(self.cumulative_reward, self.count, threshold, lower=True)
else:
logger.error("Unknown upper-bound type")
def backup_to_root(self):
"""
Bellman V(s) = max_a Q(s,a)
"""
if self.children:
self.value_upper = np.amax([child.value_upper for child in self.children.values()])
self.value_lower = np.amax([child.value_lower for child in self.children.values()])
else:
assert self.depth == self.planner.config["horizon"]
self.value_upper = 0 # Maybe count bound over r(H..inf) ?
self.value_lower = 0 # Maybe count bound over r(H..inf) ?
if self.parent:
self.parent.backup_to_root()
def compute_children_gaps(self):
"""
For best arm identification: compute for each child how much the other actions are potentially better.
"""
for child in self.children.values():
child.gap = -np.infty
for other in self.children.values():
if other is not child:
child.gap = max(child.gap, other.value_upper - child.value_lower)
def best_arm_identification_selection(self):
"""
Run UGapE on the children on this node, based on their value confidence intervals.
:return: selected arm, best candidate, challenger
"""
# Best candidate child has the lowest potential gap
self.compute_children_gaps()
best = min(self.children.values(), key=lambda c: c.gap)
# Challenger: not best and highest value upper bound
challenger = max([c for c in self.children.values() if c is not best], key=lambda c: c.value_upper)
# Selection: the one with highest uncertainty
return max([best, challenger], key=lambda n: n.value_upper - n.value_lower), best, challenger
class ChanceNode(OLOPNode):
def __init__(self, parent, planner):
assert parent is not None
super().__init__(parent, planner)
self.depth = parent.depth
gamma = self.planner.config["gamma"]
self.value_upper = (1 - gamma ** (self.planner.config["horizon"] - self.depth)) / (1 - gamma)
self.value_lower = 0
self.p_hat, self.p_plus, self.p_minus = None, None, None
delattr(self, 'cumulative_reward')
delattr(self, 'mu_ucb')
def update(self, reward, done):
self.count += 1
def expand(self, state):
# Generate placeholder nodes
for i in range(self.planner.config["max_next_states_count"]):
self.children["placeholder_{}".format(i)] = DecisionNode(self, self.planner)
def get_child(self, observation, hash=False):
if not self.children:
self.expand(None)
import hashlib
state_id = hashlib.sha1(str(observation).encode("UTF-8")).hexdigest()[:5] if hash else str(observation)
if state_id not in self.children:
# Assign the first available placeholder to the observation
for i in range(self.planner.config["max_next_states_count"]):
if "placeholder_{}".format(i) in self.children:
self.children[state_id] = self.children.pop("placeholder_{}".format(i))
break
else:
raise ValueError("No more placeholder nodes available, we observed more next states than "
"the 'max_next_states_count' config")
return self.children[state_id]
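# The backup below propagates value bounds through this chance node: it builds
# the empirical next-state distribution p_hat from visit counts, then computes
# the distributions p_plus / p_minus that maximise the upper (resp. minimise the
# lower) expected value bound within a confidence set of radius
# transition_threshold()/count around p_hat, and backs the resulting Q-value
# bounds up to the parent decision node.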
def backup_to_root(self):
"""
Bellman Q(s,a) = r(s,a) + gamma E_s' V(s')
"""
assert self.children
assert self.parent
gamma = self.planner.config["gamma"]
children = list(self.children.values())
u_next = np.array([c.mu_ucb + gamma * c.value_upper for c in children])
l_next = np.array([c.mu_lcb + gamma * c.value_lower for c in children])
self.p_hat = np.array([child.count for child in children]) / self.count
threshold = self.transition_threshold() / self.count
self.p_plus = max_expectation_under_constraint(u_next, self.p_hat, threshold)
self.p_minus = max_expectation_under_constraint(-l_next, self.p_hat, threshold)
self.value_upper = self.p_plus @ u_next
self.value_lower = self.p_minus @ l_next
self.parent.backup_to_root()
def transition_threshold(self):
horizon = self.planner.config["horizon"]
actions = self.planner.env.action_space.n
confidence = self.planner.config["confidence"]
count = self.count
time = self.planner.config["episodes"]
return eval(self.planner.config["upper_bound"]["transition_threshold"])
class MDPGapEAgent(OLOPAgent):
"""
An agent that uses best-arm-identification to plan a sequence of actions in an MDP.
"""
PLANNER_TYPE = MDPGapE
def step(self, actions):
"""
Handle receding horizon mechanism with chance nodes
"""
replanning_required = self.remaining_horizon == 0 # Cannot check remaining actions here
if replanning_required:
self.remaining_horizon = self.config["receding_horizon"] - 1
self.planner.step_by_reset()
else:
self.remaining_horizon -= 1
self.planner.step_tree(actions)
# Check for remaining children here instead
if self.planner.root.children:
self.previous_actions.extend(self.planner.get_plan())
else: # After stepping the transition in the tree, the subtree is empty
replanning_required = True
self.planner.step_by_reset()
return replanning_required
def record(self, state, action, reward, next_state, done, info):
self.planner.next_observation = next_state
|
BitsParser.py | fireeye/BitsParser | 104 | 11137624 | # Copyright 2021 FireEye, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import os
import sys
import json
import string
import struct
import hashlib
import argparse
import datetime
import traceback
from ese.ese import ESENT_DB
# On Windows advapi32 will be used to resolve SIDs
try:
import advapi32
except Exception:
pass
import bits
from bits.structs import FILE, CONTROL, JOB
# XFER_HEADER defined as bytes
XFER_HEADER = b'\x36\xDA\x56\x77\x6F\x51\x5A\x43\xAC\xAC\x44\xA2\x48\xFF\xF3\x4D'
# File and job delimiter constants for Windows 10
WIN10_FILE_DELIMITER = b'\xE4\xCF\x9E\x51\x46\xD9\x97\x43\xB7\x3E\x26\x85\x13\x05\x1A\xB2'
WIN10_JOB_DELIMITERS = [
b'\xA1\x56\x09\xE1\x43\xAF\xC9\x42\x92\xE6\x6F\x98\x56\xEB\xA7\xF6',
b'\x9F\x95\xD4\x4C\x64\x70\xF2\x4B\x84\xD7\x47\x6A\x7E\x62\x69\x9F',
b'\xF1\x19\x26\xA9\x32\x03\xBF\x4C\x94\x27\x89\x88\x18\x95\x88\x31',
b'\xC1\x33\xBC\xDD\xFB\x5A\xAF\x4D\xB8\xA1\x22\x68\xB3\x9D\x01\xAD',
b'\xd0\x57\x56\x8f\x2c\x01\x3e\x4e\xad\x2c\xf4\xa5\xd7\x65\x6f\xaf',
b'\x50\x67\x41\x94\x57\x03\x1d\x46\xa4\xcc\x5d\xd9\x99\x07\x06\xe4'
]
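# Note: on Windows 10 the BITS state is stored in an ESE database (qmgr.db)
# rather than the older qmgr0.dat/qmgr1.dat format. The GUID byte patterns above
# delimit FILE and JOB records inside that database and are used by
# carve_qmgr10_records() to split raw bytes when carving deleted records.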
class BitsParser:
def __init__(self, queue_dir, carve_db, carve_all, out_file):
self.queue_dir = queue_dir
self.carve_db_files = carve_db
self.carve_all_files = carve_all
self.out_file = out_file
self.sid_user_cache = {}
self.visited_jobs = set()
# Assume files are from Windows 10 by default -- will be verified later
self.is_win_10 = True
def get_username_from_sid(self, sid):
""" Returns the username associated with the given SID by calling LookupAccountSid """
# Cache usernames to improve efficiency with repeated lookups
if sid in self.sid_user_cache:
return self.sid_user_cache[sid]
try:
name, domain, _ = advapi32.LookupAccountSid(advapi32.ConvertStringSidToSid(sid))
username = domain+"\\"+name
self.sid_user_cache[sid] = username
return username
except Exception as e:
print(f'Failed to resolve sid {sid}: ' + str(e), file=sys.stderr)
self.sid_user_cache[sid] = None
return None
@staticmethod
def is_qmgr_database(file_data):
""" Attempts to locate pattern at 0x10 found in qmgr databases (prior to Windows 10) """
if file_data[0x10:0x20] == b'\x13\xf7\x2b\xc8\x40\x99\x12\x4a\x9f\x1a\x3a\xae\xbd\x89\x4e\xea':
return True
return False
@staticmethod
def is_qmgr10_database(file_data):
""" Attempts to locate ESE database magic number found in Windows 10 qmgr databases """
if file_data[4:8] == b'\xEF\xCD\xAB\x89':
return True
return False
def load_qmgr_jobs(self, file_path):
""" Processes the given qmgr database file with ANSSI-FR, parses jobs (possibly carves jobs), and returns a list of discovered jobs. """
jobs = []
analyzer = bits.Bits.load_file(file_path)
if self.carve_db_files or self.carve_all_files:
for job in analyzer:
jobs.append(BitsJob(job, self))
else:
for job in analyzer.parse():
jobs.append(BitsJob(job, self))
return jobs
def load_non_qmgr_jobs(self, file_data):
""" Attempts to "carve" jobs from non-qmgr files (sometimes job remnants can be found in other files) """
jobs = []
analyzer = bits.Bits()
# Search for the XFER header and get 2KB of data around it
for sample in bits.sample_disk(file_data, XFER_HEADER, 2048):
analyzer.append_data(sample)
# Attempt to parse jobs from memory block
analyzer.guess_info()
for job in analyzer:
jobs.append(BitsJob(job, self))
return jobs
def parse_qmgr10_job(self, job_data):
"""Attempt to parse job data from the Win10 qmgr database"""
# Skip small entries that are not valid
if len(job_data) < 128:
return None
try:
# Because it can be expensive to parse a JOB structure if the data is not valid,
# do a simple check to see if the job name length is valid
name_length = struct.unpack_from("<L", job_data, 32)[0]
if 32 + name_length * 2 > len(job_data):
return None
# Parse as a JOB
try:
parsed_job = JOB.parse(job_data)
except Exception:
# If it fails to parse as a JOB, at least try to parse as a CONTROL struct
try:
parsed_job = CONTROL.parse(job_data)
except Exception:
return None
try:
# Following the JOB entry, there are usually XFER refs to FILE GUIDs
parsed_job['files'] = []
xfer_parts = job_data.split(XFER_HEADER)
file_ref_data = xfer_parts[1]
num_file_refs = struct.unpack_from("<L", file_ref_data)[0]
# Validate the number of file references to avoid expensive parsing failures
if 4 + num_file_refs * 16 > len(file_ref_data):
return None
for i in range(0, num_file_refs):
# Parse the GUID and attempt to find correlated FILE
cur_guid = file_ref_data[4+i*16:4+(i+1)*16]
file_job = self.file_entries.pop(cur_guid, None)
if file_job:
parsed_job['files'].extend(file_job['files'])
except Exception:
pass
# Build a BitsJob for the job entry
new_job = BitsJob(parsed_job, self)
return new_job
except Exception:
print('Exception occurred parsing job: ' + traceback.format_exc(), file=sys.stderr)
return None
def parse_qmgr10_file(self, file_data, suppress_duplicates):
"""Attempt to parse file data from the Win10 qmgr database"""
# Skip small entries that are not valid
if len(file_data) < 256:
return None
try:
# Because it can be expensive to parse a FILE structure if the data is not valid,
# do a simple check to see if the filename length is valid
filename_length = struct.unpack_from("<L", file_data)[0]
if 4 + filename_length * 2 > len(file_data):
return None
# Parse the FILE
parsed_file = FILE.parse(file_data)
# Build a BitsJob for the file entry (set entry as files list)
cur_job = {}
cur_job['files'] = [parsed_file]
# There is usually a timestamp 29 bytes into the file structure, which appears to correlate to creation time
filetime = struct.unpack_from("<Q", file_data, parsed_file.offset + 29)[0]
if filetime != 0:
cur_job['ctime'] = datetime.datetime(1601, 1, 1) + datetime.timedelta(microseconds=(filetime / 10))
return cur_job
except Exception:
return None
@staticmethod
def process_qmgr10_rows(table):
"""Given a table, processes the rows by getting data and excluding leading GUIDs"""
# Enumerate records
for i in range(table.get_number_of_records()):
cur_record = table.get_record(i)
num_values = cur_record.get_number_of_values()
if num_values != 2:
continue
try:
# Get the record Id GUID
if cur_record.is_long_value(0):
guid = cur_record.get_value_data_as_long_value(0).data
else:
guid = cur_record.get_value_data(0)
# Get the record Blob data
if cur_record.is_long_value(1):
val = cur_record.get_value_data_as_long_value(1).data
else:
val = cur_record.get_value_data(1)
# Return the trailing data if the value is longer than 16 bytes (the leading 16-byte identifier is skipped)
if len(val) > 16:
yield guid, val[16:]
except Exception:
pass
def load_qmgr10_db(self, file_data):
"""Loads the qmgr.db and attempts to enumerate the Jobs and Files tables to parse records"""
jobs = []
self.file_entries = {}
# Parse the database
ese = ESENT_DB(file_data)
# Enumerate files, store file entries to file_entries mapping
files_table = ese.openTable("Files")
while True:
file_record = ese.getNextRow(files_table)
if file_record is None:
break
guid = file_record.get(b'Id')
new_job = self.parse_qmgr10_file(file_record.get(b'Blob', b''), False)
if guid and new_job:
self.file_entries[guid] = new_job
# Enumerate jobs (and correlate to files)
jobs_table = ese.openTable("Jobs")
while True:
job_record = ese.getNextRow(jobs_table)
if job_record is None:
break
guid = job_record.get(b'Id')
job_data = job_record.get(b'Blob', b'')[16:]
new_job = self.parse_qmgr10_job(job_data)
if guid and new_job:
jobs.append(new_job)
# If any file records were not correlated to JOBs just add them as their own jobs
for guid, file_job in self.file_entries.items():
jobs.append(BitsJob(file_job, self))
return jobs
def carve_qmgr10_records(self, file_data):
""" Attempts to carve jobs from a qmgr database file using expected file and job GUIDs"""
jobs = []
self.file_entries = {}
# Carve file entries from the database, store to file_entries mapping
cur_offset = file_data.find(WIN10_FILE_DELIMITER)
while cur_offset > 0:
next_offset = file_data.find(WIN10_FILE_DELIMITER, cur_offset+len(WIN10_FILE_DELIMITER))
if next_offset > 0:
file_job = self.parse_qmgr10_file(file_data[cur_offset+16:next_offset], True)
else:
file_job = self.parse_qmgr10_file(file_data[cur_offset+16:], True)
if file_job:
guid = file_data[cur_offset-22:cur_offset-6]
self.file_entries[guid] = file_job
cur_offset = next_offset
# Carve jobs from the database (note that there are multiple potential job delimiters)
for job_delimiter in WIN10_JOB_DELIMITERS:
carved_jobs = file_data.split(job_delimiter)
if len(carved_jobs) == 1:
continue
for i in range(1, len(carved_jobs)):
new_job = self.parse_qmgr10_job(carved_jobs[i])
if new_job:
new_job.job_dict['Carved'] = True
jobs.append(new_job)
# If any file records were not correlated to JOBs just add them as their own jobs
for guid, carved_job in self.file_entries.items():
file_job = BitsJob(carved_job, self)
file_job.job_dict['Carved'] = True
jobs.append(file_job)
return jobs
def load_qmgr10_jobs(self, file_data):
"""
Attempt to parse Windows 10 qmgr jobs by enumerating (and optionally carving) JOB and FILE records from the ESE database.
FILE records are correlated to their JOB through the XFER file references where possible; any uncorrelated FILE records are emitted as separate entries.
"""
# Parse active job and file records in the database
jobs = self.load_qmgr10_db(file_data)
# Carve deleted job and file entries if requested
if self.carve_db_files or self.carve_all_files:
jobs.extend(self.carve_qmgr10_records(file_data))
return jobs
def output_jobs(self, file_path, jobs):
"""Cleans up and outputs the parsed jobs from the qmgr database files"""
# If an output file is specified, open it and use it instead of stdout
if self.out_file:
orig_stdout = sys.stdout
sys.stdout = open(self.out_file, "w")
try:
for job in jobs:
# Skip incomplete carved jobs as they do not contain useful info
if job.is_carved() and not job.is_useful_for_analysis():
continue
# Output unique jobs
if job.hash not in self.visited_jobs:
formatted_job = json.dumps(job.job_dict, indent=4)
print(formatted_job)
self.visited_jobs.add(job.hash)
finally:
if self.out_file:
sys.stdout.close()
sys.stdout = orig_stdout
def process_file(self, file_path):
""" Processes the given BITS file. Attempts to find/parse jobs. """
try:
# Read the file (may need to raw read)
print("Processing file "+file_path, file=sys.stderr)
file_data = None
with open(file_path, "rb") as f:
file_data = f.read()
# Parse as a qmgr database (support old and Win10 formats)
jobs = []
if BitsParser.is_qmgr_database(file_data):
jobs = self.load_qmgr_jobs(file_path)
elif BitsParser.is_qmgr10_database(file_data):
jobs = self.load_qmgr10_jobs(file_data)
# Try to "carve" jobs if the file is not a qmgr database (and carving is enabled)
elif self.carve_all_files:
if self.is_win_10:
jobs = self.carve_qmgr10_records(file_data)
else:
jobs = self.load_non_qmgr_jobs(file_data)
self.output_jobs(file_path, jobs)
except Exception:
print(f'Exception occurred processing file {file_path}: ' + traceback.format_exc(), file=sys.stderr)
def determine_directory_architecture(self, path):
""" Determines if the files within the directory suggest it came from a Windows 10 system or an older system """
if os.path.exists(path + os.sep + "qmgr.db"):
self.is_win_10 = True
elif os.path.exists(path + os.sep + "qmgr0.dat"):
self.is_win_10 = False
def run(self):
""" Finds and processes BITS database files """
# If the queue "directory" is a file, just process the file
if os.path.isfile(self.queue_dir):
self.process_file(self.queue_dir)
return
# Determine if the directory appears to belong to a Windows 10 system or an older system for carving
self.determine_directory_architecture(self.queue_dir)
# List files in the queue directory and process
for f in os.listdir(self.queue_dir):
cur_path = self.queue_dir + os.sep + f
if not os.path.isfile(cur_path):
continue
self.process_file(cur_path)
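# A minimal usage sketch (the paths below are illustrative):
#
#   bp = BitsParser(queue_dir=r'C:\ProgramData\Microsoft\Network\Downloader',
#                   carve_db=True, carve_all=False, out_file='bits_jobs.json')
#   bp.run()
#
# Each unique job is printed as an indented JSON object, either to stdout or to
# `out_file` when one is given.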
class BitsJob:
"""
Provides methods for reformatting parsed jobs from the ANSSI-FR library
"""
# Mappings between types returned by ANSSI-FR library and our output fields
FILE_MAP = dict(
src_fn="SourceURL",
dest_fn="DestFile",
tmp_fn="TmpFile",
download_size="DownloadByteSize",
transfer_size="TransferByteSize",
vol_guid="VolumeGUID"
)
JOB_MAP = dict(
job_id="JobId",
type="JobType",
priority="JobPriority",
state="JobState",
name="JobName",
desc="JobDesc",
cmd="CommandExecuted",
args="CommandArguments",
sid="OwnerSID",
ctime="CreationTime",
mtime="ModifiedTime",
carved="Carved",
files="Files",
queue_path="QueuePath"
)
def __init__(self, job, bits_parser):
""" Initialize a BitsJob with a parsed job dictionary and a reference to BitsParser """
self.job = job
self.bits_parser = bits_parser
self.hash = None
self.job_dict = {}
if bits_parser.carve_db_files or bits_parser.carve_all_files:
self.job_dict = {'Carved': False}
self.parse()
def is_useful_for_analysis(self, cur_dict=None):
""" Returns True if the job contains at least one "useful" field (discards useless "carved" entries) and the ctime field exists """
useful_fields = ['SourceURL', 'DestFile', 'TmpFile', 'JobId', 'JobState', 'CommandExecuted', 'CommandArguments']
if not cur_dict:
cur_dict = self.job_dict
for k, v in cur_dict.items():
if k in useful_fields and v:
return True
# Handle lists of dicts, like we have for the Files field
if isinstance(v, list):
for d in v:
if self.is_useful_for_analysis(d):
return True
return False
def is_carved(self):
""" Simple function returns True if the job was carved """
return self.job_dict.get('Carved') is True
@staticmethod
def escape(input_str):
""" Simple escape function to eliminating non-printable characters from strings """
if not isinstance(input_str, str) or input_str.isprintable():
return input_str
return ''.join(filter(lambda x: x in string.printable, input_str))
def parse(self):
"""
Converts the fields in self.job into format used for output and separates file entries.
Does some formatting and type conversion. Also computes a hash of the job for quick comparison.
"""
file_fields = ['args', 'cmd', 'dest_fn', 'tmp_fn']
job_hash = hashlib.md5()
for k, v in self.job.items():
# Map the attribute name, skip empty or unmapped values
alias = self.JOB_MAP.get(k)
if not alias:
continue
elif not v or str(v).strip() == '':
continue
# Convert timestamps into normal isoformat
elif isinstance(v, datetime.datetime):
self.job_dict[alias] = v.replace(microsecond=0).isoformat() + 'Z'
# Convert boolean values to lowercase
elif isinstance(v, bool):
self.job_dict[alias] = str(v).lower()
# If this is a SID, convert to username and set owner
elif alias == self.JOB_MAP['sid']:
self.job_dict[alias] = str(v)
owner = self.bits_parser.get_username_from_sid(v)
if owner:
self.job_dict["Owner"] = owner
# The files field contains a list of files -- perform attribute mapping and environment variable resolution
elif alias == self.JOB_MAP['files']:
files_list = []
for file in v:
file_dict = {}
for k1, v1 in file.items():
# Map the transaction attribute name, skip empty, unmapped, or invalid values
t_alias = self.FILE_MAP.get(k1)
if not t_alias:
continue
elif v1 is None or str(v1).strip() == '' or not str(v1).isprintable():
continue
# Skip certain invalid values (if there is no value or if the value is -1 (DWORD64))
if v1 is None or v1 == 0xFFFFFFFFFFFFFFFF:
continue
# If this is a file field, resolve and add to the list of files
if k1 in file_fields:
file_dict[t_alias] = os.path.expandvars(v1)
else:
file_dict[t_alias] = v1
# Update the object hash
job_hash.update(str(file_dict[t_alias]).encode('utf-8'))
files_list.append(file_dict)
self.job_dict['Files'] = files_list
else:
self.job_dict[alias] = v
# Escape non-printable chars if appropriate
self.job_dict[alias] = self.escape(self.job_dict[alias])
# Update the object hash
if not isinstance(v, dict):
job_hash.update(str(v).encode('utf-8'))
self.hash = job_hash.hexdigest()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input', '-i', default='%ALLUSERSPROFILE%\\Microsoft\\Network\\Downloader', help='Optionally specify the directory containing QMGR databases or the path to a file to process.')
parser.add_argument('--output', '-o', help='Optionally specify a file for JSON output. If not specified the output will be printed to stdout.')
parser.add_argument('--carvedb', action='store_true', help='Carve deleted records from database files')
parser.add_argument('--carveall', action='store_true', help='Carve deleted records from all other files')
parsed_args = parser.parse_args()
queue_dir = os.path.expandvars(parsed_args.input)
bits_parser = BitsParser(queue_dir, parsed_args.carvedb, parsed_args.carveall, parsed_args.output)
bits_parser.run()
|
office365/sharepoint/folders/folder.py | vgrem/Office365-REST-Python-Client | 544 | 11137653 | from office365.runtime.client_result import ClientResult
from office365.runtime.queries.create_entity_query import CreateEntityQuery
from office365.runtime.queries.service_operation_query import ServiceOperationQuery
from office365.runtime.queries.update_entity_query import UpdateEntityQuery
from office365.runtime.paths.resource_path import ResourcePath
from office365.runtime.paths.service_operation import ServiceOperationPath
from office365.sharepoint.base_entity import BaseEntity
from office365.sharepoint.changes.change_collection import ChangeCollection
from office365.sharepoint.changes.change_query import ChangeQuery
from office365.sharepoint.contenttypes.content_type_id import ContentTypeId
from office365.sharepoint.listitems.listitem import ListItem
from office365.sharepoint.storagemetrics.storage_metrics import StorageMetrics
from office365.sharepoint.utilities.move_copy_options import MoveCopyOptions
from office365.sharepoint.utilities.move_copy_util import MoveCopyUtil
from office365.sharepoint.types.resource_path import ResourcePath as SPResPath
from office365.runtime.compat import urlparse
class Folder(BaseEntity):
"""Represents a folder in a SharePoint Web site."""
@staticmethod
def from_url(abs_url):
"""
Addresses a Folder by absolute url
:type abs_url: str
"""
from office365.sharepoint.client_context import ClientContext
ctx = ClientContext.from_url(abs_url)
relative_url = abs_url.replace(ctx.base_url, "")
return ctx.web.get_folder_by_server_relative_url(relative_url)
def recycle(self):
"""Moves the folder to the Recycle Bin and returns the identifier of the new Recycle Bin item."""
result = ClientResult(self.context)
qry = ServiceOperationQuery(self, "Recycle", None, None, None, result)
self.context.add_query(qry)
return result
def recycle_with_parameters(self, parameters):
"""
:type parameters: office365.sharepoint.folders.folder_delete_parameters.FolderDeleteParameters
"""
result = ClientResult(self.context)
qry = ServiceOperationQuery(self, "RecycleWithParameters", None, parameters, "parameters", result)
self.context.add_query(qry)
return result
def get_changes(self, query=None):
"""Returns the collection of changes from the change log that have occurred within the folder,
based on the specified query.
:param office365.sharepoint.changeQuery.ChangeQuery query: Specifies which changes to return
"""
if query is None:
query = ChangeQuery(folder=True)
changes = ChangeCollection(self.context)
qry = ServiceOperationQuery(self, "getChanges", None, query, "query", changes)
self.context.add_query(qry)
return changes
def get_list_item_changes(self, query):
"""
:param office365.sharepoint.changeQuery.ChangeQuery query: Specifies which changes to return
"""
changes = ChangeCollection(self.context)
qry = ServiceOperationQuery(self, "getListItemChanges", None, query, "query", changes)
self.context.add_query(qry)
return changes
def add(self, name):
"""Adds the folder that is located under a current folder
:type name: str
"""
new_folder = Folder(self.context)
def _add_sub_folder():
new_folder_url = "/".join([self.serverRelativeUrl, name])
new_folder.set_property("ServerRelativeUrl", new_folder_url)
qry = CreateEntityQuery(self.folders, new_folder, new_folder)
self.context.add_query(qry)
self.ensure_property("ServerRelativeUrl", _add_sub_folder)
return new_folder
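# A minimal usage sketch (the site URL, credentials object and folder names are
# illustrative assumptions):
#
#   from office365.sharepoint.client_context import ClientContext
#   ctx = ClientContext("https://contoso.sharepoint.com/sites/team").with_credentials(credentials)
#   parent = ctx.web.get_folder_by_server_relative_url("Shared Documents")
#   new_folder = parent.add("Reports")
#   ctx.execute_query()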
def rename(self, name):
"""Rename a Folder resource
:type name: str
"""
item = self.list_item_all_fields
item.set_property('Title', name)
item.set_property('FileLeafRef', name)
qry = UpdateEntityQuery(item)
self.context.add_query(qry)
return self
def upload_file(self, file_name, content):
"""Uploads a file into folder
:type file_name: str
:type content: str
:rtype: office365.sharepoint.files.file.File
"""
return self.files.upload(file_name, content)
def copy_to(self, new_relative_url, keep_both=False, reset_author_and_created=False):
"""Copies the folder with files to the destination URL.
:type new_relative_url: str
:type keep_both: bool
:type reset_author_and_created: bool
"""
target_folder = Folder(self.context)
target_folder.set_property("ServerRelativeUrl", new_relative_url)
def _copy_folder():
opts = MoveCopyOptions(keep_both=keep_both, reset_author_and_created_on_copy=reset_author_and_created)
MoveCopyUtil.copy_folder(self.context, self._build_full_url(self.serverRelativeUrl),
self._build_full_url(new_relative_url), opts)
self.ensure_property("ServerRelativeUrl", _copy_folder)
return target_folder
def copy_to_by_path(self, new_relative_path, keep_both=False, reset_author_and_created=False):
"""Copies the folder with files to the destination Path.
:type new_relative_path: str
:type keep_both: bool
:type reset_author_and_created: bool
"""
target_folder = Folder(self.context)
target_folder.set_property("ServerRelativePath", SPResPath(new_relative_path))
def _copy_folder():
opts = MoveCopyOptions(keep_both=keep_both, reset_author_and_created_on_copy=reset_author_and_created)
MoveCopyUtil.copy_folder_by_path(self.context, self._build_full_url(self.server_relative_path.DecodedUrl),
self._build_full_url(new_relative_path), opts)
self.ensure_property("ServerRelativePath", _copy_folder)
return target_folder
def move_to(self, new_relative_url, retain_editor_and_modified=False):
"""Moves the folder with files to the destination URL.
:type new_relative_url: str
:type retain_editor_and_modified: bool
"""
target_folder = Folder(self.context)
target_folder.set_property("ServerRelativeUrl", new_relative_url)
def _move_folder():
MoveCopyUtil.move_folder(self.context, self._build_full_url(self.serverRelativeUrl),
self._build_full_url(new_relative_url),
MoveCopyOptions(retain_editor_and_modified_on_move=retain_editor_and_modified))
self.ensure_property("ServerRelativeUrl", _move_folder)
return target_folder
def move_to_by_path(self, new_relative_path, retain_editor_and_modified=False):
"""Moves the folder with files to the destination Path.
:type new_relative_path: str
:type retain_editor_and_modified: bool
"""
target_folder = Folder(self.context)
target_folder.set_property("ServerRelativePath", SPResPath(new_relative_path))
def _move_folder():
MoveCopyUtil.move_folder_by_path(self.context, self._build_full_url(self.server_relative_path.DecodedUrl),
self._build_full_url(new_relative_path),
MoveCopyOptions(
retain_editor_and_modified_on_move=retain_editor_and_modified))
self.ensure_property("ServerRelativePath", _move_folder)
return target_folder
@property
def storage_metrics(self):
""""""
return self.properties.get("StorageMetrics",
StorageMetrics(self.context, ResourcePath("StorageMetrics", self.resource_path)))
@property
def list_item_all_fields(self):
"""Specifies the list item fields (2) values for the list item corresponding to the folder."""
return self.properties.get("ListItemAllFields",
ListItem(self.context, ResourcePath("ListItemAllFields", self.resource_path)))
@property
def files(self):
"""Get a file collection"""
from office365.sharepoint.files.file_collection import FileCollection
return self.properties.get("Files",
FileCollection(self.context, ResourcePath("Files", self.resource_path)))
@property
def folders(self):
"""Specifies the collection of list folders contained within the list folder.
"""
from office365.sharepoint.folders.folder_collection import FolderCollection
return self.properties.get("Folders",
FolderCollection(self.context, ResourcePath("Folders", self.resource_path)))
@property
def parent_folder(self):
"""Specifies the list folder.
"""
return self.properties.get("ParentFolder",
Folder(self.context, ResourcePath("ParentFolder", self.resource_path)))
@property
def name(self):
"""Specifies the list folder name.
:rtype: str or None
"""
return self.properties.get("Name", None)
@property
def is_wopi_enabled(self):
return self.properties.get("IsWOPIEnabled", None)
@property
def prog_id(self):
"""Gets the identifier (ID) of the application in which the folder was created."""
return self.properties.get("ProgID", None)
@property
def unique_id(self):
"""Gets the unique ID of the folder.
:rtype: str or None
"""
return self.properties.get("UniqueId", None)
@property
def exists(self):
"""Gets a Boolean value that indicates whether the folder exists.
:rtype: bool or None
"""
return self.properties.get("Exists", None)
@property
def welcome_page(self):
"""Specifies the server-relative URL for the list folder Welcome page.
:rtype: str or None
"""
return self.properties.get("WelcomePage", None)
@property
def unique_content_type_order(self):
"""Specifies the content type order for the list folder.
:rtype: office365.sharepoint.contenttypes.content_type_id.ContentTypeId or None
"""
return self.properties.get("UniqueContentTypeOrder", ContentTypeId())
@property
def content_type_order(self):
"""Specifies the content type order for the list folder.
:rtype: office365.sharepoint.contenttypes.content_type_id.ContentTypeId or None
"""
return self.properties.get("ContentTypeOrder", ContentTypeId())
@property
def time_last_modified(self):
"""Gets the last time this folder or a direct child was modified in UTC.
:rtype: str or None
"""
return self.properties.get("TimeLastModified", None)
@property
def serverRelativeUrl(self):
"""Gets the server-relative URL of the list folder.
:rtype: str or None
"""
return self.properties.get("ServerRelativeUrl", None)
@property
def server_relative_path(self):
"""Gets the server-relative Path of the list folder.
:rtype: SPResPath or None
"""
return self.properties.get("ServerRelativePath", SPResPath(None))
def get_property(self, name, default_value=None):
if default_value is None:
property_mapping = {
"ContentTypeOrder": self.content_type_order,
"UniqueContentTypeOrder": self.unique_content_type_order,
"ListItemAllFields": self.list_item_all_fields,
"ParentFolder": self.parent_folder,
"ServerRelativePath": self.server_relative_path,
"StorageMetrics": self.storage_metrics
}
default_value = property_mapping.get(name, None)
return super(Folder, self).get_property(name, default_value)
def set_property(self, name, value, persist_changes=True):
super(Folder, self).set_property(name, value, persist_changes)
# fallback: create a new resource path
if name == "ServerRelativeUrl":
self._resource_path = ServiceOperationPath("getFolderByServerRelativeUrl", [value],
ResourcePath("Web"))
elif name == "ServerRelativePath":
self._resource_path = ServiceOperationPath("getFolderByServerRelativePath", [value],
ResourcePath("Web"))
elif name == "UniqueId":
self._resource_path = ServiceOperationPath("getFolderById", [value], ResourcePath("Web"))
return self
def _build_full_url(self, rel_url):
"""
:type rel_url: str
"""
site_path = urlparse(self.context.base_url).path
return self.context.base_url.replace(site_path, "") + rel_url
|
panel/models/layout.py | sthagen/holoviz-panel | 601 | 11137679 | <reponame>sthagen/holoviz-panel
from bokeh.core.properties import (
Bool, List, Nullable, String,
)
from bokeh.models import Column
class Card(Column):
active_header_background = Nullable(String, help="Background color of active Card header.")
button_css_classes = List(String, help="CSS classes to add to the Card collapse button.")
collapsed = Bool(True, help="Whether the Card is collapsed.")
collapsible = Bool(True, help="Whether the Card should have a button to collapse it.")
header_background = Nullable(String, help="Background color of the Card header.")
header_color = Nullable(String, help="Color of the header text and button.")
header_css_classes = List(String, help="CSS classes to add to the Card header.")
header_tag = String('div', help="HTML tag to use for the Card header.")
hide_header = Bool(False, help="Whether to hide the Card header")
tag = String('tag', help="CSS class to use for the Card as a whole.")
|
greykite/sklearn/transform/build_timeseries_features_transformer.py | CaduceusInc/greykite | 1,503 | 11137684 | <filename>greykite/sklearn/transform/build_timeseries_features_transformer.py
# BSD 2-CLAUSE LICENSE
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# original author: <NAME>
import pandas as pd
from sklearn.base import BaseEstimator
from sklearn.base import TransformerMixin
from sklearn.exceptions import NotFittedError
from greykite.common.constants import TIME_COL
from greykite.common.features.timeseries_features import build_time_features_df
from greykite.common.features.timeseries_features import convert_date_to_continuous_time
class BuildTimeseriesFeaturesTransformer(BaseEstimator, TransformerMixin):
"""Calculates time series features (e.g. year, month, hour etc.) of the input time series
Parameters
----------
time_col : string, default=TIME_COL
name of the column in the input data frame that contains the timestamps
Attributes
----------
origin_for_time_vars : float (e.g. 2019.2)
sets default origin so that "ct1" feature from `build_time_features_df` starts at 0 on start date of fitted data
"""
def __init__(self, time_col: str = TIME_COL):
self.time_col = time_col
self.origin_for_time_vars = None
def fit(self, X, y=None):
"""Sets the time origin for input time series"""
assert isinstance(X, pd.DataFrame)
dt = X[self.time_col]
self.origin_for_time_vars = convert_date_to_continuous_time(dt[0])
return self
def transform(self, X):
""" Calculates time series features of the input time series
Parameters
----------
X : pd.DataFrame
Returns
-------
A copy of the data frame with original time points and calculated features
"""
if self.origin_for_time_vars is None:
raise NotFittedError(
"This instance is not fitted yet. Call 'fit' with appropriate arguments "
"before calling 'transform'.")
assert isinstance(X, pd.DataFrame)
dt = X[self.time_col]
features_ts = build_time_features_df(dt, conti_year_origin=self.origin_for_time_vars)
output = pd.concat([dt, features_ts], axis=1)
return output
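# A minimal usage sketch (the date range below is an illustrative assumption):
#
#   df = pd.DataFrame({TIME_COL: pd.date_range("2020-01-01", periods=48, freq="H")})
#   transformer = BuildTimeseriesFeaturesTransformer(time_col=TIME_COL)
#   features = transformer.fit_transform(df)  # original timestamps plus derived time features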
|
diffusion/models/models.py | DazhiZhong/v-diffusion-pytorch | 393 | 11137693 | <filename>diffusion/models/models.py
from . import cc12m_1, danbooru_128, imagenet_128, wikiart_128, wikiart_256, yfcc_1, yfcc_2
models = {
'cc12m_1': cc12m_1.CC12M1Model,
'cc12m_1_cfg': cc12m_1.CC12M1Model,
'danbooru_128': danbooru_128.Danbooru128Model,
'imagenet_128': imagenet_128.ImageNet128Model,
'wikiart_128': wikiart_128.WikiArt128Model,
'wikiart_256': wikiart_256.WikiArt256Model,
'yfcc_1': yfcc_1.YFCC1Model,
'yfcc_2': yfcc_2.YFCC2Model,
}
def get_model(model):
return models[model]
def get_models():
return list(models.keys())
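# A minimal usage sketch:
#
#   print(get_models())                      # lists the available model names
#   model_class = get_model('imagenet_128')  # returns the ImageNet128Model class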
|
utils.py | tolleybot/fast-depth | 759 | 11137714 | import os
import torch
import shutil
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import math
cmap = plt.cm.viridis
def parse_command():
data_names = ['nyudepthv2']
from dataloaders.dataloader import MyDataloader
modality_names = MyDataloader.modality_names
import argparse
parser = argparse.ArgumentParser(description='FastDepth')
parser.add_argument('--data', metavar='DATA', default='nyudepthv2',
choices=data_names,
help='dataset: ' + ' | '.join(data_names) + ' (default: nyudepthv2)')
parser.add_argument('--modality', '-m', metavar='MODALITY', default='rgb', choices=modality_names,
help='modality: ' + ' | '.join(modality_names) + ' (default: rgb)')
parser.add_argument('-j', '--workers', default=16, type=int, metavar='N',
help='number of data loading workers (default: 16)')
parser.add_argument('--print-freq', '-p', default=50, type=int,
metavar='N', help='print frequency (default: 50)')
parser.add_argument('-e', '--evaluate', default='', type=str, metavar='PATH', help='path to a trained model checkpoint to evaluate')
parser.add_argument('--gpu', default='0', type=str, metavar='N', help="gpu id")
parser.set_defaults(cuda=True)
args = parser.parse_args()
return args
def colored_depthmap(depth, d_min=None, d_max=None):
if d_min is None:
d_min = np.min(depth)
if d_max is None:
d_max = np.max(depth)
depth_relative = (depth - d_min) / (d_max - d_min)
return 255 * cmap(depth_relative)[:,:,:3] # H, W, C
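# The helpers below build side-by-side visualisations: each row concatenates the
# RGB input with colour-mapped target and predicted depth maps (normalised over a
# shared [d_min, d_max] range so their colours are comparable); rows can then be
# stacked with add_row() and written to disk with save_image().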
def merge_into_row(input, depth_target, depth_pred):
rgb = 255 * np.transpose(np.squeeze(input.cpu().numpy()), (1,2,0)) # H, W, C
depth_target_cpu = np.squeeze(depth_target.cpu().numpy())
depth_pred_cpu = np.squeeze(depth_pred.data.cpu().numpy())
d_min = min(np.min(depth_target_cpu), np.min(depth_pred_cpu))
d_max = max(np.max(depth_target_cpu), np.max(depth_pred_cpu))
depth_target_col = colored_depthmap(depth_target_cpu, d_min, d_max)
depth_pred_col = colored_depthmap(depth_pred_cpu, d_min, d_max)
img_merge = np.hstack([rgb, depth_target_col, depth_pred_col])
return img_merge
def merge_into_row_with_gt(input, depth_input, depth_target, depth_pred):
rgb = 255 * np.transpose(np.squeeze(input.cpu().numpy()), (1,2,0)) # H, W, C
depth_input_cpu = np.squeeze(depth_input.cpu().numpy())
depth_target_cpu = np.squeeze(depth_target.cpu().numpy())
depth_pred_cpu = np.squeeze(depth_pred.data.cpu().numpy())
d_min = min(np.min(depth_input_cpu), np.min(depth_target_cpu), np.min(depth_pred_cpu))
d_max = max(np.max(depth_input_cpu), np.max(depth_target_cpu), np.max(depth_pred_cpu))
depth_input_col = colored_depthmap(depth_input_cpu, d_min, d_max)
depth_target_col = colored_depthmap(depth_target_cpu, d_min, d_max)
depth_pred_col = colored_depthmap(depth_pred_cpu, d_min, d_max)
img_merge = np.hstack([rgb, depth_input_col, depth_target_col, depth_pred_col])
return img_merge
def add_row(img_merge, row):
return np.vstack([img_merge, row])
def save_image(img_merge, filename):
img_merge = Image.fromarray(img_merge.astype('uint8'))
img_merge.save(filename)
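# A minimal usage sketch (tensor shapes follow the dataloader convention assumed
# by merge_into_row: RGB is 1x3xHxW, depth tensors are 1x1xHxW; the variable
# names are illustrative):
#
#   row = merge_into_row(rgb, depth_gt, depth_pred)
#   img_merge = add_row(row, merge_into_row(rgb2, depth_gt2, depth_pred2))
#   save_image(img_merge, 'comparison.png')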
|
pythonz/installer/versions.py | saghul/pythonz | 358 | 11137740 | versions = {
'cpython': {
'2.4': 'ff746de0fae8691c082414b42a2bb172da8797e6e8ff66c9a39d2e452f7034e9',
'2.4.1': 'f449c3b167389324c525ad99d02376c518ac11e163dbbbc13bc88a5c7101fd00',
'2.4.2': '2653e1846e87fd9b3ee287fefc965c80c54646548b4913a22265b0dd54493adf',
'2.4.3': '985a413932f5e31e6280b37da6b285a3a0b2748c6786643989ed9b23de97e2d5',
'2.4.4': '92be6e20cbc3111d9dd0c016d72ef7914c23b879dc52df7ba28df97afbf12e2e',
'2.4.5': '6ae6f67a388a7f70ed3a20eebab5aae995ee433089d1f1724095c62f4b7389a1',
'2.4.6': 'b03f269e826927f05c966cf4f4414f3c93ee2314960859e7f8375e24e82f8b02',
'2.5': 'd7bbf42e36003c6065cd19f3e67d283521858515ee923220f654131cebe1d8f2',
'2.5.1': '1f5caee846049ca30d996f9403eefdb996295c4af664867e35dcc5eb36e4e7e8',
'2.5.2': '834afe8a88adaf623b05ac5dd6700dd5bb5d0d5553fc74ad529359a3496e4ae3',
'2.5.3': 'c3fee607d20a77dfb72ea2e627eb4d95d25c735603435abde62c57015a0445bd',
'2.5.4': '3d3b205611ee503a38a9433d5645a571668420bb219242c7f51af85f05664da6',
'2.5.5': '03be1019c4fe93daeb53ba9e4294bf22a8ed4cb854cbd57e24e16f6bf63e2392',
'2.5.6': 'c2e4377597241b1065677d23327c04d0f41945d370c61a491cc88be367234c5d',
'2.6': '7c2f21a968a737a59ed0729f4b1dc154dc3aa183c20be96055186fe43c6742d0',
'2.6.1': 'fb65e93678e1327e3e8559cc56e1e00ed8c07162b21287a3502677892c5c515c',
'2.6.2': 'e37ecdf249f248f4fea227adbca09c778670b64fcb5e45947ec3e093cbc12c86',
'2.6.3': 'a71b55540690425fd82ab00819aeb92c1b23cbb4730a0ccd2e25c833b22a812e',
'2.6.4': '<KEY>',
'2.6.5': 'b331dafdce3361834fee783795d4f68ae7cf7d379e9137c2d8e8531cea615ede',
'2.6.6': '372f66db46d773214e4619df1794a26449158f626138d4d2141a64c2f017fae1',
'2.6.7': 'a8093eace4cfd3e06b05f0deb5d765e3c6cec65908048640a8cadd7a948b3826',
'2.6.8': '5bf02a75ffa2fcaa5a3cabb8201998519b045541975622316888ea468d9512f7',
'2.6.9': '7277b1285d8a82f374ef6ebaac85b003266f7939b3f2a24a3af52f9523ac94db',
'2.7': '5670dd6c0c93b0b529781d070852f7b51ce6855615b16afcd318341af2910fb5',
'2.7.1': 'ca13e7b1860821494f70de017202283ad73b1fb7bd88586401c54ef958226ec8',
'2.7.2': '1d54b7096c17902c3f40ffce7e5b84e0072d0144024184fff184a84d563abbb3',
'2.7.3': 'd4c20f2b5faf95999fd5fecb3f7d32071b0820516224a6d2b72932ab47a1cb8e',
'2.7.4': '98c5eb9c8e65effcc0122112ba17a0bce880aa23ecb560af56b55eb55632b81a',
'2.7.5': '8e1b5fa87b91835afb376a9c0d319d41feca07ffebc0288d97ab08d64f48afbf',
'2.7.6': '99c6860b70977befa1590029fae092ddb18db1d69ae67e8b9385b66ed104ba58',
'2.7.7': '7f49c0a6705ad89d925181e27d0aaa025ee4731ce0de64776c722216c3e66c42',
'2.7.8': '74d70b914da4487aa1d97222b29e9554d042f825f26cb2b93abd20fdda56b557',
'2.7.9': 'c8bba33e66ac3201dabdc556f0ea7cfe6ac11946ec32d357c4c6f9b018c12c5b',
'2.7.10': 'eda8ce6eec03e74991abb5384170e7c65fcd7522e409b8e83d7e6372add0f12a',
'2.7.11': '82929b96fd6afc8da838b149107078c02fa1744b7e60999a8babbc0d3fa86fc6',
'2.7.12': '3cb522d17463dfa69a155ab18cffa399b358c966c0363d6c8b5b3bf1384da4b6',
'2.7.13': 'a4f05a0720ce0fd92626f0278b6b433eee9a6173ddf2bced7957dfb599a5ece1',
'2.7.14': '304c9b202ea6fbd0a4a8e0ad3733715fbd4749f2204a9173a58ec53c32ea73e8',
'2.7.15': '18617d1f15a380a919d517630a9cd85ce17ea602f9bbdc58ddc672df4b0239db',
'2.7.16': '01da813a3600876f03f46db11cc5c408175e99f03af2ba942ef324389a83bad5',
'2.7.17': 'f22059d09cdf9625e0a7284d24a13062044f5bf59d93a7f3382190dfa94cecde',
'2.7.18': 'da3080e3b488f648a3d7a4560ddee895284c3380b11d6de75edb986526b9a814',
'3.0': '4d5d6ab2f893144a382ae1ea1de88a7825eb98111e26cfde627b9f3d9fd462b4',
'3.0.1': '7d5f2feae9035f1d3d9e6bb7f092dbf374d6bb4b25abd0d2d11f13bba1cb04de',
'3.1': '99a034cf574ea3c26412b0a0728126d7fd6ea9593d099d807a25d216ed031e6a',
'3.1.1': '5d85d7bff11c4db44920af99f64f4227c816f897f6bfa9dd8a2611165ca5f0a1',
'3.1.2': 'dffbc0561a161a4a576c6059e6990a9859a0be16ba9b5736eabe4abbb2700d1c',
'3.1.3': '6311823aeda8be6a7a2b67caaeff48abce6626c9940ba7ed81f9c978666a36bd',
'3.1.4': 'fadc05ea6d05360cff189944a85ecd2180bbc308784d168b350450e70bbdd846',
'3.1.5': 'd12dae6d06f52ef6bf1271db4d5b4d14b5dd39813e324314e72b648ef1bc0103',
'3.2': '27b35bfcbbf01de9564c0265d72b58ba3ff3d56df0615765372f2aa09dc20da9',
'3.2.1': '7cff29d984696d9fe8c7bea54da5b9ad36acef33ff5cf0d3e37e4d12fb21c572',
'3.2.2': 'acc6a13cb4fed0b7e86716324a8437e326645b8076177eede5a0cad99ec0313c',
'3.2.3': '74c33e165edef7532cef95fd9a325a06878b5bfc8a5d038161573f283eaf9809',
'3.2.4': '71c3139908ccc1c544ba1e331a3c22b3f1c09f562438a054fd6f4e2628de8b9a',
'3.2.5': '5eae0ab92a0bb9e3a1bf9c7cd046bc3de58996b049bd894d095978b6b085099f',
'3.3.0': 'cfe531eaace2503e13a74addc7f4a89482e99f8b8fca51b469ae5c83f450604e',
'3.3.1': '671dc3632f311e63c6733703aa0a1ad90c99277ddc8299d39e487718a50319bd',
'3.3.2': 'de664fca3b8e0ab20fb42bfed1a36e26f116f1853e88ada12dbc938761036172',
'3.3.3': '30b60839bfe0ae8a2dba11e909328459bb8ee4a258afe7494b06b2ceda080efc',
'3.3.4': 'ea055db9dd004a6ecd7690abc9734573763686dd768122316bae2dfd026412af',
'3.3.5': '916bc57dd8524dc27429bebae7b39d6942742cf9699b875b2b496a3d960c7168',
'3.3.6': '0a58ad1f1def4ecc90b18b0c410a3a0e1a48cf7692c75d1f83d0af080e5d2034',
'3.4.0': 'd2c83ea0217769a73e8b1ee33ffbca814903f8568e30f8d13e68e3d1f743449c',
'3.4.1': '8d007e3ef80b128a292be101201e75dec5480e5632e994771e7c231d17720b66',
'3.4.2': '44a3c1ef1c7ca3e4fd25242af80ed72da941203cb4ed1a8c1b724d9078965dd8',
'3.4.3': '8b743f56e9e50bf0923b9e9c45dd927c071d7aa56cd46569d8818add8cf01147',
'3.4.4': 'bc93e944025816ec360712b4c42d8d5f729eaed2b26585e9bc8844f93f0c382e',
'3.4.5': '997aca4dd8692f3c954658a3db11c1d0862bcbf8eadd6a164746eb33d317c034',
'3.4.6': 'fe59daced99549d1d452727c050ae486169e9716a890cffb0d468b376d916b48',
'3.4.7': '1614734847fd07e2a1ab1c65ae841db2433f8b845f49b34b7b5cabcb1c3f491f',
'3.4.8': '8b1a1ce043e132082d29a5d09f2841f193c77b631282a82f98895a5dbaba1639',
'3.4.9': 'e02e565372750a6678efe35ddecbe5ccd5330a8a2e8bbe38d3060713492e3dab',
'3.4.10': '217757699249ab432571b381386d441e12b433100ab5f908051fcb7cced2539d',
'3.5.0': '584e3d5a02692ca52fce505e68ecd77248a6f2c99adf9db144a39087336b0fe0',
'3.5.1': '687e067d9f391da645423c7eda8205bae9d35edc0c76ef5218dcbe4cc770d0d7',
'3.5.2': '1524b840e42cf3b909e8f8df67c1724012c7dc7f9d076d4feef2d3eff031e8a0',
'3.5.3': 'd8890b84d773cd7059e597dbefa510340de8336ec9b9e9032bf030f19291565a',
'3.5.4': '6ed87a8b6c758cc3299a8b433e8a9a9122054ad5bc8aad43299cff3a53d8ca44',
'3.5.5': '2f988db33913dcef17552fd1447b41afb89dbc26e3cdfc068ea6c62013a3a2a5',
'3.5.6': '30d2ff093988e74283e1abfee823292c6b59590796b9827e95ba4940b27d26f8',
'3.5.7': '542d94920a2a06a471a73b51614805ad65366af98145b0369bc374cf248b521b',
'3.5.8': '18c88dfd260147bc7247e6356010e5d4916dfbfc480f6434917f88e61228177a',
'3.5.9': '67a1d4fc6e4540d6a092cadc488e533afa961b3c9becc74dc3d6b55cb56e0cc1',
'3.5.10': '3496a0daf51913718a6f10e3eda51fa43634cb6151cb096f312d48bdbeff7d3a',
'3.6.0': '<KEY>',
'3.6.1': 'aa50b0143df7c89ce91be020fe41382613a817354b33acdc6641b44f8ced3828',
'3.6.2': '7919489310a5f17f7acbab64d731e46dca0702874840dadce8bd4b2b3b8e7a82',
'3.6.3': '<KEY>',
'3.6.4': '7dc453e1a93c083388eb1a23a256862407f8234a96dc4fae0fc7682020227486',
'3.6.5': '<KEY>',
'3.6.6': '7d56dadf6c7d92a238702389e80cfe66fbfae73e584189ed6f89c75bbf3eda58',
'3.6.7': '<KEY>',
'3.6.8': '7f5b1f08b3b0a595387ef6c64c85b1b13b38abef0dd871835ee923262e4f32f0',
'3.6.9': '47fc92a1dcb946b9ed0abc311d3767b7215c54e655b17fd1d3f9b538195525aa',
'3.6.10': '7034dd7cba98d4f94c74f9edd7345bac71c8814c41672c64d9044fa2f96f334d',
'3.6.11': '<KEY>',
'3.6.12': '12dddbe52385a0f702fb8071e12dcc6b3cb2dde07cd8db3ed60e90d90ab78693',
'3.6.13': '614950d3d54f6e78dac651b49c64cfe2ceefea5af3aff3371a9e4b27a53b2669',
'3.6.14': '70064897bc434d6eae8bcc3e5678f282b5ea776d60e695da548a1219ccfd27a5',
'3.7.0': '85bb9feb6863e04fb1700b018d9d42d1caac178559ffa453d7e6a436e259fd0d',
'3.7.1': '36c1b81ac29d0f8341f727ef40864d99d8206897be96be73dc34d4739c9c9f06',
'3.7.2': 'f09d83c773b9cc72421abba2c317e4e6e05d919f9bcf34468e192b6a6c8e328d',
'3.7.3': 'd62e3015f2f89c970ac52343976b406694931742fbde2fed8d1ce8ebb4e1f8ff',
'3.7.4': 'd63e63e14e6d29e17490abbe6f7d17afb3db182dbd801229f14e55f4157c4ba3',
'3.7.5': '<KEY>',
'3.7.6': 'aeee681c235ad336af116f08ab6563361a0c81c537072c1b309d6e4050aa2114',
'3.7.7': '8c8be91cd2648a1a0c251f04ea0bb4c2a5570feb9c45eaaa2241c785585b475a',
'3.7.8': '0e25835614dc221e3ecea5831b38fa90788b5389b99b675a751414c858789ab0',
'3.7.9': '39b018bc7d8a165e59aa827d9ae45c45901739b0bbb13721e4f973f3521c166a',
'3.7.10': 'c9649ad84dc3a434c8637df6963100b2e5608697f9ba56d82e3809e4148e0975',
'3.7.11': 'b4fba32182e16485d0a6022ba83c9251e6a1c14676ec243a9a07d3722cd4661a',
'3.8.0': 'f1069ad3cae8e7ec467aa98a6565a62a48ef196cb8f1455a245a08db5e1792df',
'3.8.1': 'c7cfa39a43b994621b245e029769e9126caa2a93571cee2e743b213cceac35fb',
'3.8.2': 'e634a7a74776c2b89516b2e013dda1728c89c8149b9863b8cea21946daf9d561',
'3.8.3': '6af6d4d2e010f9655518d0fc6738c7ff7069f10a4d2fbd55509e467f092a8b90',
'3.8.4': '32c4d9817ef11793da4d0d95b3191c4db81d2e45544614e8449255ca9ae3cc18',
'3.8.5': '015115023c382eb6ab83d512762fe3c5502fa0c6c52ffebc4831c4e1a06ffc49',
'3.8.6': '<KEY>',
'3.8.7': '20e5a04262f0af2eb9c19240d7ec368f385788bba2d8dfba7e74b20bab4d2bac',
'3.8.8': '76c0763f048e4f9b861d24da76b7dd5c7a3ba7ec086f40caedeea359263276f7',
'3.8.9': '9779ec1df000bf86914cdd40860b88da56c1e61db59d37784beca14a259ac9e9',
'3.8.10': 'b37ac74d2cbad2590e7cd0dd2b3826c29afe89a734090a87bf8c03c45066cb65',
'3.8.11': 'b77464ea80cec14581b86aeb7fb2ff02830e0abc7bcdc752b7b4bdfcd8f3e393',
'3.9.0': 'df796b2dc8ef085edae2597a41c1c0a63625ebd92487adaef2fed22b567873e8',
'3.9.1': '29cb91ba038346da0bd9ab84a0a55a845d872c341a4da6879f462e94c741f117',
'3.9.2': '7899e8a6f7946748830d66739f2d8f2b30214dad956e56b9ba216b3de5581519',
'3.9.4': '<KEY>',
'3.9.5': 'e0fbd5b6e1ee242524430dee3c91baf4cbbaba4a72dd1674b90fda87b713c7ab',
'3.9.6': 'd0a35182e19e416fc8eae25a3dcd4d02d4997333e4ad1f2eee6010aadc3fe866'
},
'stackless': {
'2.6.5': 'ac1956d7f9715cc56e92992d39b24f0869cd9955fd2b8cf52b163530194d07b1',
'2.7.2': 'e2e2706b22839e3e3f45085d0ec8030dd7374d8a65d3297981b7189a7c613197',
'3.1.3': '7aab20f509b5e3ad14a8e7c316401ac51377048ae85325ac3838a1494c019863',
'3.2.2': '779700f12b451a350fe7af4cd2849842adc7006dc83fe14712dd1a0999277b07',
'3.2.5': 'b021125e578ddd267d38feee9e1cbdb675f6aab247a2b88f4494abcf23babb05',
'3.3.5': '6558d1cb8c768ad95339fb9ca8b23106ce54c03ae67e9f75a84334d08489d240'
},
'pypy': {
'1.8': {
'linux': '9c293d8540780260718f8fd8dc433c97b614a31b115ccfe2d68df720ad7e55b1',
'linux64': '<KEY>',
'darwin': 'b823b6b919082cfb67861b8253313b877618672377164086c0364fa8eaa88b8a'
},
'1.9': {
'linux': '<KEY>',
'linux64': '4298252515e78c96f4ecd9f25be957411c060ece02d9213eef8d781cf528d18f',
'darwin': '4858f200e32c1070c77c1234ea0e9473eeda98bcd3832c4231f3e46e4e3b74b1'
},
'2.0': {
'linux': '275dbbee67eac527a1177403a0386b17d008740f83030544800d87994edd46b9',
'linux64': '14c716d53a507eece89606d547456b886dbdfc0ba6e3fb29062fafe86d1b6038',
'darwin': '<KEY>'
},
'2.0.1': {
'linux': '548686c5b95b424c79586d9a303ed41fca8eba52bd35c1527f39f5cd8fa35ea9',
'linux64': '0eb57e28f2bd5f2a4ad396df322de5adf711eb7d9a2bfeb8be2d9eb9e125c5cc',
'darwin': '337f2fda672827f2d706fd98e3344a83a8b80675e21b83dd6933da38d110c857'
},
'2.0.2': {
'linux': '<KEY>',
'linux64': '<KEY>',
'darwin': '34f5a7bf22a8bca3b9d79ae3186016c34638669ab19b4af6e38412181c757761'
},
'2.1': {
'linux': '9c0a38a40d3b4e642a159e51abef2827b33e3f7a254365daa24eae85d840eaf5',
'linux64': '<KEY>',
'darwin': 'd0d788c6d54bb866ace67a1740133cb5bc62357b5ca4783244097f1f648876f0'
},
'2.2': {
'linux': '<KEY>',
'linux64': '<KEY>',
'darwin': '<KEY>'
},
'2.2.1': {
'linux': '4d13483a0e13fc617a7b3d36918ed0e63cf07a7d2827c0a08132b80bc401a55a',
'linux64': '022d611ac62a276890d3e262f4f7cc839fcf9f5e1416df01dcd83ba335eacb16',
'darwin': '93e215dcffc9073acf41c63518f47fb59de60386aca4416cfe32190c7a096f29'
},
'2.3': {
'linux': '<KEY>',
'linux64': '777dbdd9c67ad1b8906288b01ae76bc9f7b80c95e967836f9a700a1679b80008',
'darwin': 'df7ca23ba6c8a63149d910b482be04f069b26dd1f7d0ca15e6342cac94e759d7'
},
'2.3.1': {
'linux': '<KEY>',
'linux64': 'dab7940496d96f1f255a8ef402fa96b94444775e373484e057d2fcabc3928b42',
'darwin': '<KEY>'
},
'2.4.0': {
'linux': 'a24adb366f87ac0eba829d7188a156a7d897e71893689fab06502c3f4152ac0e',
'linux64': '<KEY>',
'darwin': '<KEY>'
},
'2.5.0': {
'linux': '3dfd56a986d25929b4ed9f40a5484f72f1d513cd816cf8aaa683106c3391247c',
'linux64': '<KEY>',
'darwin': '30b392b969b54cde281b07f5c10865a7f2e11a229c46b8af384ca1d3fe8d4e6e'
},
'2.5.1': {
'linux': '<KEY>',
'linux64': '68e0955dbc80a0d51dfa9a8a76d8623f34920ece1bcbc6d910c2be019a653ba8',
'darwin': '<KEY>'
},
'2.6.0': {
'linux': '<KEY>',
'linux64': 'f5d2b0e3594cec57e32d3e43a951041ec330e1e962a836be470d591633e51388',
'darwin': '77f1d056484e40e0a8e2e2b2b489eedfe785605ef36b144ffce05f7b748f6acd'
},
'2.6.1': {
'linux': 'd010b1f1aafdb01beb107f16843985508ce81698260ce830690686d9b2768c88',
'linux64': '<KEY>',
'darwin': '4a78ef76ec38a49a9de40225c337e89486fa09938c600df2bd2dd60110066f65'
},
'4.0.0': {
'linux': '<KEY>',
'linux64': '30365cf4fa6cd8e9ff44126f06dcaebefda35c2543ddcf9b9e8516c29cabe726',
'darwin': 'd9e590fe5b9461bbdff56c76636e844ef90a297f82d0d2e204866c8a21759a50'
},
'4.0.1': {
'linux': '721920fcbb6aefc9a98e868e32b7f4ea5fd68b7f9305d08d0a2595327c9c0611',
'linux64': '0d6090cee59f4b9bab91ddbea76580d0c232b78dae65aaa9e8fa8d4449ba25b4',
'darwin': '<KEY>'
},
'5.0.0': {
'linux': None,
'linux64': None,
'darwin': None
},
'5.0.1': {
'linux': None,
'linux64': None,
'darwin': None
},
'5.1.0': {
'linux': None,
'linux64': None,
'darwin': None
},
'5.1.1': {
'linux': None,
'linux64': None,
'darwin': None
},
'5.3.0': {
'linux': None,
'linux64': None,
'darwin': None
},
'5.3.1': {
'linux': None,
'linux64': None,
'darwin': None
},
},
'pypy3': {
'2.3.1': {
'linux': '7eddc6826e58c9c89e68b59ec8caf1596b76401517ad8d26ad5e18e0ffa45db9',
'linux64': '303df2cf4766db20ec77786d9091dce284fdab01d7173c5828a35e86bc931b99',
'darwin': '600d4dad2039b8035582c0e0ce9b71e8236d95db26cff48c84c6d1e0ea6814c1'
},
'2.4.0': {
'linux': '108fdcccfddb9b2cb2fc3cbca5e6f7902ed3ab74a24c8ae29da7fbdadbab4345',
'linux64': '24e680b1742af7361107876a421dd793f5ef852dd5f097546f84b1378f7f70cc',
'darwin': 'dcd86bdb753e93dbf55e1f3af3ffa97eea328b8b77aa60e92ea2260a6258cedb'
}
},
'jython': {
'2.5.0': 'e3d8209ef9eb143df8101a5da6b3482cf457084e3a6247031fd510d71c13ab98',
'2.5.1': '229dfd1ed9728aa7e00c71f111d08fa777a4edcd03383779c74216765098f9c5',
'2.5.2': '1b7168b961e31ddd89012a36cde611c340dadfd8b60b81c4248b026730ee2f29',
'2.5.3': '05405966cdfa57abc8e705dd6aab92b8240097ce709fb916c8a0dbcaa491f99e',
'2.7.0': 'b44352ece72382268a60e2848741c96609a91d796bb9a9c6ebeff62f0c12c9cf'
}
}
|
test.py | ATrain951/01.python_function-milaan9 | 167 | 11137761 | <filename>test.py
# Python Module example
def fun():
print("something here inside fun()")
|
vis/python/plot_mesh.py | dfielding14/athena-public-version | 174 | 11137768 | #! /usr/bin/env python
"""
Script for plotting the mesh structure in the mesh_structure.dat file (the default)
produced by running Athena++ with the "-m <np>" argument.
Optionally specify "-i <input_file>" and/or "-o <output_file>". Output defaults
to an interactive "show()" window rather than saving to a file.
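For example, assuming a mesh_structure.dat produced by a prior run:
    python plot_mesh.py -i mesh_structure.dat -o mesh_structure.png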
"""
# Python modules
import argparse
# Main function
def main(**kwargs):
# Extract inputs
input_file = kwargs['input']
output_file = kwargs['output']
# Load Python plotting modules
if output_file != 'show':
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
# not used explicitly, but required for 3D projections
from mpl_toolkits.mplot3d import Axes3D # noqa
# Read and plot block edges
fig = plt.figure()
ax = fig.gca(projection='3d')
x = []
y = []
z = []
with open(input_file) as f:
for line in f:
if line[0] != '\n' and line[0] != '#':
numbers_str = line.split()
x.append(float(numbers_str[0]))
y.append(float(numbers_str[1]))
# append zero if 2D
if(len(numbers_str) > 2):
z.append(float(numbers_str[2]))
else:
z.append(0.0)
if line[0] == '\n' and len(x) != 0:
ax.plot(x, y, z, 'k-')
x = []
y = []
z = []
if output_file == 'show':
plt.show()
else:
plt.savefig(output_file, bbox_inches='tight')
# Execute main function
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input',
default='mesh_structure.dat',
help='name of mesh structure file')
parser.add_argument('-o',
'--output',
default='show',
help=('name of output image file to create; omit to '
'display rather than save image'))
args = parser.parse_args()
main(**vars(args))
|
modules/roamresearch.py | hwiorn/orger | 241 | 11137815 | #!/usr/bin/env python3
from itertools import chain
from typing import Iterable
from orger import Mirror
from orger.inorganic import node, link, OrgNode
from orger.common import dt_heading
from orger import pandoc
import my.roamresearch as roamresearch
# todo: should ^^ ^^ wrapped text be treated as highlights?
def roam_text_to_org(text: str) -> str:
"""
Cleans up Roam artifacts and adapts for better Org rendering
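Currently this strips {{[[slider]]}} widgets, converts the Markdown body to Org
via pandoc, and unescapes over-aggressively escaped underscores.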
"""
for f, t in [
('{{[[slider]]}}', ''),
]:
text = text.replace(f, t)
org = pandoc.to_org(text, from_='markdown')
org = org.replace(r'\_', '_') # unescape, it's a bit aggressive..
return org
def roam_note_to_org(node: roamresearch.Node, top=False) -> Iterable[OrgNode]:
"""
Converts Roam node into Org-mode representation
"""
children = list(chain.from_iterable(map(roam_note_to_org, node.children)))
empty = len(node.body or '') == 0 and len(children) == 0
if empty:
# sometimes nodes are empty. two cases:
# - no heading -- child notes, like accidental enter presses I guess
# - heading -- notes that haven't been created yet
# just don't do anything in this case
# todo make this logic conditional?
return
title = node.title
# org-mode target allows jumping straight to this heading
# conveniently, links in Roam are already represented as [[link]] !
target = '' if title is None else f'<<{title}>> '
heading = target + link(title='x', url=node.permalink)
todo = None
body = node.body
if body is not None:
for t in ('TODO', 'DONE'):
ts = '{{[[' + t + ']]}}'
if body.startswith(ts):
todo = t
body = body[len(ts):]
body = roam_text_to_org(body)
lines = body.splitlines(keepends=True)
# display first link of the body as the heading
if len(lines) > 0:
heading = heading + ' ' + lines[0]
body = ''.join(lines[1:])
if len(body) == 0:
body = None
if top:
heading = dt_heading(node.created, heading)
yield OrgNode(
todo=todo,
heading=heading,
body=body,
children=children,
)
class RoamView(Mirror):
def get_items(self):
rr = roamresearch.roam()
from concurrent.futures import ThreadPoolExecutor
# todo might be an overkill, only using because of pandoc..
with ThreadPoolExecutor() as pool:
items = list(chain.from_iterable(pool.map(roam_note_to_org, rr.notes)))
# move the ones with no children to the bottom
items = list(sorted(items, key=lambda n: len(n.children), reverse=True))
yield from items
if __name__ == '__main__':
RoamView.main()
|
app.py | kougou/DialogStateTracking | 246 | 11137834 | from flask import Flask, render_template, request
from flask import jsonify
import main as botmodule
# global
bot = None
app = Flask(__name__,static_url_path="/static")
'''
Routing
'''
# @GET
# PATH : /query
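# Example request (assuming the app is running locally on port 5000):
#   curl 'http://localhost:5000/query?msg=hello'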
@app.route('/query', methods=['GET'])
def reply():
return bot.reply(request.args['msg'])
'''
return jsonify( {
'ans' : 'dummy text'
})
'''
# render page "index.html"
# PATH : /
@app.route("/")
def index():
return render_template("index.html")
if __name__ == "__main__":
# before starting the app, init model
bot = botmodule.main(['--ui', '--task_id=1'])
# start app
app.run(port = 5000)
|
it/structures/python3/default_naming-default/test.py | reproto/reproto | 108 | 11137849 | import lower_camel as lower_camel
import lower_snake as lower_snake
import upper_camel as upper_camel
import upper_snake as upper_snake
class Entry:
def __init__(self, _lower_camel, _lower_snake, _upper_camel, _upper_snake):
self._lower_camel = _lower_camel
self._lower_snake = _lower_snake
self._upper_camel = _upper_camel
self._upper_snake = _upper_snake
@property
def lower_camel(self):
return self._lower_camel
@property
def lower_snake(self):
return self._lower_snake
@property
def upper_camel(self):
return self._upper_camel
@property
def upper_snake(self):
return self._upper_snake
@staticmethod
def decode(data):
f_lower_camel = None
if "lower_camel" in data:
f_lower_camel = data["lower_camel"]
if f_lower_camel is not None:
f_lower_camel = lower_camel.Value.decode(f_lower_camel)
f_lower_snake = None
if "lower_snake" in data:
f_lower_snake = data["lower_snake"]
if f_lower_snake is not None:
f_lower_snake = lower_snake.Value.decode(f_lower_snake)
f_upper_camel = None
if "upper_camel" in data:
f_upper_camel = data["upper_camel"]
if f_upper_camel is not None:
f_upper_camel = upper_camel.Value.decode(f_upper_camel)
f_upper_snake = None
if "upper_snake" in data:
f_upper_snake = data["upper_snake"]
if f_upper_snake is not None:
f_upper_snake = upper_snake.Value.decode(f_upper_snake)
return Entry(f_lower_camel, f_lower_snake, f_upper_camel, f_upper_snake)
def encode(self):
data = dict()
if self._lower_camel is not None:
data["lower_camel"] = self._lower_camel.encode()
if self._lower_snake is not None:
data["lower_snake"] = self._lower_snake.encode()
if self._upper_camel is not None:
data["upper_camel"] = self._upper_camel.encode()
if self._upper_snake is not None:
data["upper_snake"] = self._upper_snake.encode()
return data
def __repr__(self):
return "<Entry lower_camel:{!r}, lower_snake:{!r}, upper_camel:{!r}, upper_snake:{!r}>".format(self._lower_camel, self._lower_snake, self._upper_camel, self._upper_snake)
|
Diagnostic/watcherutil.py | shridpant/azure-linux-extensions | 266 | 11137860 | <reponame>shridpant/azure-linux-extensions
#!/usr/bin/env python
#
# Azure Linux extension
#
# Linux Azure Diagnostic Extension (Current version is specified in manifest.xml)
# Copyright (c) Microsoft Corporation
# All rights reserved.
# MIT License
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the ""Software""), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import subprocess
import os
import datetime
import time
import string
import traceback
class Watcher:
"""
A class that handles periodic monitoring activities that are requested for LAD to perform.
The first such activity is to watch /etc/fstab and report (log to console) if there's anything
wrong with that. There might be other such monitoring activities that will be added later.
"""
def __init__(self, hutil_error, hutil_log, log_to_console=False):
"""
Constructor.
:param hutil_error: Error logging function (e.g., hutil.error). This is not a stream.
:param hutil_log: Normal logging function (e.g., hutil.log). This is not a stream.
:param log_to_console: Indicates whether to log any issues to /dev/console or not.
"""
# This is only for the /etc/fstab watcher feature.
self._fstab_last_mod_time = os.path.getmtime('/etc/fstab')
self._hutil_error = hutil_error
self._hutil_log = hutil_log
self._log_to_console = log_to_console
self._imds_logger = None
def _do_log_to_console_if_enabled(self, message):
"""
Write 'message' to console. Stolen from waagent LogToCon().
"""
if self._log_to_console:
try:
with open('/dev/console', 'w') as console:
message = filter(lambda x: x in string.printable, message)
console.write(message.encode('ascii', 'ignore') + '\n')
except IOError as e:
self._hutil_error('Error writing to console. Exception={0}'.format(e))
def handle_fstab(self, ignore_time=False):
"""
Watches if /etc/fstab is modified and verifies if it's OK. Otherwise, report it in logs or to /dev/console.
:param ignore_time: Disable the default logic of delaying /etc/fstab verification by 1 minute.
This is to allow any test code to avoid waiting 1 minute unnecessarily.
:return: None
"""
try_mount = False
if ignore_time:
try_mount = True
else:
current_mod_time = os.path.getmtime('/etc/fstab')
current_mod_date_time = datetime.datetime.fromtimestamp(current_mod_time)
# Only try to mount if it's been at least 1 minute since the
# change to fstab was done, to prevent spewing out erroneous spew
if (current_mod_time != self._fstab_last_mod_time and
datetime.datetime.now() > current_mod_date_time +
datetime.timedelta(minutes=1)):
try_mount = True
self._fstab_last_mod_time = current_mod_time
ret = 0
if try_mount:
ret = subprocess.call(['sudo', 'mount', '-a', '-vf'])
if ret != 0:
# There was an error running mount, so log
error_msg = 'fstab modification failed mount validation. Please correct before reboot.'
self._hutil_error(error_msg)
self._do_log_to_console_if_enabled(error_msg)
else:
# No errors
self._hutil_log('fstab modification passed mount validation')
return ret
def set_imds_logger(self, imds_logger):
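# Attach the IMDS probe logger that watch() polls periodically.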
self._imds_logger = imds_logger
def watch(self):
"""
Main loop performing various monitoring activities periodically.
Currently iterates every 5 minutes, and other periodic activities might be
added in the loop later.
:return: None
"""
while True:
# /etc/fstab watcher
self.handle_fstab()
# IMDS probe (only sporadically, inside the function)
if self._imds_logger:
try:
self._imds_logger.log_imds_data_if_right_time()
except Exception as e:
self._hutil_error('ImdsLogger exception: {0}\nStacktrace: {1}'.format(e, traceback.format_exc()))
# Sleep 5 minutes
time.sleep(60 * 5)
pass
|
python/test/utils/test_graph_converters/test_identity.py | daniel-falk/nnabla | 2,792 | 11137892 | <reponame>daniel-falk/nnabla
# Copyright 2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import pytest
import numpy as np
import nnabla as nn
import nnabla.experimental.graph_converters as GC
from .ref_graphs.resnets import small_cf_resnet, small_id_resnet
from .ref_graphs.lenets import lenet, id_lenet
batch_size = 1
lenet_ref = id_lenet
resnet_ref = small_id_resnet
@pytest.mark.parametrize('seed', [313])
@pytest.mark.parametrize('test', [False, True])
@pytest.mark.parametrize('diff_batchsize', [True, False])
@pytest.mark.parametrize('graph_ref, graph_act', [(lenet_ref, lenet),
(resnet_ref, small_cf_resnet)])
def test_identity(seed, test, diff_batchsize, graph_ref, graph_act):
from .graph_converter_test_utils import structure_tester, value_tester
# Random number
np.random.seed(seed)
rng = np.random.RandomState(seed)
# Graph
x_data = rng.randn(batch_size, 3, 32, 32)
x = nn.Variable.from_numpy_array(x_data)
x1_data = rng.randn(128, 3, 32, 32)
x1 = nn.Variable.from_numpy_array(x1_data)
# Alter value and copy option
inp_x = x
cp_val = True
if diff_batchsize:
inp_x = x1
cp_val = False
y_tgt = graph_act(x, test=test)
# FunctionModifier
modifiers = []
modifiers.append(GC.IdentityModifier({x: inp_x}, copy_value=cp_val))
y_act = GC.GraphConverter(modifiers).convert(y_tgt)
# Ref Graph
y_ref = graph_ref(inp_x, test=test)
# Test
structure_tester(y_ref, y_act)
if diff_batchsize == False:
value_tester(y_tgt, y_act, rtol=6e-02, atol=5e-02)
|
jcvi/assembly/kmer.py | inspirewind/jcvi | 517 | 11137905 | <reponame>inspirewind/jcvi
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Deals with K-mers and K-mer distribution from reads or genome
"""
import os.path as op
import sys
import logging
import math
import numpy as np
from collections import defaultdict
from jcvi.graphics.base import (
plt,
adjust_spines,
asciiplot,
set_human_axis,
savefig,
markup,
panel_labels,
normalize_axes,
set_ticklabels_helvetica,
write_messages,
)
from jcvi.formats.fasta import Fasta
from jcvi.formats.base import BaseFile, must_open, get_number
from jcvi.utils.cbook import thousands, percentage
from jcvi.assembly.automaton import iter_project
from jcvi.apps.grid import MakeManager
from jcvi.apps.base import OptionParser, ActionDispatcher, sh, need_update, Popen, PIPE
KMERYL, KSOAP, KALLPATHS = range(3)
class KmerSpectrum(BaseFile):
def __init__(self, histfile):
super(KmerSpectrum, self).__init__(histfile)
self.load_data(histfile)
def load_data(self, histfile):
self.data = []
self.totalKmers = 0
self.hist = {}
kformat = self.guess_format(histfile)
kformats = ("Meryl", "Soap", "AllPaths")
logging.debug("Guessed format: {0}".format(kformats[kformat]))
fp = open(histfile)
for rowno, row in enumerate(fp):
if row[0] == "#":
continue
if kformat == KSOAP:
K = rowno + 1
counts = int(row.strip())
else: # meryl histogram
K, counts = row.split()[:2]
K, counts = int(K), int(counts)
Kcounts = K * counts
self.totalKmers += Kcounts
self.hist[K] = Kcounts
self.data.append((K, counts))
def guess_format(self, histfile):
# Guess the format of the Kmer histogram
fp = open(histfile)
for row in fp:
if row.startswith("# 1:"):
return KALLPATHS
if len(row.split()) == 1:
return KSOAP
return KMERYL
def get_xy(self, vmin=1, vmax=100):
self.counts = sorted((a, b) for a, b in self.hist.items() if vmin <= a <= vmax)
return zip(*self.counts)
def analyze(self, K=23, maxiter=100, method="nbinom"):
"""Analyze K-mer histogram.
Args:
K (int, optional): K-mer size. Defaults to 23.
maxiter (int): Iterations to run. Defaults to 100.
method (str, optional): Method to use, either 'nbinom' or
'allpaths'. Defaults to "nbinom".
Returns:
A dictionary containing info for annotating the plot. analyze() also
sets the following properties:
- lambda_: Main peak
- repetitive: Repeats message
- snprate: SNP rate message
"""
if method == "nbinom":
return self.analyze_nbinom(K=K, maxiter=maxiter)
return self.analyze_allpaths(K=K)
def analyze_nbinom(self, K=23, maxiter=100):
"""Analyze the K-mer histogram using negative binomial distribution.
Args:
K (int, optional): K-mer size used when generating the histogram. Defaults to 23.
"""
from scipy.stats import nbinom
from scipy.optimize import minimize_scalar
from functools import lru_cache
method, xopt = "bounded", "xatol"
MAX_1CN_SIZE = 1e10
MAX_OPTIMIZED_SIZE = 9.9e9
# Generate bins for the decomposed negative binomial distributions
bins = [
(i, i) for i in range(1, 9)
] # The first 8 CN are critical often determines ploidy
for i in (8, 16, 32, 64, 128, 256, 512): # 14 geometricly sized bins
a, b = i + 1, int(round(i * 2 ** 0.5))
bins.append((a, b))
a, b = b + 1, i * 2
bins.append((a, b))
# Convert histogram to np array so we can index by CN
kf_ceil = max([cov for cov, _ in self.data])
N = kf_ceil + 1
hist = np.zeros(N, dtype=int)
for cov, count in self.data:
hist[cov] = count
# min1: find first minimum
_kf_min1 = 5
while (
_kf_min1 - 1 >= 2
and hist[_kf_min1 - 1] * (_kf_min1 - 1) < hist[_kf_min1] * _kf_min1
):
_kf_min1 -= 1
while (
_kf_min1 <= kf_ceil
and hist[_kf_min1 + 1] * (_kf_min1 + 1) < hist[_kf_min1] * _kf_min1
):
_kf_min1 += 1
# max2: find absolute maximum mx2 above first minimum min1
_kf_max2 = _kf_min1
for kf in range(_kf_min1 + 1, int(0.8 * kf_ceil)):
if hist[kf] * kf > hist[_kf_max2] * _kf_max2:
_kf_max2 = kf
# Discard the last entry as that is usually an inflated number
hist = hist[:-1]
kf_range = np.arange(_kf_min1, len(hist), dtype=int)
P = hist[kf_range] * kf_range # Target distribution
print("==> Start nbinom method on range ({}, {})".format(_kf_min1, len(hist)))
# Below are the optimization schemes; we optimize one variable at a time
@lru_cache(maxsize=None)
def nbinom_pmf_range(lambda_: int, rho: int, bin_id: int):
stacked = np.zeros(len(kf_range), dtype=np.float64)
lambda_ /= 100 # 2-digit precision
rho /= 100 # 2-digit precision
n = lambda_ / (rho - 1)
p = 1 / rho
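# With n = lambda_ / (rho - 1) and p = 1 / rho, scipy's nbinom has mean
# lambda_ and variance rho * lambda_, so rho acts as the overdispersion factor.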
start, end = bins[bin_id]
for i in range(start, end + 1):
stacked += nbinom.pmf(kf_range, n * i, p)
return stacked
def generative_model(G, lambda_, rho):
stacked = np.zeros(len(kf_range), dtype=np.float64)
lambda_ = int(round(lambda_ * 100))
rho = int(round(rho * 100))
for bin_id, g in enumerate(G):
stacked += g * nbinom_pmf_range(lambda_, rho, bin_id)
stacked *= kf_range
return stacked
def func(lambda_, rho, G):
stacked = generative_model(G, lambda_, rho)
return np.sum((P - stacked) ** 2) # L2 norm
def optimize_func(lambda_, rho, G):
# Iterate over all G
for i, g in enumerate(G):
G_i = optimize_func_Gi(lambda_, rho, G, i)
if (
not 1 < G_i < MAX_OPTIMIZED_SIZE
): # Optimizer did not optimize this G_i
break
# Also remove the last bin since it is subject to marginal effect
G[i - 1] = 0
lambda_ = optimize_func_lambda_(lambda_, rho, G)
rho = optimize_func_rho(lambda_, rho, G)
score = func(lambda_, rho, G)
return lambda_, rho, G, score
def optimize_func_lambda_(lambda_, rho, G):
def f(arg):
return func(arg, rho, G)
res = minimize_scalar(
f, bounds=(_kf_min1, 100), method=method, options={xopt: 0.01}
)
return res.x
def optimize_func_rho(lambda_, rho, G):
def f(arg):
return func(lambda_, arg, G)
res = minimize_scalar(
f, bounds=(1.001, 5), method=method, options={xopt: 0.01}
)
return res.x
def optimize_func_Gi(lambda_, rho, G, i):
# Iterate a single G_i
def f(arg):
G[i] = arg
return func(lambda_, rho, G)
res = minimize_scalar(
f, bounds=(0, MAX_1CN_SIZE), method=method, options={xopt: 100}
)
return res.x
def run_optimization(termination=0.999, maxiter=100):
ll, rr, GG = l0, r0, G0
prev_score = np.inf
for i in range(maxiter):
print("Iteration", i + 1, file=sys.stderr)
ll, rr, GG, score = optimize_func(ll, rr, GG)
if score / prev_score > termination:
break
prev_score = score
if i % 10 == 0:
print(ll, rr, GG, score, file=sys.stderr)
print("Success!", file=sys.stderr)
# Remove bogus values that are close to the bounds
final_GG = [g for g in GG if 1 < g < MAX_OPTIMIZED_SIZE]
return ll, rr, final_GG
# Optimization - very slow
G0 = np.zeros(len(bins))
l0 = _kf_max2
r0 = 1.5
print(l0, r0, G0, file=sys.stderr)
ll, rr, GG = run_optimization(maxiter=maxiter)
print(ll, rr, GG, file=sys.stderr)
# Ready for genome summary
m = "\n==> Kmer (K={0}) Spectrum Analysis\n".format(K)
genome_size = int(round(self.totalKmers / ll))
inferred_genome_size = 0
for i, g in enumerate(GG):
start, end = bins[i]
mid = (start + end) / 2
inferred_genome_size += g * mid * (end - start + 1)
inferred_genome_size = int(round(inferred_genome_size))
genome_size = max(genome_size, inferred_genome_size)
m += "Genome size estimate = {0}\n".format(thousands(genome_size))
copy_series = []
copy_messages = []
for i, g in enumerate(GG):
start, end = bins[i]
mid = (start + end) / 2
copy_num = start if start == end else "{}-{}".format(start, end)
g_copies = int(round(g * mid * (end - start + 1)))
copy_series.append((mid, copy_num, g_copies, g))
copy_message = "CN {}: {:.1f} Mb ({:.1f} percent)".format(
copy_num, g_copies / 1e6, g_copies * 100 / genome_size
)
copy_messages.append(copy_message)
m += copy_message + "\n"
if genome_size > inferred_genome_size:
g_copies = genome_size - inferred_genome_size
copy_num = "{}+".format(end + 1)
copy_series.append((end + 1, copy_num, g_copies, g_copies / (end + 1)))
m += "CN {}: {:.1f} Mb ({:.1f} percent)\n".format(
copy_num, g_copies / 1e6, g_copies * 100 / genome_size
)
# Determine ploidy
def determine_ploidy(copy_series, threshold=0.15):
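# Ploidy is taken as the highest copy number whose bin still contributes
# at least `threshold` of the cumulative total accumulated so far.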
counts_so_far = 1
ploidy_so_far = 0
for mid, copy_num, g_copies, g in copy_series:
if g_copies / counts_so_far < threshold:
break
counts_so_far += g_copies
ploidy_so_far = mid
return int(ploidy_so_far)
ploidy = determine_ploidy(copy_series)
self.ploidy = ploidy
self.ploidy_message = "Ploidy: {}".format(ploidy)
m += self.ploidy_message + "\n"
self.copy_messages = copy_messages[:ploidy]
# Repeat content
def calc_repeats(copy_series, ploidy, genome_size):
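# Repeat fraction = 1 - (genome content at copy number <= ploidy) / (genome size)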
unique = 0
for mid, copy_num, g_copies, g in copy_series:
if mid <= ploidy:
unique += g_copies
else:
break
return 1 - unique / genome_size
repeats = calc_repeats(copy_series, ploidy, genome_size)
self.repetitive = "Repeats: {:.1f} percent".format(repeats * 100)
m += self.repetitive + "\n"
# SNP rate
def calc_snp_rate(copy_series, ploidy, genome_size, K):
# We can calculate the SNP rate s, assuming K-mer of length K:
# s = 1-(1-L/G)^(1/K)
# L: # of unique K-mers under 'het' peak
# G: genome size
# K: K-mer length
L = 0
for mid, copy_num, g_copies, g in copy_series:
if mid < ploidy:
L += g
else:
break
return 1 - (1 - L / genome_size) ** (1 / K)
snp_rate = calc_snp_rate(copy_series, ploidy, genome_size, K)
self.snprate = "SNP rate: {:.2f} percent".format(snp_rate * 100)
m += self.snprate + "\n"
print(m, file=sys.stderr)
self.lambda_ = ll
return {
"generative_model": generative_model,
"Gbins": GG,
"lambda": ll,
"rho": rr,
"kf_range": kf_range,
}
def analyze_allpaths(self, ploidy=2, K=23, covmax=1000000):
"""
Analyze the K-mer spectrum; calculations are derived from
allpathslg/src/kmers/KmerSpectra.cc
"""
from math import sqrt
data = self.data
kf_ceil = max(K for (K, c) in data)
if kf_ceil > covmax:
exceeds = sum(1 for (K, c) in data if K > covmax)
logging.debug(
"A total of {0} distinct K-mers appear > "
"{1} times. Ignored ...".format(exceeds, covmax)
)
kf_ceil = covmax
nkf = kf_ceil + 1
a = [0] * nkf
for kf, c in data:
if kf > kf_ceil:
continue
a[kf] = c
ndk = a # number of distinct kmers
nk = [k * c for k, c in enumerate(a)] # number of kmers
cndk = [0] * nkf # cumulative number of distinct kmers
cnk = [0] * nkf # cumulative number of kmers
for kf in range(1, nkf):
cndk[kf] = cndk[kf - 1] + 0.5 * (ndk[kf - 1] + ndk[kf])
cnk[kf] = cnk[kf - 1] + 0.5 * (nk[kf - 1] + nk[kf])
# Separate kmer spectrum in 5 regions based on the kf
# 1 ... kf_min1 : bad kmers with low frequency
# kf_min1 ... kf_min2 : good kmers CN = 1/2 (SNPs)
# kf_min2 ... kf_min3 : good kmers CN = 1
# kf_min3 ... kf_hi : good kmers CN > 1 (repetitive)
# kf_hi ... inf : bad kmers with high frequency
# min1: find first minimum
_kf_min1 = 10
while _kf_min1 - 1 >= 2 and nk[_kf_min1 - 1] < nk[_kf_min1]:
_kf_min1 -= 1
while _kf_min1 <= kf_ceil and nk[_kf_min1 + 1] < nk[_kf_min1]:
_kf_min1 += 1
# max2: find absolute maximum mx2 above first minimum min1
_kf_max2 = _kf_min1
for kf in range(_kf_min1 + 1, int(0.8 * kf_ceil)):
if nk[kf] > nk[_kf_max2]:
_kf_max2 = kf
# max2: resetting max2 for cases of very high polymorphism
if ploidy == 2:
ndk_half = ndk[_kf_max2 // 2]
ndk_double = ndk[_kf_max2 * 2]
if ndk_double > ndk_half:
_kf_max2 *= 2
# max1: SNPs local maximum max1 as half global maximum max2
_kf_max1 = _kf_max2 // 2
# min2: SNPs local minimum min2 between max1 and max2
_kf_min2 = (
_kf_max1
* (2 * ndk[_kf_max1] + ndk[_kf_max2])
// (ndk[_kf_max1] + ndk[_kf_max2])
)
# min1: refine between min1 and max2/2
for kf in range(_kf_min1 + 1, _kf_max1):
if nk[kf] < nk[_kf_min1]:
_kf_min1 = kf
# min3: not a minimum, really. upper edge of main peak
_kf_min3 = _kf_max2 * 3 // 2
print("kfs:", _kf_min1, _kf_max1, _kf_min2, _kf_max2, _kf_min3, file=sys.stderr)
self.min1 = _kf_min1
self.max1 = _kf_max1
self.min2 = _kf_min2
self.max2 = _kf_max2
self.min3 = _kf_min3
self.lambda_ = self.max2 # Main peak
# Define maximum kf above which we neglect data
_kf_hi = (
_kf_max2 * sqrt(4 * ndk[2 * _kf_max2] * _kf_max2)
if 2 * _kf_max2 < len(ndk)
else _kf_max2 * sqrt(4 * ndk[len(ndk) - 1] * _kf_max2)
)
_kf_hi = int(_kf_hi)
if _kf_hi > kf_ceil:
_kf_hi = kf_ceil
_nk_total = cnk[len(cnk) - 1]
_nk_bad_low_kf = cnk[_kf_min1]
_nk_good_uniq = cnk[_kf_min3] - cnk[_kf_min2]
_nk_bad_high_kf = _nk_total - cnk[_kf_hi]
_ndk_good_snp = cndk[_kf_min2] - cndk[_kf_min1]
_ndk_good_uniq = cndk[_kf_min3] - cndk[_kf_min2]
# kmer coverage C_k
_kf_ave_uniq = _nk_good_uniq * 1.0 / _ndk_good_uniq
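# Genome size estimate: total good kmers divided by the average unique-kmer coverage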
_genome_size = (_nk_total - _nk_bad_low_kf - _nk_bad_high_kf) / _kf_ave_uniq
_genome_size_unique = _ndk_good_uniq + _ndk_good_snp / 2
_genome_size_repetitive = _genome_size - _genome_size_unique
_coverage = _nk_total / _genome_size if _genome_size else 0
# SNP rate estimation, assumes uniform distribution of SNPs over the
# genome and accounts for the reduction in SNP kmer counts when
# polymorphism is very high
if ploidy == 2:
_d_SNP = (
1.0 / (1.0 - (1.0 - 0.5 * _ndk_good_snp / _genome_size) ** (1.0 / K))
if _ndk_good_snp > 0
else 1000000
)
G = int(_genome_size)
G1 = int(_genome_size_unique)
GR = int(_genome_size_repetitive)
coverage = int(_coverage)
m = "Kmer (K={0}) Spectrum Analysis\n".format(K)
m += "Genome size estimate = {0}\n".format(thousands(G))
m += "Genome size estimate CN = 1 = {0} ({1})\n".format(
thousands(G1), percentage(G1, G)
)
m += "Genome size estimate CN > 1 = {0} ({1})\n".format(
thousands(GR), percentage(GR, G)
)
m += "Coverage estimate: {0} x\n".format(coverage)
self.repetitive = "Repeats: {0} percent".format(GR * 100 // G)
if ploidy == 2:
d_SNP = int(_d_SNP)
self.snprate = "SNP rate ~= 1/{0}".format(d_SNP)
else:
self.snprate = "SNP rate not computed (Ploidy = {0})".format(ploidy)
m += self.snprate + "\n"
self.genomesize = int(round(self.totalKmers * 1.0 / self.max2))
print(m, file=sys.stderr)
return {}
class KMCComplex(object):
def __init__(self, indices):
self.indices = indices
def write(self, outfile, filename="stdout", action="union"):
assert action in ("union", "intersect")
op = " + sum " if action == "union" else " * "
fw = must_open(filename, "w")
print("INPUT:", file=fw)
ss = []
pad = len(str(len(self.indices)))
for i, e in enumerate(self.indices):
s = "s{0:0{1}d}".format(i + 1, pad)
ss.append(s)
print("{} = {}".format(s, e.rsplit(".", 1)[0]), file=fw)
print("OUTPUT:", file=fw)
print("{} = {}".format(outfile, op.join(ss)), file=fw)
fw.close()
def main():
actions = (
# K-mer counting
("jellyfish", "count kmers using `jellyfish`"),
("meryl", "count kmers using `meryl`"),
("kmc", "count kmers using `kmc`"),
("kmcop", "intersect or union kmc indices"),
("entropy", "calculate entropy for kmers from kmc dump"),
("bed", "map kmers on FASTA"),
# K-mer histogram
("histogram", "plot the histogram based on meryl K-mer distribution"),
("multihistogram", "plot histogram across a set of K-mer sizes"),
# These forms a pipeline to count K-mers for given FASTA seq
("dump", "convert FASTA sequences to list of K-mers"),
("bin", "serialize counts to bitarrays"),
("bincount", "count K-mers in the bin"),
("count", "run dump - jellyfish - bin - bincount in serial"),
("logodds", "compute log likelihood between two db"),
("model", "model kmer distribution given error rate"),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
def entropy_score(kmer):
"""
Schmieder and Edwards. Quality control and preprocessing of metagenomic datasets. (2011) Bioinformatics
https://academic.oup.com/bioinformatics/article/27/6/863/236283/Quality-control-and-preprocessing-of-metagenomic
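The returned score is the Shannon entropy of overlapping trinucleotides,
normalized and scaled to roughly 0-100; higher scores indicate more complex
(less repetitive) sequence.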
"""
l = len(kmer) - 2
k = l if l < 64 else 64
counts = defaultdict(int)
for i in range(l):
trinuc = kmer[i : i + 3]
counts[trinuc] += 1
logk = math.log(k)
res = 0
for k, v in counts.items():
f = v * 1.0 / l
res += f * math.log(f) / logk
return res * -100
def entropy(args):
"""
%prog entropy kmc_dump.out
kmc_dump.out contains two columns:
AAAAAAAAAAAGAAGAAAGAAA 34
"""
p = OptionParser(entropy.__doc__)
p.add_option(
"--threshold", default=0, type="int", help="Complexity needs to be above"
)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
(kmc_out,) = args
fp = open(kmc_out)
for row in fp:
kmer, count = row.split()
score = entropy_score(kmer)
if score >= opts.threshold:
print(" ".join((kmer, count, "{:.2f}".format(score))))
def bed(args):
"""
%prog bed fastafile kmer.dump.txt
Map kmers on FASTA.
"""
from jcvi.formats.fasta import rc, parse_fasta
p = OptionParser(bed.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
fastafile, dumpfile = args
fp = open(dumpfile)
KMERS = set()
for row in fp:
kmer = row.split()[0]
kmer_rc = rc(kmer)
KMERS.add(kmer)
KMERS.add(kmer_rc)
K = len(kmer)
logging.debug("Imported {} {}-mers".format(len(KMERS), K))
for name, seq in parse_fasta(fastafile):
name = name.split()[0]
for i in range(len(seq) - K):
if i % 5000000 == 0:
print("{}:{}".format(name, i), file=sys.stderr)
kmer = seq[i : i + K]
if kmer in KMERS:
print("\t".join(str(x) for x in (name, i, i + K, kmer)))
def kmcop(args):
"""
%prog kmcop *.kmc_suf
Intersect or union kmc indices.
"""
p = OptionParser(kmcop.__doc__)
p.add_option(
"--action", choices=("union", "intersect"), default="union", help="Action"
)
p.add_option("-o", default="results", help="Output name")
opts, args = p.parse_args(args)
if len(args) < 2:
sys.exit(not p.print_help())
indices = args
ku = KMCComplex(indices)
ku.write(opts.o, action=opts.action)
def kmc(args):
"""
%prog kmc folder
Run kmc3 on Illumina reads.
"""
p = OptionParser(kmc.__doc__)
p.add_option("-k", default=21, type="int", help="Kmer size")
p.add_option(
"--ci", default=2, type="int", help="Exclude kmers with less than ci counts"
)
p.add_option("--cs", default=2, type="int", help="Maximal value of a counter")
p.add_option(
"--cx", default=None, type="int", help="Exclude kmers with more than cx counts"
)
p.add_option(
"--single",
default=False,
action="store_true",
help="Input is single-end data, only one FASTQ/FASTA",
)
p.add_option(
"--fasta",
default=False,
action="store_true",
help="Input is FASTA instead of FASTQ",
)
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
(folder,) = args
K = opts.k
n = 1 if opts.single else 2
pattern = (
"*.fa,*.fa.gz,*.fasta,*.fasta.gz"
if opts.fasta
else "*.fq,*.fq.gz,*.fastq,*.fastq.gz"
)
mm = MakeManager()
for p, pf in iter_project(folder, pattern=pattern, n=n, commonprefix=False):
pf = pf.split("_")[0] + ".ms{}".format(K)
infiles = pf + ".infiles"
fw = open(infiles, "w")
print("\n".join(p), file=fw)
fw.close()
cmd = "kmc -k{} -m64 -t{}".format(K, opts.cpus)
cmd += " -ci{} -cs{}".format(opts.ci, opts.cs)
if opts.cx:
cmd += " -cx{}".format(opts.cx)
if opts.fasta:
cmd += " -fm"
cmd += " @{} {} .".format(infiles, pf)
outfile = pf + ".kmc_suf"
mm.add(p, outfile, cmd)
mm.write()
def meryl(args):
"""
%prog meryl folder
Run meryl on Illumina reads.
"""
p = OptionParser(meryl.__doc__)
p.add_option("-k", default=19, type="int", help="Kmer size")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
(folder,) = args
K = opts.k
cpus = opts.cpus
mm = MakeManager()
for p, pf in iter_project(folder):
cmds = []
mss = []
for i, ip in enumerate(p):
ms = "{}{}.ms{}".format(pf, i + 1, K)
mss.append(ms)
cmd = "meryl -B -C -m {} -threads {}".format(K, cpus)
cmd += " -s {} -o {}".format(ip, ms)
cmds.append(cmd)
ams, bms = mss
pms = "{}.ms{}".format(pf, K)
cmd = "meryl -M add -s {} -s {} -o {}".format(ams, bms, pms)
cmds.append(cmd)
cmd = "rm -f {}.mcdat {}.mcidx {}.mcdat {}.mcidx".format(ams, ams, bms, bms)
cmds.append(cmd)
mm.add(p, pms + ".mcdat", cmds)
mm.write()
def model(args):
"""
%prog model erate
Model kmer distribution given error rate. See derivation in FIONA paper:
<http://bioinformatics.oxfordjournals.org/content/30/17/i356.full>
"""
from scipy.stats import binom, poisson
p = OptionParser(model.__doc__)
p.add_option("-k", default=23, type="int", help="Kmer size")
p.add_option("--cov", default=50, type="int", help="Expected coverage")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
(erate,) = args
erate = float(erate)
cov = opts.cov
k = opts.k
xy = []
# The range includes c=0, although it is unclear what it means to have c=0
for c in range(0, cov * 2 + 1):
Prob_Yk = 0
for i in range(k + 1):
# Probability of having exactly i errors
pi_i = binom.pmf(i, k, erate)
# Expected coverage of kmer with exactly i errors
mu_i = cov * (erate / 3) ** i * (1 - erate) ** (k - i)
# Probability of seeing coverage of c
Prob_Yk_i = poisson.pmf(c, mu_i)
# Sum i over 0, 1, ... up to k errors
Prob_Yk += pi_i * Prob_Yk_i
xy.append((c, Prob_Yk))
x, y = zip(*xy)
asciiplot(x, y, title="Model")
def logodds(args):
"""
%prog logodds cnt1 cnt2
Compute log likelihood between two db.
"""
from math import log
from jcvi.formats.base import DictFile
p = OptionParser(logodds.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
cnt1, cnt2 = args
d = DictFile(cnt2)
fp = open(cnt1)
for row in fp:
scf, c1 = row.split()
c2 = d[scf]
c1, c2 = float(c1), float(c2)
c1 += 1
c2 += 1
score = int(100 * (log(c1) - log(c2)))
print("{0}\t{1}".format(scf, score))
def get_K(jfdb):
"""
Infer K from jellyfish db.
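For example, a database named "jf-K23" (the naming used by jellyfish() below)
yields K=23.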
"""
j = jfdb.rsplit("_", 1)[0].rsplit("-", 1)[-1]
assert j[0] == "K"
return int(j[1:])
def count(args):
"""
%prog count fastafile jf.db
Run dump - jellyfish - bin - bincount in serial.
"""
from bitarray import bitarray
p = OptionParser(count.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
fastafile, jfdb = args
K = get_K(jfdb)
cmd = "jellyfish query {0} -C | cut -d' ' -f 2".format(jfdb)
t = must_open("tmp", "w")
proc = Popen(cmd, stdin=PIPE, stdout=t)
t.flush()
f = Fasta(fastafile, lazy=True)
for name, rec in f.iteritems_ordered():
kmers = list(make_kmers(rec.seq, K))
print("\n".join(kmers), file=proc.stdin)
proc.stdin.close()
logging.debug(cmd)
proc.wait()
a = bitarray()
binfile = ".".join((fastafile, jfdb, "bin"))
fw = open(binfile, "w")
t.seek(0)
for row in t:
c = row.strip()
a.append(int(c))
a.tofile(fw)
logging.debug("Serialize {0} bits to `{1}`.".format(len(a), binfile))
fw.close()
sh("rm {0}".format(t.name))
logging.debug(
"Shared K-mers (K={0}) between `{1}` and `{2}` written to `{3}`.".format(
K, fastafile, jfdb, binfile
)
)
cntfile = ".".join((fastafile, jfdb, "cnt"))
bincount([fastafile, binfile, "-o", cntfile, "-K {0}".format(K)])
logging.debug("Shared K-mer counts written to `{0}`.".format(cntfile))
def bincount(args):
"""
%prog bincount fastafile binfile
Count K-mers in the bin.
"""
from bitarray import bitarray
from jcvi.formats.sizes import Sizes
p = OptionParser(bincount.__doc__)
p.add_option("-K", default=23, type="int", help="K-mer size")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
fastafile, binfile = args
K = opts.K
fp = open(binfile)
a = bitarray()
a.fromfile(fp)
f = Sizes(fastafile)
tsize = 0
fw = must_open(opts.outfile, "w")
for name, seqlen in f.iter_sizes():
ksize = seqlen - K + 1
b = a[tsize : tsize + ksize]
bcount = b.count()
print("\t".join(str(x) for x in (name, bcount)), file=fw)
tsize += ksize
def bin(args):
"""
%prog bin filename filename.bin
Serialize counts to bitarrays.
"""
from bitarray import bitarray
p = OptionParser(bin.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
inp, outp = args
fp = must_open(inp)
fw = must_open(outp, "w")
a = bitarray()
for row in fp:
c = row.split()[-1]
a.append(int(c))
a.tofile(fw)
fw.close()
def make_kmers(seq, K):
seq = str(seq).upper().replace("N", "A")
seqlen = len(seq)
for i in range(seqlen - K + 1):
yield seq[i : i + K]
def dump(args):
"""
%prog dump fastafile
Convert FASTA sequences to list of K-mers.
"""
p = OptionParser(dump.__doc__)
p.add_option("-K", default=23, type="int", help="K-mer size")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
(fastafile,) = args
K = opts.K
fw = must_open(opts.outfile, "w")
f = Fasta(fastafile, lazy=True)
for name, rec in f.iteritems_ordered():
kmers = list(make_kmers(rec.seq, K))
print("\n".join(kmers), file=fw)
fw.close()
def jellyfish(args):
"""
%prog jellyfish [*.fastq|*.fasta]
Run jellyfish to dump histogram to be used in kmer.histogram().
"""
from jcvi.apps.base import getfilesize
from jcvi.utils.cbook import human_size
p = OptionParser(jellyfish.__doc__)
p.add_option("-K", default=23, type="int", help="K-mer size")
p.add_option(
"--coverage",
default=40,
type="int",
help="Expected sequence coverage",
)
p.add_option("--prefix", default="jf", help="Database prefix")
p.add_option(
"--nohist",
default=False,
action="store_true",
help="Do not print histogram",
)
p.set_home("jellyfish")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
fastqfiles = args
K = opts.K
coverage = opts.coverage
totalfilesize = sum(getfilesize(x) for x in fastqfiles)
fq = fastqfiles[0]
pf = opts.prefix
gzip = fq.endswith(".gz")
hashsize = totalfilesize / coverage
logging.debug(
"Total file size: {0}, hashsize (-s): {1}".format(
human_size(totalfilesize, a_kilobyte_is_1024_bytes=True), hashsize
)
)
jfpf = "{0}-K{1}".format(pf, K)
jfdb = jfpf
fastqfiles = " ".join(fastqfiles)
jfcmd = op.join(opts.jellyfish_home, "jellyfish")
cmd = jfcmd
cmd += " count -t {0} -C -o {1}".format(opts.cpus, jfpf)
cmd += " -s {0} -m {1}".format(hashsize, K)
if gzip:
cmd = "gzip -dc {0} | ".format(fastqfiles) + cmd + " /dev/fd/0"
else:
cmd += " " + fastqfiles
if need_update(fastqfiles, jfdb):
sh(cmd)
if opts.nohist:
return
jfhisto = jfpf + ".histogram"
cmd = jfcmd + " histo -t 64 {0} -o {1}".format(jfdb, jfhisto)
if need_update(jfdb, jfhisto):
sh(cmd)
def merylhistogram(merylfile):
"""
Run meryl to dump the histogram to be used in kmer.histogram(). The merylfile
is a file ending in .mcidx or .mcdat.
"""
pf, sf = op.splitext(merylfile)
outfile = pf + ".histogram"
if need_update(merylfile, outfile):
cmd = "meryl -Dh -s {0}".format(pf)
sh(cmd, outfile=outfile)
return outfile
def multihistogram(args):
"""
%prog multihistogram *.histogram species
Plot the histogram based on a set of K-mer histograms. The method follows
Star et al. (Atlantic Cod genome paper).
"""
p = OptionParser(multihistogram.__doc__)
p.add_option("--kmin", default=15, type="int", help="Minimum K-mer size, inclusive")
p.add_option("--kmax", default=30, type="int", help="Maximum K-mer size, inclusive")
p.add_option("--vmin", default=2, type="int", help="Minimum value, inclusive")
p.add_option("--vmax", default=100, type="int", help="Maximum value, inclusive")
opts, args, iopts = p.set_image_options(args, figsize="10x5", dpi=300)
if len(args) < 1:
sys.exit(not p.print_help())
histfiles = args[:-1]
species = args[-1]
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
A = fig.add_axes([0.08, 0.12, 0.38, 0.76])
B = fig.add_axes([0.58, 0.12, 0.38, 0.76])
lines = []
legends = []
genomesizes = []
for histfile in histfiles:
ks = KmerSpectrum(histfile)
x, y = ks.get_xy(opts.vmin, opts.vmax)
K = get_number(op.basename(histfile).split(".")[0].split("-")[-1])
if not opts.kmin <= K <= opts.kmax:
continue
(line,) = A.plot(x, y, "-", lw=1)
lines.append(line)
legends.append("K = {0}".format(K))
ks.analyze(K=K, method="allpaths")
genomesizes.append((K, ks.genomesize / 1e6))
leg = A.legend(lines, legends, shadow=True, fancybox=True)
leg.get_frame().set_alpha(0.5)
title = "{0} genome K-mer histogram".format(species)
A.set_title(markup(title))
xlabel, ylabel = "Coverage (X)", "Counts"
A.set_xlabel(xlabel)
A.set_ylabel(ylabel)
set_human_axis(A)
title = "{0} genome size estimate".format(species)
B.set_title(markup(title))
x, y = zip(*genomesizes)
B.plot(x, y, "ko", mfc="w")
t = np.linspace(opts.kmin - 0.5, opts.kmax + 0.5, 100)
p = np.poly1d(np.polyfit(x, y, 2))
B.plot(t, p(t), "r:")
xlabel, ylabel = "K-mer size", "Estimated genome size (Mb)"
B.set_xlabel(xlabel)
B.set_ylabel(ylabel)
set_ticklabels_helvetica(B)
labels = ((0.04, 0.96, "A"), (0.54, 0.96, "B"))
panel_labels(root, labels)
normalize_axes(root)
imagename = species + ".multiK.pdf"
savefig(imagename, dpi=iopts.dpi, iopts=iopts)
def histogram(args):
"""
%prog histogram meryl.histogram species K
Plot the histogram based on the meryl K-mer distribution; species and N are
only used to annotate the graphic.
"""
p = OptionParser(histogram.__doc__)
p.add_option(
"--vmin",
dest="vmin",
default=1,
type="int",
help="minimum value, inclusive",
)
p.add_option(
"--vmax",
dest="vmax",
default=100,
type="int",
help="maximum value, inclusive",
)
p.add_option(
"--pdf",
default=False,
action="store_true",
help="Print PDF instead of ASCII plot",
)
p.add_option(
"--method",
choices=("nbinom", "allpaths"),
default="nbinom",
help="'nbinom' - slow but more accurate for heterozygous or polyploid genomes; 'allpaths' - fast and works for homozygous genomes",
)
p.add_option(
"--maxiter",
default=100,
type="int",
help="Max iterations for optimization. Only used with --method nbinom",
)
p.add_option(
"--coverage", default=0, type="int", help="Kmer coverage [default: auto]"
)
p.add_option(
"--nopeaks",
default=False,
action="store_true",
help="Do not annotate K-mer peaks",
)
opts, args, iopts = p.set_image_options(args, figsize="7x7")
if len(args) != 3:
sys.exit(not p.print_help())
histfile, species, N = args
method = opts.method
vmin, vmax = opts.vmin, opts.vmax
ascii = not opts.pdf
peaks = not opts.nopeaks and method == "allpaths"
N = int(N)
if histfile.rsplit(".", 1)[-1] in ("mcdat", "mcidx"):
logging.debug("CA kmer index found")
histfile = merylhistogram(histfile)
ks = KmerSpectrum(histfile)
method_info = ks.analyze(K=N, maxiter=opts.maxiter, method=method)
Total_Kmers = int(ks.totalKmers)
coverage = opts.coverage
Kmer_coverage = ks.lambda_ if not coverage else coverage
Genome_size = int(round(Total_Kmers * 1.0 / Kmer_coverage))
Total_Kmers_msg = "Total {0}-mers: {1}".format(N, thousands(Total_Kmers))
Kmer_coverage_msg = "{0}-mer coverage: {1:.1f}x".format(N, Kmer_coverage)
Genome_size_msg = "Estimated genome size: {0:.1f} Mb".format(Genome_size / 1e6)
Repetitive_msg = ks.repetitive
SNPrate_msg = ks.snprate
for msg in (Total_Kmers_msg, Kmer_coverage_msg, Genome_size_msg):
print(msg, file=sys.stderr)
x, y = ks.get_xy(vmin, vmax)
title = "{0} {1}-mer histogram".format(species, N)
if ascii:
asciiplot(x, y, title=title)
return Genome_size
plt.figure(1, (iopts.w, iopts.h))
plt.bar(x, y, fc="#b2df8a", lw=0)
# Plot the negative binomial fit
if method == "nbinom":
generative_model = method_info["generative_model"]
GG = method_info["Gbins"]
ll = method_info["lambda"]
rr = method_info["rho"]
kf_range = method_info["kf_range"]
stacked = generative_model(GG, ll, rr)
plt.plot(
kf_range,
stacked,
":",
color="#6a3d9a",
lw=2,
)
ax = plt.gca()
if peaks: # Only works for method 'allpaths'
t = (ks.min1, ks.max1, ks.min2, ks.max2, ks.min3)
tcounts = [(x, y) for x, y in ks.counts if x in t]
if tcounts:
x, y = zip(*tcounts)
tcounts = dict(tcounts)
plt.plot(x, y, "ko", lw=3, mec="k", mfc="w")
ax.text(ks.max1, tcounts[ks.max1], "SNP peak")
ax.text(ks.max2, tcounts[ks.max2], "Main peak")
ymin, ymax = ax.get_ylim()
ymax = ymax * 7 / 6
if method == "nbinom":
# Plot multiple CN locations, CN1, CN2, ... up to ploidy
cn_color = "#a6cee3"
for i in range(1, ks.ploidy + 1):
x = i * ks.lambda_
plt.plot((x, x), (0, ymax), "-.", color=cn_color)
plt.text(
x,
ymax * 0.95,
"CN{}".format(i),
ha="right",
va="center",
color=cn_color,
rotation=90,
)
messages = [
Total_Kmers_msg,
Kmer_coverage_msg,
Genome_size_msg,
Repetitive_msg,
SNPrate_msg,
]
if method == "nbinom":
messages += [ks.ploidy_message] + ks.copy_messages
write_messages(ax, messages)
ax.set_title(markup(title))
ax.set_xlim((0, vmax))
ax.set_ylim((0, ymax))
adjust_spines(ax, ["left", "bottom"], outward=True)
xlabel, ylabel = "Coverage (X)", "Counts"
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
set_human_axis(ax)
imagename = histfile.split(".")[0] + "." + iopts.format
savefig(imagename, dpi=100)
return Genome_size
if __name__ == "__main__":
main()
|
scripts/external_libs/ansi2html/ansi2html/style.py | timgates42/trex-core | 956 | 11137920 | # This file is part of ansi2html.
# Copyright (C) 2012 <NAME> <<EMAIL>>
# Copyright (C) 2013 <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
class Rule(object):
def __init__(self, klass, **kw):
self.klass = klass
self.kw = '; '.join([(k.replace('_', '-')+': '+kw[k])
for k in sorted(kw.keys())]).strip()
self.kwl = [(k.replace('_', '-'), kw[k][1:])
for k in sorted(kw.keys())]
def __str__(self):
return '%s { %s; }' % (self.klass, self.kw)
def index(r, g, b):
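# 6x6x6 color cube of the xterm-256 palette occupies indices 16-231.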
return str(16 + (r * 36) + (g * 6) + b)
def color(r, g, b):
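# Approximate each 0-5 channel level with evenly spaced 42-unit steps (0-210).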
return "#%.2x%.2x%.2x" % (r * 42, g * 42, b * 42)
def level(grey):
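# Greyscale ramp: 24 levels from 8 to 238 in steps of 10.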
return "#%.2x%.2x%.2x" % (((grey * 10) + 8,) * 3)
def index2(grey):
return str(232 + grey)
# http://en.wikipedia.org/wiki/ANSI_escape_code#Colors
SCHEME = {
# black red green brown/yellow blue magenta cyan grey/white
'ansi2html': (
"#000316", "#aa0000", "#00aa00", "#aa5500",
"#0000aa", "#E850A8", "#00aaaa", "#F5F1DE",
"#7f7f7f", "#ff0000", "#00ff00", "#ffff00",
"#5c5cff", "#ff00ff", "#00ffff", "#ffffff"),
'xterm': (
"#000000", "#cd0000", "#00cd00", "#cdcd00",
"#0000ee", "#cd00cd", "#00cdcd", "#e5e5e5",
"#7f7f7f", "#ff0000", "#00ff00", "#ffff00",
"#5c5cff", "#ff00ff", "#00ffff", "#ffffff"),
'osx': (
"#000000", "#c23621", "#25bc24", "#adad27",
"#492ee1", "#d338d3", "#33bbc8", "#cbcccd") * 2,
# http://ethanschoonover.com/solarized
'solarized': (
"#262626", "#d70000", "#5f8700", "#af8700",
"#0087ff", "#af005f", "#00afaf", "#e4e4e4",
"#1c1c1c", "#d75f00", "#585858", "#626262",
"#808080", "#5f5faf", "#8a8a8a", "#ffffd7"),
}
def get_styles(dark_bg=True, scheme='ansi2html'):
css = [
Rule('.ansi2html-content', white_space='pre-wrap', word_wrap='break-word', display='inline'),
Rule('.body_foreground', color=('#000000', '#AAAAAA')[dark_bg]),
Rule('.body_background', background_color=('#AAAAAA', '#000000')[dark_bg]),
Rule('.body_foreground > .bold,.bold > .body_foreground, body.body_foreground > pre > .bold',
color=('#000000', '#FFFFFF')[dark_bg], font_weight=('bold', 'normal')[dark_bg]),
Rule('.inv_foreground', color=('#000000', '#FFFFFF')[not dark_bg]),
Rule('.inv_background', background_color=('#AAAAAA', '#000000')[not dark_bg]),
Rule('.ansi1', font_weight='bold'),
Rule('.ansi2', font_weight='lighter'),
Rule('.ansi3', font_style='italic'),
Rule('.ansi4', text_decoration='underline'),
Rule('.ansi5', text_decoration='blink'),
Rule('.ansi6', text_decoration='blink'),
Rule('.ansi8', visibility='hidden'),
Rule('.ansi9', text_decoration='line-through'),
]
# set palette
pal = SCHEME[scheme]
for _index in range(8):
css.append(Rule('.ansi3%s' % _index, color=pal[_index]))
css.append(Rule('.inv3%s' % _index, background_color=pal[_index]))
for _index in range(8):
css.append(Rule('.ansi4%s' % _index, background_color=pal[_index]))
css.append(Rule('.inv4%s' % _index, color=pal[_index]))
# set palette colors in 256 color encoding
pal = SCHEME[scheme]
for _index in range(len(pal)):
css.append(Rule('.ansi38-%s' % _index, color=pal[_index]))
css.append(Rule('.inv38-%s' % _index, background_color=pal[_index]))
for _index in range(len(pal)):
css.append(Rule('.ansi48-%s' % _index, background_color=pal[_index]))
css.append(Rule('.inv48-%s' % _index, color=pal[_index]))
# css.append("/* Define the explicit color codes (obnoxious) */\n\n")
for green in range(0, 6):
for red in range(0, 6):
for blue in range(0, 6):
css.append(Rule(".ansi38-%s" % index(red, green, blue),
color=color(red, green, blue)))
css.append(Rule(".inv38-%s" % index(red, green, blue),
background=color(red, green, blue)))
css.append(Rule(".ansi48-%s" % index(red, green, blue),
background=color(red, green, blue)))
css.append(Rule(".inv48-%s" % index(red, green, blue),
color=color(red, green, blue)))
for grey in range(0, 24):
css.append(Rule('.ansi38-%s' % index2(grey), color=level(grey)))
css.append(Rule('.inv38-%s' % index2(grey), background=level(grey)))
css.append(Rule('.ansi48-%s' % index2(grey), background=level(grey)))
css.append(Rule('.inv48-%s' % index2(grey), color=level(grey)))
return css
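# Usage sketch (assumes the package is importable as `ansi2html`, as in the
# upstream project; not executed here):
#   from ansi2html.style import get_styles
#   css_text = '\n'.join(str(rule) for rule in get_styles(dark_bg=True, scheme='xterm'))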
|
universe/remotes/docker_remote.py | BitJetKit/universe | 8,120 | 11137926 |
from __future__ import absolute_import
import base64
import logging
import os
import pipes
import sys
import threading
import uuid
import time, random
import docker
import six.moves.urllib.parse as urlparse
from gym.utils import closer
from universe import error
from universe.remotes import healthcheck, remote
from universe import error, utils
from universe.remotes.compose import container, log_printer, progress_stream
logger = logging.getLogger(__name__)
docker_closer = closer.Closer()
def random_alphanumeric(length=14):
buf = []
while len(buf) < length:
entropy = base64.encodestring(uuid.uuid4().bytes).decode('utf-8')
bytes = [c for c in entropy if c.isalnum()]
buf += bytes
return ''.join(buf)[:length]
def pretty_command(command):
return ' '.join(pipes.quote(c) for c in command)
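# Example (sketch): arguments are shell-quoted only when needed, e.g.
#   pretty_command(['docker', 'run', '-p', '5900:5900', 'my image'])
#   -> "docker run -p 5900:5900 'my image'"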
class DockerManager(object):
def __init__(self, runtime, n, reuse=False, start_timeout=None):
super(DockerManager, self).__init__()
self.runtime = runtime
self.supports_reconnect = False
self.connect_vnc = True
self.connect_rewarder = True
self._assigner = PortAssigner(reuse=reuse)
self._popped = False
self.lock = threading.Lock()
self.envs = []
self._n = n
if start_timeout is None:
start_timeout = 2 * self._n + 5
self.start_timeout = start_timeout
self._start()
def allocate(self, handles, initial=False, params={}):
self._handles = handles
def pop(self, n=None):
"""Call from main thread. Returns the list of newly-available (handle, env) pairs."""
if self._popped:
assert n is None
return []
self._popped = True
envs = []
for i, instance in enumerate(self.instances):
env = remote.Remote(
handle=self._handles[i],
vnc_address='{}:{}'.format(instance.host, instance.vnc_port),
vnc_password='openai',
rewarder_address='{}:{}'.format(instance.host, instance.rewarder_port),
rewarder_password='<PASSWORD>',
)
envs.append(env)
return envs
def _start(self):
self.instances = [DockerInstance(self._assigner, self.runtime, label=str(i)) for i in range(self._n)]
[instance.start() for instance in self.instances]
if int(os.environ.get('OPENAI_REMOTE_VERBOSE', '1')) > 0:
self.start_logging(self.instances)
self.healthcheck(self.instances)
def close(self):
with self.lock:
[instance.close() for instance in self.instances]
def start_logging(self, instances):
containers = [instance._container for instance in instances]
labels = [str(instance.label) for instance in instances]
if all(instance.reusing for instance in instances):
# All containers are being reused, so only bother showing
# a subset of the backlog.
tail = 0
else:
# At least one container is new, so just show
# everything. It'd be nice to have finer-grained control,
# but this would require patching the log printer.
tail = 'all'
log_printer.build(containers, labels, log_args={'tail': tail})
def healthcheck(self, instances):
# Wait for boot
healthcheck.run(
['{}:{}'.format(instance.assigner.info['host'], instance.vnc_port) for instance in instances],
['{}:{}'.format(instance.assigner.info['host'], instance.rewarder_port) for instance in instances],
start_timeout=30,
)
def get_client():
"""
Set DOCKER_HOST (and probably DOCKER_TLS_VERIFY and DOCKER_CERT_PATH) to connect to a docker instance through TCP.
Leave DOCKER_HOST unset and it will use the default, typically unix:/var/run/docker.sock
It also needs to know how to connect to ports on the docker container after creating it.
Set DOCKER_NET_HOST to provide an IP address to connect to the VNC ports on
otherwise if DOCKER_HOST has a hostname, it will connect to the VNC ports using that name.
otherwise it connects using localhost
"""
info = {}
host = os.environ.get('DOCKER_HOST')
net_host = os.environ.get('DOCKER_NET_HOST')
client_api_version = os.environ.get('DOCKER_API_VERSION')
if not client_api_version:
client_api_version = "auto"
# IP to use for started containers
if net_host:
info['host'] = net_host
elif host:
info['host'] = urlparse.urlparse(host).netloc.split(':')[0]
else:
info['host'] = 'localhost'
verify = os.environ.get('DOCKER_TLS_VERIFY') == '1'
if verify: # use TLS
assert_hostname = None
cert_path = os.environ.get('DOCKER_CERT_PATH')
if cert_path:
client_cert = (os.path.join(cert_path, 'cert.pem'), os.path.join(cert_path, 'key.pem'))
ca_cert = os.path.join(cert_path, 'ca.pem')
else:
client_cert = ca_cert = None
tls_config = docker.tls.TLSConfig(
client_cert=client_cert,
ca_cert=ca_cert,
verify=verify,
assert_hostname=assert_hostname,
)
return docker.Client(base_url=host, tls=tls_config, version=client_api_version), info
else:
return docker.Client(base_url=host, version=client_api_version), info
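# Environment sketch (illustrative values only): pointing this client at a remote
# TLS-protected daemon, per the docstring above, could look like
#   DOCKER_HOST=tcp://192.168.99.100:2376 DOCKER_TLS_VERIFY=1 \
#   DOCKER_CERT_PATH=$HOME/.docker python your_script.py
# Leaving DOCKER_HOST unset falls back to the local unix socket.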
class PortAssigner(object):
def __init__(self, reuse=False):
self.reuse = reuse
self.instance_id = 'universe-' + random_alphanumeric(length=6)
self.client, self.info = get_client()
self._next_port = 5900
self._refresh_ports()
def _refresh_ports(self):
ports = {}
for container in self.client.containers():
for port in container['Ports']:
# {u'IP': u'0.0.0.0', u'Type': u'tcp', u'PublicPort': 5000, u'PrivatePort': 500}
if port['Type'] == 'tcp' and 'PublicPort' in port:
ports[port['PublicPort']] = container['Id']
logger.info('Ports used: %s', ports.keys())
self._ports = ports
def allocate_ports(self, num):
if self.reuse and self._next_port in self._ports:
vnc_id = self._ports[self._next_port]
rewarder_id = self._ports.get(self._next_port+10000)
# Reuse an existing docker container if it exists
if (self._next_port+10000) not in self._ports:
raise error.Error("Port {} was allocated but {} was not. This indicates unexpected state with spun-up VNC docker instances.".format(self._next_port, self._next_port+1))
elif vnc_id != rewarder_id:
raise error.Error("Port {} is exposed from {} while {} is exposed from {}. Both should come from a single Docker instance running your environment.".format(vnc_id, self._next_port, rewarder_id, self._next_port+10000))
base = self._next_port
self._next_port += 1
return base, base+10000, vnc_id
elif not self.reuse:
            # Otherwise, find and allocate the lowest free pair of
            # ports. This doesn't work for the reuse case since on
# restart we won't remember where we spun up our
# containers.
while self._next_port in self._ports or (self._next_port+10000) in self._ports:
self._next_port += 1
base = self._next_port
self._next_port += 1
# And get started!
return base, base+10000, None
class DockerInstance(object):
def __init__(self, assigner, runtime, label='main'):
self._docker_closer_id = docker_closer.register(self)
self.label = label
self.assigner = assigner
        self.name = '{}-{}'.format(self.assigner.instance_id, self.label)
self.runtime = runtime
self._container_id = None
self._closed = False
self._container = None
self.host = self.assigner.info['host']
self.vnc_port = None
self.rewarder_port = None
self.reusing = None
self.started = False
def start(self, attempts=None):
if attempts is None:
# If we're reusing, we don't scan through ports for a free
# one.
if not self.assigner.reuse:
attempts = 20
else:
attempts = 1
for attempt in range(attempts):
self._spawn()
e = self._start()
if e is None:
return
time.sleep(random.uniform(1.0, 5.0))
self.assigner._refresh_ports()
raise error.Error('[{}] Could not start container after {} attempts. Last error: {}'.format(self.label, attempts, e))
def _spawn(self):
if self.runtime.image is None:
raise error.Error('No image specified')
assert self._container_id is None
self.vnc_port, self.rewarder_port, self._container_id = self.assigner.allocate_ports(2)
if self._container_id is not None:
logger.info('[%s] Reusing container %s on ports %s and %s', self.label, self._container_id[:12], self.vnc_port, self.rewarder_port)
self.reusing = True
self.started = True
return
self.reusing = False
logger.info('[%s] Creating container: image=%s. Run the same thing by hand as: %s',
self.label,
self.runtime.image,
pretty_command(self.runtime.cli_command(self.vnc_port, self.rewarder_port)))
try:
container = self._spawn_container()
except docker.errors.NotFound as e:
# Looks like we need to pull the image
            assert 'No such image' in e.explanation.decode('utf-8'), 'Expected NotFound error message to include "No such image", but it was: {}. This is probably just a bug in this assertion and the assumption was incorrect'.format(e.explanation)
logger.info('Image %s not present locally; pulling', self.runtime.image)
self._pull_image()
# If we called pull_image from multiple processes (as we do with universe-starter-agent A3C)
# these will all return at the same time. We probably all got the same port numbers before the pull started,
# so wait a short random time and refresh our port numbers
time.sleep(random.uniform(0.5, 2.5))
self.assigner._refresh_ports()
self.vnc_port, self.rewarder_port, self._container_id = self.assigner.allocate_ports(2)
if self._container_id is not None:
logger.info('[%s] Reusing container %s on ports %s and %s', self.label, self._container_id[:12], self.vnc_port, self.rewarder_port)
self.reusing = True
self.started = True
return
# Try spawning again.
container = self._spawn_container()
self._container_id = container['Id']
def _pull_image(self):
output = self.client.pull(self.runtime.image, stream=True)
return progress_stream.get_digest_from_pull(
progress_stream.stream_output(output, sys.stdout))
# docker-compose uses this:
# try:
# except StreamOutputError as e:
# if not ignore_pull_failures:
# raise
# else:
# log.error(six.text_type(e))
def _spawn_container(self):
# launch instance, and refresh if error
container = self.client.create_container(
image=self.runtime.image,
command=self.runtime.command,
# environment=self.runtime.environment,
name=self.name,
host_config=self.client.create_host_config(
port_bindings={
5900: self.vnc_port,
15900: self.rewarder_port,
},
**self.runtime.host_config),
labels={
'com.openai.automanaged': 'true',
}
)
return container
def _start(self):
# Need to start up the container!
if not self.started:
logger.debug('[%s] Starting container: id=%s', self.label, self._container_id)
try:
self.client.start(container=self._container_id)
except docker.errors.APIError as e:
if 'port is already allocated' in str(e.explanation):
logger.info('[%s] Could not start container: %s', self.label, e)
self._remove()
return e
else:
raise
else:
self.started = True
self._container = container.Container.from_id(self.client, self._container_id)
return None
def _remove(self):
logger.info("Killing and removing container: id=%s", self._container_id)
try:
self.client.remove_container(container=self._container_id, force=True)
except docker.errors.APIError as e:
# This seems to happen occasionally when we try to delete a container immediately after creating it.
# But although we get an error trying to remove it, it usually goes away shortly
# A typical error message is
# Driver aufs failed to remove root filesystem 0015803583d91741d25fce28ae0ef540b436853d1c90061caacaef97e3682403: \
# rename /var/lib/docker/aufs/mnt/69a72854511f1fbb9d7cb0ef0ce0787e573af0887c1213ba3a0c3a0cfd71efd2 \
# /var/lib/docker/aufs/mnt/69a72854511f1fbb9d7cb0ef0ce0787e573af0887c1213ba3a0c3a0cfd71efd2-removing: \
# device or resource busy
# Just proceed as if it had gone away
if 'device or resource busy' in str(e.explanation):
logger.info("[%s] Could not remove container: %s. You can always kill all automanaged environments on this Docker daemon via: docker rm -f $(docker ps -q -a -f 'label=com.openai.automanaged=true')", self.label, e)
self._container_id = None
return e
else:
raise
self._container_id = None
def __del__(self):
self.close()
def close(self):
if self._closed:
return
docker_closer.unregister(self._docker_closer_id)
        # Make sure 1. we were the ones who started it, 2. it's
# actually been started, and 3. we're meant to kill it.
if self._container_id and not self.assigner.reuse:
self._remove()
self._closed = True
@property
def client(self):
return self.assigner.client
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
from universe.runtimes import registration
# docker run --name test --rm -ti -p 5900:5900 -p 15900:15900 quay.io/openai/universe.gym-core
instance = DockerManager(
runtime=registration.runtime_spec('gym-core'),
n=2,
)
instance.start()
import ipdb;ipdb.set_trace()
|
pymoo/vendor/go_benchmark_functions/go_funcs_F.py | gabicavalcante/pymoo | 762 | 11137944 |
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
from .go_benchmark import Benchmark
class FreudensteinRoth(Benchmark):
r"""
FreudensteinRoth objective function.
This class defines the Freudenstein & Roth [1]_ global optimization problem.
This is a multimodal minimization problem defined as follows:
.. math::
f_{\text{FreudensteinRoth}}(x) = \left\{x_1 - 13 + \left[(5 - x_2) x_2
- 2 \right] x_2 \right\}^2 + \left \{x_1 - 29
+ \left[(x_2 + 1) x_2 - 14 \right] x_2 \right\}^2
with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = 0` for :math:`x = [5, 4]`
.. [1] <NAME>. & <NAME>. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.custom_bounds = [(-3, 3), (-5, 5)]
self.global_optimum = [[5.0, 4.0]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
f1 = (-13.0 + x[0] + ((5.0 - x[1]) * x[1] - 2.0) * x[1]) ** 2
f2 = (-29.0 + x[0] + ((x[1] + 1.0) * x[1] - 14.0) * x[1]) ** 2
return f1 + f2
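# Sanity check (sketch): at the global optimum x = [5, 4] both residuals in fun()
# vanish: -13 + 5 + ((5 - 4)*4 - 2)*4 = 0 and -29 + 5 + ((4 + 1)*4 - 14)*4 = 0,
# so fun([5, 4]) returns 0.0, matching self.fglob.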
|
tests/test_guid_type.py | nyejon/fastapi-utils | 994 | 11137964 | import uuid
from fastapi import FastAPI
from starlette.testclient import TestClient
from fastapi_utils.session import context_session
from tests.conftest import User, session_maker
def test_guid(test_app: FastAPI) -> None:
name1 = "test_name_1"
name2 = "test_name_2"
user_id_1 = str(uuid.uuid4())
with context_session(session_maker.cached_engine) as session:
user1 = User(id=user_id_1, name=name1)
session.add(user1)
session.commit()
assert str(user1.id) == user_id_1
assert user1.related_id is None
with session_maker.context_session() as session:
user2 = User(name=name2)
assert user2.id is None
session.add(user2)
session.commit()
user_id_2 = user2.id
assert user_id_2 is not None
assert user2.related_id is None
test_client = TestClient(test_app)
assert test_client.get(f"/{user_id_1}").json() == name1
assert test_client.get(f"/{user_id_2}").json() == name2
|
data_capture/decorators.py | mepsd/CLAC | 126 | 11137990 |
from functools import wraps
from django.shortcuts import redirect
from frontend import ajaxform
def handle_cancel(*args, redirect_name='index', key_prefix='data_capture:'):
'''
Decorator to handle cancel behavior in Data Capture flows.
The associated request's POST data is checked for a 'cancel' key,
and, if found, all session keys that start with `key_prefix`
are deleted and the request is redirected to `redirect_name`.
'''
no_args = False
if len(args) == 1 and callable(args[0]):
# We were called without args
function = args[0]
no_args = True
def decorator(function):
@wraps(function)
def wrapper(request, *args, **kwargs):
if request.method == 'POST' and 'cancel' in request.POST:
if key_prefix:
# .keys() returns an iterator, which can't be deleted from
# while in a loop, so we use list() to get an actual list
session_keys = list(request.session.keys())
for k in session_keys:
if k.startswith(key_prefix):
del request.session[k]
# if AJAX request, then send JSON response
# that has a 'redirect_url' property
if request.is_ajax():
return ajaxform.ajax_redirect(redirect_name)
# redirect to the view named redirect_name
return redirect(redirect_name)
else:
# pass through
return function(request, *args, **kwargs)
return wrapper
if no_args:
return decorator(function)
else:
return decorator
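# Usage sketch (hypothetical view, not part of this module):
#
#   @handle_cancel(redirect_name='index', key_prefix='data_capture:')
#   def step_2(request):
#       ...  # pressing "Cancel" clears data_capture:* session keys and redirects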
|
piGAN_lib/eval_metrics.py | zihangJiang/CIPS-3D | 308 | 11137993 | import os
import shutil
import torch
import math
from torch_fidelity import calculate_metrics
from torchvision.utils import save_image
from tqdm import tqdm
import copy
import argparse
import shutil
import curriculums
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('generator_file', type=str)
parser.add_argument('--real_image_dir', type=str, required=True)
parser.add_argument('--output_dir', type=str, default='temp')
parser.add_argument('--num_images', type=int, default=2048)
parser.add_argument('--max_batch_size', type=int, default=94800000)
parser.add_argument('--curriculum', type=str, default='CELEBA')
opt = parser.parse_args()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if os.path.exists(opt.output_dir) and os.path.isdir(opt.output_dir):
shutil.rmtree(opt.output_dir)
os.makedirs(opt.output_dir, exist_ok=False)
generator = torch.load(opt.generator_file, map_location=device)
generator.set_device(device)
ema_file = opt.generator_file.split('generator')[0] + 'ema.pth'
ema = torch.load(ema_file)
ema.copy_to(generator.parameters())
generator.eval()
curriculum = curriculums.extract_metadata(getattr(curriculums, opt.curriculum), generator.step)
curriculum['img_size'] = 128
curriculum['psi'] = 1
curriculum['last_back'] = curriculum.get('eval_last_back', False)
curriculum['nerf_noise'] = 0
for img_counter in tqdm(range(opt.num_images)):
z = torch.randn(1, 256, device=device)
with torch.no_grad():
img = generator.staged_forward(z, max_batch_size=opt.max_batch_size, **curriculum)[0].to(device)
save_image(img, os.path.join(opt.output_dir, f'{img_counter:0>5}.jpg'), normalize=True, range=(-1, 1))
metrics_dict = calculate_metrics(opt.output_dir, opt.real_image_dir, cuda=True, isc=True, fid=True, kid=True, verbose=False)
print(metrics_dict) |
lib/jnpr/junos/command/__init__.py | kimcharli/py-junos-eznc | 576 | 11137999 | import sys
import os
import yaml
import types
from jnpr.junos.factory.factory_loader import FactoryLoader
import yamlordereddictloader
__all__ = []
class MetaPathFinder(object):
def find_module(self, fullname, path=None):
mod = fullname.split(".")[-1]
if mod in [
os.path.splitext(i)[0] for i in os.listdir(os.path.dirname(__file__))
]:
return MetaPathLoader()
class MetaPathLoader(object):
def load_module(self, fullname):
if fullname in sys.modules:
return sys.modules[fullname]
mod = fullname.split(".")[-1]
modObj = types.ModuleType(
mod, "Module created to provide a context for %s" % mod
)
with open(os.path.join(os.path.dirname(__file__), mod + ".yml"), "r") as stream:
try:
modules = FactoryLoader().load(
yaml.load(stream, Loader=yamlordereddictloader.Loader)
)
except yaml.YAMLError as exc:
raise ImportError("%s is not loaded" % mod)
for k, v in modules.items():
setattr(modObj, k, v)
sys.modules[fullname] = modObj
return modObj
sys.meta_path.insert(0, MetaPathFinder())
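# Import sketch (hypothetical YAML name): with a `show_arp.yml` file shipped next
# to this module, `from jnpr.junos.command import show_arp` would be intercepted
# by MetaPathFinder above and the module built on the fly from that YAML via
# FactoryLoader.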
|
test/test_utils/io/test_traj_logging.py | TheVinhLuong102/AutoML-SMAC3 | 711 | 11138005 | import tempfile
import logging
import json
import os
import unittest.mock
from unittest.mock import patch
from smac.utils.io.traj_logging import TrajLogger
from smac.utils.io.traj_logging import TrajEntry
from smac.configspace import ConfigurationSpace,\
Configuration, CategoricalHyperparameter, Constant, UniformFloatHyperparameter, UniformIntegerHyperparameter
from smac.scenario.scenario import Scenario
from smac.stats.stats import Stats
__copyright__ = "Copyright 2021, AutoML.org Freiburg-Hannover"
__license__ = "3-clause BSD"
class TrajLoggerTest(unittest.TestCase):
def mocked_get_used_wallclock_time(self):
self.value += 1
return self.value
def setUp(self):
logging.basicConfig()
self.logger = logging.getLogger(self.__module__ + "." + self.__class__.__name__)
self.logger.setLevel(logging.DEBUG)
self.value = 0
self.cs = ConfigurationSpace()
self.cs.add_hyperparameters([
UniformFloatHyperparameter('param_a', -0.2, 1.77, 1.1),
UniformIntegerHyperparameter('param_b', -3, 10, 1),
Constant('param_c', 'value'),
CategoricalHyperparameter('ambigous_categorical', choices=['True', True, 5]), # True is ambigous here
])
self.test_config = Configuration(self.cs, {'param_a': 0.5,
'param_b': 1,
'param_c': 'value',
'ambigous_categorical': 5})
def test_init(self):
scen = Scenario(scenario={'run_obj': 'quality', 'cs': self.cs, 'output_dir': ''})
stats = Stats(scen)
with tempfile.TemporaryDirectory() as tmpdir:
path = os.path.join(tmpdir, 'tmp_test_folder')
TrajLogger(output_dir=path, stats=stats)
self.assertTrue(os.path.exists(path))
def test_oserror(self):
scen = Scenario(scenario={'run_obj': 'quality', 'cs': self.cs, 'output_dir': ''})
stats = Stats(scen)
# test OSError
with patch('os.makedirs') as osMock:
osMock.side_effect = OSError()
self.assertRaises(OSError, TrajLogger, output_dir='random_directory', stats=stats)
@patch('smac.stats.stats.Stats')
def test_add_entries(self, mock_stats):
# Mock stats
mock_stats.ta_time_used = .5
mock_stats.get_used_wallclock_time = self.mocked_get_used_wallclock_time
mock_stats.finished_ta_runs = 1
with tempfile.TemporaryDirectory() as tmpdir:
tl = TrajLogger(output_dir=tmpdir, stats=mock_stats)
# Add some entries
tl.add_entry(0.9, 1, self.test_config, 0)
mock_stats.ta_runs = 2
mock_stats.ta_time_used = 0
tl.add_entry(1.3, 1, self.test_config, 10)
mock_stats.ta_time_used = 0
tl.add_entry(0.7, 2, Configuration(self.cs, dict(self.test_config.get_dictionary(), **{'param_a': 0.})), 10)
# Test the list that's added to the trajectory class
self.assertEqual(tl.trajectory[0], TrajEntry(0.9, 1, self.test_config, 1, 0.5, 1, 0))
# Test named-tuple-access:
self.assertEqual(tl.trajectory[0].train_perf, 0.9)
self.assertEqual(tl.trajectory[0].incumbent_id, 1)
self.assertEqual(tl.trajectory[0].ta_runs, 1)
self.assertEqual(tl.trajectory[0].ta_time_used, 0.5)
self.assertEqual(tl.trajectory[0].wallclock_time, 1)
self.assertEqual(tl.trajectory[0].budget, 0)
self.assertEqual(len(tl.trajectory), 3)
# Check if the trajectories are generated
for fn in ['traj_old.csv', 'traj_aclib2.json', 'traj.json']:
self.assertTrue(os.path.exists(os.path.join(tmpdir, fn)))
# Load trajectories
with open(os.path.join(tmpdir, 'traj_old.csv')) as to:
data = to.read().split('\n')
with open(os.path.join(tmpdir, 'traj_aclib2.json')) as js_aclib:
json_dicts_aclib2 = [json.loads(line) for line in js_aclib.read().splitlines()]
with open(os.path.join(tmpdir, 'traj.json')) as js:
json_dicts_alljson = [json.loads(line) for line in js.read().splitlines()]
# Check old format
header = data[0].split(',')
self.assertEqual(header[0], '"CPU Time Used"')
self.assertEqual(header[-1], '"Configuration..."')
data = list(map(lambda x: x.split(', '), data[1:]))
frmt_str = '%1.6f'
self.assertEqual(frmt_str % 0.5, data[0][0])
self.assertEqual(frmt_str % 0.9, data[0][1])
self.assertEqual(frmt_str % 0.5, data[0][4])
self.assertEqual(frmt_str % 0, data[1][0])
self.assertEqual(frmt_str % 1.3, data[1][1])
self.assertEqual(frmt_str % 2, data[1][4])
self.assertEqual(frmt_str % 0, data[2][0])
self.assertEqual(frmt_str % .7, data[2][1])
self.assertEqual(frmt_str % 3, data[2][4])
# Check aclib2-format
self.assertEqual(json_dicts_aclib2[0]['cpu_time'], .5)
self.assertEqual(json_dicts_aclib2[0]['cost'], 0.9)
self.assertEqual(len(json_dicts_aclib2[0]['incumbent']), 4)
self.assertTrue("param_a='0.5'" in json_dicts_aclib2[0]['incumbent'])
self.assertTrue("param_a='0.0'" in json_dicts_aclib2[2]['incumbent'])
# Check alljson-format
self.assertEqual(json_dicts_alljson[0]['cpu_time'], .5)
self.assertEqual(json_dicts_alljson[0]['cost'], 0.9)
self.assertEqual(len(json_dicts_alljson[0]['incumbent']), 4)
self.assertTrue(json_dicts_alljson[0]["incumbent"]["param_a"] == 0.5)
self.assertTrue(json_dicts_alljson[2]["incumbent"]["param_a"] == 0.0)
self.assertEqual(json_dicts_alljson[0]['budget'], 0)
self.assertEqual(json_dicts_alljson[2]['budget'], 10)
@patch('smac.stats.stats.Stats')
def test_ambigious_categoricals(self, mock_stats):
mock_stats.ta_time_used = 0.5
mock_stats.get_used_wallclock_time = self.mocked_get_used_wallclock_time
mock_stats.finished_ta_runs = 1
with tempfile.TemporaryDirectory() as tmpdir:
tl = TrajLogger(output_dir=tmpdir, stats=mock_stats)
problem_config = Configuration(self.cs, {'param_a': 0.0, 'param_b': 2, 'param_c': 'value',
'ambigous_categorical': True}) # not recoverable without json
tl.add_entry(0.9, 1, problem_config)
from_aclib2 = tl.read_traj_aclib_format(os.path.join(tmpdir, 'traj_aclib2.json'), self.cs)
from_alljson = tl.read_traj_alljson_format(os.path.join(tmpdir, 'traj.json'), self.cs)
# Wrong! but passes:
self.assertIsInstance(from_aclib2[0]['incumbent']['ambigous_categorical'], str)
# Works good for alljson:
self.assertIsInstance(from_alljson[0]['incumbent']['ambigous_categorical'], bool)
if __name__ == "__main__":
unittest.main()
|
tests/filters/filters.py | pyllyukko/plaso | 1,253 | 11138023 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the event filter expression parser filter classes."""
import unittest
from plaso.containers import events
from plaso.filters import filters
from plaso.lib import definitions
from tests import test_lib as shared_test_lib
from tests.containers import test_lib as containers_test_lib
class FalseFilter(filters.Operator):
"""A filter which always evaluates to False for testing."""
def Matches(self, event, event_data, event_data_stream, event_tag):
"""Determines if the event, data and tag match the filter.
Args:
event (EventObject): event to compare against the filter.
event_data (EventData): event data to compare against the filter.
event_data_stream (EventDataStream): event data stream.
event_tag (EventTag): event tag to compare against the filter.
Returns:
bool: True if the event, data and tag match the filter, False otherwise.
"""
return False
class TrueFilter(filters.Operator):
"""A filter which always evaluates to True for testing."""
def Matches(self, event, event_data, event_data_stream, event_tag):
"""Determines if the event, data and tag match the filter.
Args:
event (EventObject): event to compare against the filter.
event_data (EventData): event data to compare against the filter.
event_data_stream (EventDataStream): event data stream.
event_tag (EventTag): event tag to compare against the filter.
Returns:
bool: True if the event, data and tag match the filter, False otherwise.
"""
return True
class FilterTest(shared_test_lib.BaseTestCase):
"""Tests the filter."""
# pylint: disable=protected-access
def testInitialize(self):
"""Tests the __init__ function."""
filter_object = filters.Filter()
self.assertIsNotNone(filter_object)
def testCopyValueToString(self):
"""Tests the _CopyValueToString function."""
filter_object = filters.Filter()
string = filter_object._CopyValueToString(['1', '2', '3'])
self.assertEqual(string, '123')
string = filter_object._CopyValueToString([1, 2, 3])
self.assertEqual(string, '123')
string = filter_object._CopyValueToString(123)
self.assertEqual(string, '123')
string = filter_object._CopyValueToString(b'123')
self.assertEqual(string, '123')
string = filter_object._CopyValueToString('123')
self.assertEqual(string, '123')
class AndFilterTest(shared_test_lib.BaseTestCase):
"""Tests the boolean AND filter."""
_TEST_EVENTS = [
{'data_type': 'test:event',
'test_value': 1,
'timestamp': 5134324321,
'timestamp_desc': definitions.TIME_DESCRIPTION_UNKNOWN}]
def testMatches(self):
"""Tests the Matches function."""
event, event_data, _ = containers_test_lib.CreateEventFromValues(
self._TEST_EVENTS[0])
false_filter_object = FalseFilter()
true_filter_object = TrueFilter()
filter_object = filters.AndFilter(arguments=[
true_filter_object, true_filter_object])
result = filter_object.Matches(event, event_data, None, None)
self.assertTrue(result)
filter_object = filters.AndFilter(arguments=[
false_filter_object, true_filter_object])
result = filter_object.Matches(event, event_data, None, None)
self.assertFalse(result)
class OrFilterTest(shared_test_lib.BaseTestCase):
"""Tests the boolean OR filter."""
_TEST_EVENTS = [
{'data_type': 'test:event',
'test_value': 1,
'timestamp': 5134324321,
'timestamp_desc': definitions.TIME_DESCRIPTION_UNKNOWN}]
def testMatches(self):
"""Tests the Matches function."""
event, event_data, _ = containers_test_lib.CreateEventFromValues(
self._TEST_EVENTS[0])
false_filter_object = FalseFilter()
true_filter_object = TrueFilter()
filter_object = filters.OrFilter(arguments=[
false_filter_object, true_filter_object])
result = filter_object.Matches(event, event_data, None, None)
self.assertTrue(result)
filter_object = filters.OrFilter(arguments=[
false_filter_object, false_filter_object])
result = filter_object.Matches(event, event_data, None, None)
self.assertFalse(result)
class IdentityFilterTest(shared_test_lib.BaseTestCase):
"""Tests the filter which always evaluates to True."""
_TEST_EVENTS = [
{'data_type': 'test:event',
'test_value': 1,
'timestamp': 5134324321,
'timestamp_desc': definitions.TIME_DESCRIPTION_UNKNOWN}]
def testMatches(self):
"""Tests the Matches function."""
event, event_data, _ = containers_test_lib.CreateEventFromValues(
self._TEST_EVENTS[0])
filter_object = filters.IdentityFilter()
result = filter_object.Matches(event, event_data, None, None)
self.assertTrue(result)
class BinaryOperatorTest(shared_test_lib.BaseTestCase):
"""Tests the binary operators interface."""
def testInitialize(self):
"""Tests the __init__ function."""
filter_object = filters.BinaryOperator(arguments=['test_value', 1])
self.assertIsNotNone(filter_object)
class GenericBinaryOperatorTest(shared_test_lib.BaseTestCase):
"""Tests the shared functionality for common binary operators."""
# pylint: disable=protected-access
_TEST_EVENTS = [
{'data_type': 'test:event',
'test_value': 1,
'timestamp': 5134324321,
'timestamp_desc': definitions.TIME_DESCRIPTION_UNKNOWN}]
def testInitialize(self):
"""Tests the __init__ function."""
filter_object = filters.GenericBinaryOperator(arguments=['test_value', 1])
self.assertIsNotNone(filter_object)
def testGetValue(self):
"""Tests the _GetValue function."""
event, event_data, _ = containers_test_lib.CreateEventFromValues(
self._TEST_EVENTS[0])
event_tag = events.EventTag()
event_tag.AddLabel('browser_search')
filter_object = filters.GenericBinaryOperator(arguments=['test_value', 1])
test_value = filter_object._GetValue(
'test_value', event, event_data, None, event_tag)
self.assertEqual(test_value, 1)
test_value = filter_object._GetValue(
'timestamp', event, event_data, None, event_tag)
self.assertIsNotNone(test_value)
self.assertEqual(test_value.timestamp, 5134324321)
test_value = filter_object._GetValue(
'tag', event, event_data, None, event_tag)
self.assertEqual(test_value, ['browser_search'])
# TODO: add tests for FlipBool function
class EqualsOperatorTest(shared_test_lib.BaseTestCase):
"""Tests the equals operator."""
# pylint: disable=protected-access
def testCompareValue(self):
"""Tests the _CompareValue function."""
filter_object = filters.EqualsOperator(arguments=['first', 'second'])
result = filter_object._CompareValue(0, 10)
self.assertFalse(result)
result = filter_object._CompareValue(10, 10)
self.assertTrue(result)
class NotEqualsOperatorTest(shared_test_lib.BaseTestCase):
"""Tests the not equals operator."""
# pylint: disable=protected-access
def testCompareValue(self):
"""Tests the _CompareValue function."""
filter_object = filters.NotEqualsOperator(arguments=['first', 'second'])
result = filter_object._CompareValue(0, 10)
self.assertTrue(result)
result = filter_object._CompareValue(10, 10)
self.assertFalse(result)
class LessThanOperatorTest(shared_test_lib.BaseTestCase):
"""Tests the less than operator."""
# pylint: disable=protected-access
def testCompareValue(self):
"""Tests the _CompareValue function."""
filter_object = filters.LessThanOperator(arguments=['first', 'second'])
result = filter_object._CompareValue(0, 10)
self.assertTrue(result)
result = filter_object._CompareValue(10, 10)
self.assertFalse(result)
result = filter_object._CompareValue(20, 10)
self.assertFalse(result)
class LessEqualOperatorTest(shared_test_lib.BaseTestCase):
"""Tests the less equal operator."""
# pylint: disable=protected-access
def testCompareValue(self):
"""Tests the _CompareValue function."""
filter_object = filters.LessEqualOperator(arguments=['first', 'second'])
result = filter_object._CompareValue(0, 10)
self.assertTrue(result)
result = filter_object._CompareValue(10, 10)
self.assertTrue(result)
result = filter_object._CompareValue(20, 10)
self.assertFalse(result)
class GreaterThanOperatorTest(shared_test_lib.BaseTestCase):
"""Tests the greater than operator."""
# pylint: disable=protected-access
def testCompareValue(self):
"""Tests the _CompareValue function."""
filter_object = filters.GreaterThanOperator(arguments=['first', 'second'])
result = filter_object._CompareValue(0, 10)
self.assertFalse(result)
result = filter_object._CompareValue(10, 10)
self.assertFalse(result)
result = filter_object._CompareValue(20, 10)
self.assertTrue(result)
class GreaterEqualOperatorTest(shared_test_lib.BaseTestCase):
"""Tests the greater equal operator."""
# pylint: disable=protected-access
def testCompareValue(self):
"""Tests the _CompareValue function."""
filter_object = filters.GreaterEqualOperator(arguments=['first', 'second'])
result = filter_object._CompareValue(0, 10)
self.assertFalse(result)
result = filter_object._CompareValue(10, 10)
self.assertTrue(result)
result = filter_object._CompareValue(20, 10)
self.assertTrue(result)
# TODO: add tests for Contains
# TODO: add tests for InSet
# TODO: add tests for Regexp
# TODO: add tests for RegexpInsensitive
if __name__ == "__main__":
unittest.main()
|
examples/testapp/testapp/views.py | benthomasson/gevent-socketio | 625 | 11138027 |
from pyramid.view import view_config
import gevent
from socketio import socketio_manage
from socketio.namespace import BaseNamespace
from socketio.mixins import RoomsMixin, BroadcastMixin
from gevent import socket
def index(request):
""" Base view to load our template """
return {}
"""
ACK model:
The client sends a message of the sort:
{type: 'message',
id: 140,
ack: true,
endpoint: '/tobi',
data: ''
}
The 'ack' value is 'true', marking that we want an automatic 'ack' when it
receives the packet. The Node.js version sends the ack itself, without any
server-side code interaction. It dispatches the packet only after sending back
an ack, so the ack isn't really a reply. It merely indicates that the server
received the packet, not whether the event/message/json was properly processed.
The automated reply from such a request is:
{type: 'ack',
ackId: '140',
endpoint: '',
args: []
}
Where 'ackId' corresponds to the 'id' of the originating message. Upon
reception of this 'ack' message, the client looks up whether a callback function
is associated with this message id (140). If so, it runs it; otherwise, it drops
the packet.
There is a second way to ask for an ack, sending a packet like this:
{type: 'event',
id: 1,
ack: 'data',
endpoint: '',
name: 'chat',
args: ['', '']
}
{type: 'json',
id: 1,
ack: 'data',
endpoint: '',
data: {a: 'b'}
}
.. the same goes for a 'message' packet, which has the 'ack' equal to 'data'.
When the server receives such a packet, it dispatches the corresponding event
(either the named event specified in an 'event' type packet, or 'message' or
'json', if the type is so), and *adds* as a parameter, in addition to the
'args' passed by the event (or 'data' for 'message'/'json'), the ack() function
to call (it encloses the packet 'id' already). Any number of arguments passed
to that 'ack()' function will be passed on to the client side and given as
parameters to the client-side callback.
That is the returning 'ack' message, with the data ready to be passed as
arguments to the saved callback on the client side:
{type: 'ack',
ackId: '12',
endpoint: '',
args: ['woot', 'wa']
}
"""
class GlobalIONamespace(BaseNamespace, BroadcastMixin):
def on_chat(self, *args):
self.emit("bob", {'hello': 'world'})
print "Received chat message", args
self.broadcast_event_not_me('chat', *args)
def recv_connect(self):
print "CONNNNNNNN"
self.emit("you_just_connected", {'bravo': 'kid'})
self.spawn(self.cpu_checker_process)
def recv_json(self, data):
self.emit("got_some_json", data)
def on_bob(self, *args):
self.broadcast_event('broadcasted', args)
self.socket['/chat'].emit('bob')
def cpu_checker_process(self):
"""This will be a greenlet"""
ret = os.system("cat /proc/cpu/stuff")
self.emit("cpu_value", ret)
class ChatIONamespace(BaseNamespace, RoomsMixin):
def on_mymessage(self, msg):
print "In on_mymessage"
self.send("little message back")
self.send({'blah': 'blah'}, json=True)
for x in xrange(2):
self.emit("pack", {'the': 'more', 'you': 'can'})
def on_my_callback(self, packet):
return (1, 2)
def on_trigger_server_callback(self, superbob):
def cb():
print "OK, WE WERE CALLED BACK BY THE ACK! THANKS :)"
self.emit('callmeback', 'this is a first param',
'this is the last param', callback=cb)
def cb2(param1, param2):
print "OK, GOT THOSE VALUES BACK BY CB", param1, param2
self.emit('callmeback', 'this is a first param',
'this is the last param', callback=cb2)
def on_rtc_invite(self, sdp):
print "Got an RTC invite, now pushing to others..."
self.emit_to_room('room1', 'rtc_invite', self.session['nickname'],
sdp)
def recv_connect(self):
self.session['nickname'] = 'guest123'
self.join('room1')
def recv_message(self, data):
print "Received a 'message' with data:", data
def on_disconnect_me(self, data):
print "Disconnecting you buddy", data
self.disconnect()
nsmap = {'': GlobalIONamespace,
'/chat': ChatIONamespace}
@view_config(route_name='socket_io')
def socketio_service(request):
""" The view that will launch the socketio listener """
socketio_manage(request.environ, namespaces=nsmap, request=request)
return {}
|
tests/io/test_everest.py | jorgemarpa/lightkurve | 235 | 11138048 | import pytest
from lightkurve import search_lightcurve
@pytest.mark.remote_data
def test_search_everest():
"""Can we search and download an EVEREST light curve?"""
search = search_lightcurve("GJ 9827", author="EVEREST", campaign=12)
assert len(search) == 1
assert search.table["author"][0] == "EVEREST"
lc = search.download()
assert type(lc).__name__ == "KeplerLightCurve"
assert lc.campaign == 12
|
Chapter02/calendar_form.py | trappn/Mastering-GUI-Programming-with-Python | 138 | 11138050 |
import sys
from PyQt5 import QtWidgets as qtw
from PyQt5 import QtCore as qtc
class MainWindow(qtw.QWidget):
def __init__(self):
"""MainWindow constructor."""
super().__init__()
# Configure the window
self.setWindowTitle("My Calendar App")
self.resize(800, 600)
# Create our widgets
self.calendar = qtw.QCalendarWidget()
self.event_list = qtw.QListWidget()
self.event_title = qtw.QLineEdit()
self.event_category = qtw.QComboBox()
self.event_time = qtw.QTimeEdit(qtc.QTime(8, 0))
self.allday_check = qtw.QCheckBox('All Day')
self.event_detail = qtw.QTextEdit()
self.add_button = qtw.QPushButton('Add/Update')
self.del_button = qtw.QPushButton('Delete')
# Configure some widgets
# Add event categories
self.event_category.addItems(
['Select category…', 'New…', 'Work',
'Meeting', 'Doctor', 'Family']
)
# disable the first category item
self.event_category.model().item(0).setEnabled(False)
# Arrange the widgets
main_layout = qtw.QHBoxLayout()
self.setLayout(main_layout)
main_layout.addWidget(self.calendar)
# Calendar expands to fill the window
self.calendar.setSizePolicy(
qtw.QSizePolicy.Expanding,
qtw.QSizePolicy.Expanding
)
right_layout = qtw.QVBoxLayout()
main_layout.addLayout(right_layout)
right_layout.addWidget(qtw.QLabel('Events on Date'))
right_layout.addWidget(self.event_list)
# Event list expands to fill the right area
self.event_list.setSizePolicy(
qtw.QSizePolicy.Expanding,
qtw.QSizePolicy.Expanding
)
# Create a sub-layout for the event view/add form
event_form = qtw.QGroupBox('Event')
right_layout.addWidget(event_form)
event_form_layout = qtw.QGridLayout()
event_form.setLayout(event_form_layout)
event_form_layout.addWidget(self.event_title, 1, 1, 1, 3)
event_form_layout.addWidget(self.event_category, 2, 1)
event_form_layout.addWidget(self.event_time, 2, 2,)
event_form_layout.addWidget(self.allday_check, 2, 3)
event_form_layout.addWidget(self.event_detail, 3, 1, 1, 3)
event_form_layout.addWidget(self.add_button, 4, 2)
event_form_layout.addWidget(self.del_button, 4, 3)
self.show()
if __name__ == '__main__':
app = qtw.QApplication(sys.argv)
# it's required to save a reference to MainWindow.
# if it goes out of scope, it will be destroyed.
mw = MainWindow()
sys.exit(app.exec())
|
tests/www/views/test_views_pool.py | ChaseKnowlden/airflow | 15,947 | 11138124 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import flask
import pytest
from airflow.models import Pool
from airflow.utils.session import create_session
from tests.test_utils.www import check_content_in_response, check_content_not_in_response
POOL = {
'pool': 'test-pool',
'slots': 777,
'description': 'test-pool-description',
}
@pytest.fixture(autouse=True)
def clear_pools():
with create_session() as session:
session.query(Pool).delete()
@pytest.fixture()
def pool_factory(session):
def factory(**values):
pool = Pool(**{**POOL, **values}) # Passed in values override defaults.
session.add(pool)
session.commit()
return pool
return factory
def test_create_pool_with_same_name(admin_client):
# create test pool
resp = admin_client.post('/pool/add', data=POOL, follow_redirects=True)
check_content_in_response('Added Row', resp)
# create pool with the same name
resp = admin_client.post('/pool/add', data=POOL, follow_redirects=True)
check_content_in_response('Already exists.', resp)
def test_create_pool_with_empty_name(admin_client):
resp = admin_client.post(
'/pool/add',
data={**POOL, "pool": ""},
follow_redirects=True,
)
check_content_in_response('This field is required.', resp)
def test_odd_name(admin_client, pool_factory):
pool_factory(pool="test-pool<script></script>")
resp = admin_client.get('/pool/list/')
check_content_in_response('test-pool<script>', resp)
check_content_not_in_response('test-pool<script>', resp)
def test_list(app, admin_client, pool_factory):
pool_factory(pool="test-pool")
resp = admin_client.get('/pool/list/')
# We should see this link
with app.test_request_context():
url = flask.url_for('TaskInstanceModelView.list', _flt_3_pool='test-pool', _flt_3_state='running')
used_tag = flask.Markup("<a href='{url}'>{slots}</a>").format(url=url, slots=0)
url = flask.url_for('TaskInstanceModelView.list', _flt_3_pool='test-pool', _flt_3_state='queued')
queued_tag = flask.Markup("<a href='{url}'>{slots}</a>").format(url=url, slots=0)
check_content_in_response(used_tag, resp)
check_content_in_response(queued_tag, resp)
def test_pool_muldelete(session, admin_client, pool_factory):
pool = pool_factory()
resp = admin_client.post(
"/pool/action_post",
data={"action": "muldelete", "rowid": [pool.id]},
follow_redirects=True,
)
assert resp.status_code == 200
assert session.query(Pool).filter(Pool.id == pool.id).count() == 0
def test_pool_muldelete_default(session, admin_client, pool_factory):
pool = pool_factory(pool="default_pool")
resp = admin_client.post(
"/pool/action_post",
data={"action": "muldelete", "rowid": [pool.id]},
follow_redirects=True,
)
check_content_in_response("default_pool cannot be deleted", resp)
assert session.query(Pool).filter(Pool.id == pool.id).count() == 1
|
tests/test_metrics.py | vpeopleonatank/segmentation | 122 | 11138157 | import numpy as np
import pytest
import torch
from sklearn.metrics import f1_score
from src.metrics.f1_score import F1Score
from src.utils.utils import set_seed
@torch.no_grad()
@pytest.mark.parametrize('average', ['micro', 'macro', 'weighted'])
def test_f1score_metric(average: str) -> None:
set_seed(42)
labels = torch.randint(1, 10, (4096, 100)).flatten()
predictions = torch.randint(1, 10, (4096, 100)).flatten()
labels_numpy = labels.numpy()
predictions_numpy = predictions.numpy()
f1_metric = F1Score(average)
my_pred = f1_metric(predictions, labels)
f1_pred = f1_score(labels_numpy, predictions_numpy, average=average)
assert np.isclose(my_pred.item(), f1_pred.item())
|
frontend/src/indexesfrontend.py | grofers/PGObserver | 274 | 11138162 | import flotgraph
import time
import indexdata
import hosts
import datetime
import tplE
class IndexesFrontend(object):
def default(self, *p, **params):
if len(p) < 2:
return ""
hostId = int(p[0]) if p[0].isdigit() else hosts.uiShortnameToHostId(p[0])
hostUiName = p[0] if not p[0].isdigit() else hosts.hostIdToUiShortname(p[0])
table_name = p[1]
if table_name.find('.') == -1:
raise Exception('Full table name needed, e.g. schema_x.table_y')
schema = table_name.split('.')[0]
if 'from' in params and 'to' in params:
interval = {}
interval['from'] = params['from']
interval['to'] = params['to']
else:
interval = {}
interval['from'] = (datetime.datetime.now() - datetime.timedelta(days=14)).strftime('%Y-%m-%d')
interval['to'] = (datetime.datetime.now() + datetime.timedelta(days=1)).strftime('%Y-%m-%d')
data = indexdata.getIndexesDataForTable(hostId, table_name, interval['from'], interval['to'])
all_graphs=[]
i=0
for x in data:
one_index_graphs=[]
for k,v in x['data'].iteritems():
i+=1
if k == 'size':
graph = flotgraph.SizeGraph ("index"+str(i),"right")
else:
graph = flotgraph.Graph ("index"+str(i),"right")
graph.addSeries(k,k)
for p in v:
graph.addPoint(k, int(time.mktime(p[0].timetuple()) * 1000) , p[1])
graph = graph.render()
one_index_graphs.append({'data':graph, 'i':i, 'type':k})
one_index_graphs.sort(key=lambda x:x['type'])
all_graphs.append({'name':x['index_name'], 'graphs': one_index_graphs, 'last_index_size':x['last_index_size'], 'total_end_size':x['total_end_size'], 'pct_of_total_end_size':x['pct_of_total_end_size']})
all_graphs = sorted(all_graphs, key=lambda x:x['last_index_size'], reverse=True)
tpl = tplE.env.get_template('table_indexes.html')
return tpl.render(table_name=table_name,
host=hostId,
schema=schema,
interval=interval,
hostuiname = hostUiName,
hostname = hosts.getHosts()[hostId]['uilongname'],
all_graphs=all_graphs,
target='World')
def raw(self, host, table, from_date=None, to_date=None):
host = int(host) if host.isdigit() else hosts.uiShortnameToHostId(host)
if not from_date:
from_date = (datetime.datetime.now() - datetime.timedelta(days=14)).strftime('%Y-%m-%d')
if not to_date:
to_date = (datetime.datetime.now() + datetime.timedelta(days=1)).strftime('%Y-%m-%d')
return indexdata.getIndexesDataForTable(host, table, from_date, to_date)
default.exposed = True
|
openpoiservice/server/db_import/models.py | larsrinn/openpoiservice | 131 | 11138178 |
# openpoiservice/server/models.py
from openpoiservice.server import db, ops_settings
from geoalchemy2 import Geography
import logging
logger = logging.getLogger(__name__)
class POIs(db.Model):
__tablename__ = ops_settings['provider_parameters']['table_name']
logger.info(f"Table name for POIs: {__tablename__}")
osm_type = db.Column(db.Integer, primary_key=True)
osm_id = db.Column(db.BigInteger, primary_key=True)
geom = db.Column(Geography(geometry_type="POINT", srid=4326, spatial_index=True), nullable=False)
src_index = db.Column(db.Integer, index=True)
delflag = db.Column(db.Boolean, nullable=False, index=True)
tags = db.relationship("Tags", backref=db.backref("POIs", cascade="delete"), lazy='dynamic')
categories = db.relationship("Categories", backref=db.backref("POIs", cascade="delete"), lazy='dynamic')
def __repr__(self):
return '<osm id %r>' % self.osm_id
class Categories(db.Model):
__tablename__ = ops_settings['provider_parameters']['table_name'] + "_categories"
logger.info(f"Table name for categories: {__tablename__}")
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
osm_type = db.Column(db.Integer, nullable=False, index=True)
osm_id = db.Column(db.BigInteger, nullable=False, index=True)
category = db.Column(db.Integer, index=True, nullable=False)
__table_args__ = (db.ForeignKeyConstraint([osm_type, osm_id], [POIs.osm_type, POIs.osm_id], ondelete="CASCADE"),)
def __repr__(self):
return '<category %r>' % self.category
class Tags(db.Model):
__tablename__ = ops_settings['provider_parameters']['table_name'] + "_tags"
logger.info(f"Table name for tags: {__tablename__}")
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
osm_type = db.Column(db.Integer, nullable=False, index=True)
osm_id = db.Column(db.BigInteger, nullable=False, index=True)
key = db.Column(db.Text, nullable=True, index=True)
value = db.Column(db.Text, nullable=True, index=True)
__table_args__ = (db.ForeignKeyConstraint([osm_type, osm_id], [POIs.osm_type, POIs.osm_id], ondelete="CASCADE"),)
def __repr__(self):
return '<osm id %r>' % self.osm_id
|
play-1.2.4/python/Lib/site-packages/win32/lib/sspicon.py | AppSecAI-TEST/restcommander | 550 | 11138201 | # Generated by h2py from c:\microsoft sdk\include\sspi.h
ISSP_LEVEL = 32
ISSP_MODE = 1
ISSP_LEVEL = 32
ISSP_MODE = 0
ISSP_LEVEL = 32
ISSP_MODE = 1
def SEC_SUCCESS(Status): return ((Status) >= 0)
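# Example: success and informational statuses are non-negative, so
# SEC_SUCCESS(SEC_I_CONTINUE_NEEDED) is true, while error HRESULTs such as
# SEC_E_INVALID_TOKEN are negative and SEC_SUCCESS() returns false for them
# (both constants are defined further down in this module).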
SECPKG_FLAG_INTEGRITY = 1
SECPKG_FLAG_PRIVACY = 2
SECPKG_FLAG_TOKEN_ONLY = 4
SECPKG_FLAG_DATAGRAM = 8
SECPKG_FLAG_CONNECTION = 16
SECPKG_FLAG_MULTI_REQUIRED = 32
SECPKG_FLAG_CLIENT_ONLY = 64
SECPKG_FLAG_EXTENDED_ERROR = 128
SECPKG_FLAG_IMPERSONATION = 256
SECPKG_FLAG_ACCEPT_WIN32_NAME = 512
SECPKG_FLAG_STREAM = 1024
SECPKG_FLAG_NEGOTIABLE = 2048
SECPKG_FLAG_GSS_COMPATIBLE = 4096
SECPKG_FLAG_LOGON = 8192
SECPKG_FLAG_ASCII_BUFFERS = 16384
SECPKG_FLAG_FRAGMENT = 32768
SECPKG_FLAG_MUTUAL_AUTH = 65536
SECPKG_FLAG_DELEGATION = 131072
SECPKG_FLAG_READONLY_WITH_CHECKSUM = 262144
SECPKG_ID_NONE = 65535
SECBUFFER_VERSION = 0
SECBUFFER_EMPTY = 0
SECBUFFER_DATA = 1
SECBUFFER_TOKEN = 2
SECBUFFER_PKG_PARAMS = 3
SECBUFFER_MISSING = 4
SECBUFFER_EXTRA = 5
SECBUFFER_STREAM_TRAILER = 6
SECBUFFER_STREAM_HEADER = 7
SECBUFFER_NEGOTIATION_INFO = 8
SECBUFFER_PADDING = 9
SECBUFFER_STREAM = 10
SECBUFFER_MECHLIST = 11
SECBUFFER_MECHLIST_SIGNATURE = 12
SECBUFFER_TARGET = 13
SECBUFFER_CHANNEL_BINDINGS = 14
SECBUFFER_ATTRMASK = (-268435456)
SECBUFFER_READONLY = (-2147483648)
SECBUFFER_READONLY_WITH_CHECKSUM = 268435456
SECBUFFER_RESERVED = 1610612736
SECURITY_NATIVE_DREP = 16
SECURITY_NETWORK_DREP = 0
SECPKG_CRED_INBOUND = 1
SECPKG_CRED_OUTBOUND = 2
SECPKG_CRED_BOTH = 3
SECPKG_CRED_DEFAULT = 4
SECPKG_CRED_RESERVED = -268435456
ISC_REQ_DELEGATE = 1
ISC_REQ_MUTUAL_AUTH = 2
ISC_REQ_REPLAY_DETECT = 4
ISC_REQ_SEQUENCE_DETECT = 8
ISC_REQ_CONFIDENTIALITY = 16
ISC_REQ_USE_SESSION_KEY = 32
ISC_REQ_PROMPT_FOR_CREDS = 64
ISC_REQ_USE_SUPPLIED_CREDS = 128
ISC_REQ_ALLOCATE_MEMORY = 256
ISC_REQ_USE_DCE_STYLE = 512
ISC_REQ_DATAGRAM = 1024
ISC_REQ_CONNECTION = 2048
ISC_REQ_CALL_LEVEL = 4096
ISC_REQ_FRAGMENT_SUPPLIED = 8192
ISC_REQ_EXTENDED_ERROR = 16384
ISC_REQ_STREAM = 32768
ISC_REQ_INTEGRITY = 65536
ISC_REQ_IDENTIFY = 131072
ISC_REQ_NULL_SESSION = 262144
ISC_REQ_MANUAL_CRED_VALIDATION = 524288
ISC_REQ_RESERVED1 = 1048576
ISC_REQ_FRAGMENT_TO_FIT = 2097152
ISC_REQ_HTTP = 0x10000000
ISC_RET_DELEGATE = 1
ISC_RET_MUTUAL_AUTH = 2
ISC_RET_REPLAY_DETECT = 4
ISC_RET_SEQUENCE_DETECT = 8
ISC_RET_CONFIDENTIALITY = 16
ISC_RET_USE_SESSION_KEY = 32
ISC_RET_USED_COLLECTED_CREDS = 64
ISC_RET_USED_SUPPLIED_CREDS = 128
ISC_RET_ALLOCATED_MEMORY = 256
ISC_RET_USED_DCE_STYLE = 512
ISC_RET_DATAGRAM = 1024
ISC_RET_CONNECTION = 2048
ISC_RET_INTERMEDIATE_RETURN = 4096
ISC_RET_CALL_LEVEL = 8192
ISC_RET_EXTENDED_ERROR = 16384
ISC_RET_STREAM = 32768
ISC_RET_INTEGRITY = 65536
ISC_RET_IDENTIFY = 131072
ISC_RET_NULL_SESSION = 262144
ISC_RET_MANUAL_CRED_VALIDATION = 524288
ISC_RET_RESERVED1 = 1048576
ISC_RET_FRAGMENT_ONLY = 2097152
ASC_REQ_DELEGATE = 1
ASC_REQ_MUTUAL_AUTH = 2
ASC_REQ_REPLAY_DETECT = 4
ASC_REQ_SEQUENCE_DETECT = 8
ASC_REQ_CONFIDENTIALITY = 16
ASC_REQ_USE_SESSION_KEY = 32
ASC_REQ_ALLOCATE_MEMORY = 256
ASC_REQ_USE_DCE_STYLE = 512
ASC_REQ_DATAGRAM = 1024
ASC_REQ_CONNECTION = 2048
ASC_REQ_CALL_LEVEL = 4096
ASC_REQ_EXTENDED_ERROR = 32768
ASC_REQ_STREAM = 65536
ASC_REQ_INTEGRITY = 131072
ASC_REQ_LICENSING = 262144
ASC_REQ_IDENTIFY = 524288
ASC_REQ_ALLOW_NULL_SESSION = 1048576
ASC_REQ_ALLOW_NON_USER_LOGONS = 2097152
ASC_REQ_ALLOW_CONTEXT_REPLAY = 4194304
ASC_REQ_FRAGMENT_TO_FIT = 8388608
ASC_REQ_FRAGMENT_SUPPLIED = 8192
ASC_REQ_NO_TOKEN = 16777216
ASC_RET_DELEGATE = 1
ASC_RET_MUTUAL_AUTH = 2
ASC_RET_REPLAY_DETECT = 4
ASC_RET_SEQUENCE_DETECT = 8
ASC_RET_CONFIDENTIALITY = 16
ASC_RET_USE_SESSION_KEY = 32
ASC_RET_ALLOCATED_MEMORY = 256
ASC_RET_USED_DCE_STYLE = 512
ASC_RET_DATAGRAM = 1024
ASC_RET_CONNECTION = 2048
ASC_RET_CALL_LEVEL = 8192
ASC_RET_THIRD_LEG_FAILED = 16384
ASC_RET_EXTENDED_ERROR = 32768
ASC_RET_STREAM = 65536
ASC_RET_INTEGRITY = 131072
ASC_RET_LICENSING = 262144
ASC_RET_IDENTIFY = 524288
ASC_RET_NULL_SESSION = 1048576
ASC_RET_ALLOW_NON_USER_LOGONS = 2097152
ASC_RET_ALLOW_CONTEXT_REPLAY = 4194304
ASC_RET_FRAGMENT_ONLY = 8388608
SECPKG_CRED_ATTR_NAMES = 1
SECPKG_ATTR_SIZES = 0
SECPKG_ATTR_NAMES = 1
SECPKG_ATTR_LIFESPAN = 2
SECPKG_ATTR_DCE_INFO = 3
SECPKG_ATTR_STREAM_SIZES = 4
SECPKG_ATTR_KEY_INFO = 5
SECPKG_ATTR_AUTHORITY = 6
SECPKG_ATTR_PROTO_INFO = 7
SECPKG_ATTR_PASSWORD_EXPIRY = 8
SECPKG_ATTR_SESSION_KEY = 9
SECPKG_ATTR_PACKAGE_INFO = 10
SECPKG_ATTR_USER_FLAGS = 11
SECPKG_ATTR_NEGOTIATION_INFO = 12
SECPKG_ATTR_NATIVE_NAMES = 13
SECPKG_ATTR_FLAGS = 14
SECPKG_ATTR_USE_VALIDATED = 15
SECPKG_ATTR_CREDENTIAL_NAME = 16
SECPKG_ATTR_TARGET_INFORMATION = 17
SECPKG_ATTR_ACCESS_TOKEN = 18
SECPKG_ATTR_TARGET = 19
SECPKG_ATTR_AUTHENTICATION_ID = 20
## attributes from schannel.h
SECPKG_ATTR_REMOTE_CERT_CONTEXT = 83
SECPKG_ATTR_LOCAL_CERT_CONTEXT = 84
SECPKG_ATTR_ROOT_STORE = 85
SECPKG_ATTR_SUPPORTED_ALGS = 86
SECPKG_ATTR_CIPHER_STRENGTHS = 87
SECPKG_ATTR_SUPPORTED_PROTOCOLS = 88
SECPKG_ATTR_ISSUER_LIST_EX = 89
SECPKG_ATTR_CONNECTION_INFO = 90
SECPKG_ATTR_EAP_KEY_BLOCK = 91
SECPKG_ATTR_MAPPED_CRED_ATTR = 92
SECPKG_ATTR_SESSION_INFO = 93
SECPKG_ATTR_APP_DATA = 94
SECPKG_NEGOTIATION_COMPLETE = 0
SECPKG_NEGOTIATION_OPTIMISTIC = 1
SECPKG_NEGOTIATION_IN_PROGRESS = 2
SECPKG_NEGOTIATION_DIRECT = 3
SECPKG_NEGOTIATION_TRY_MULTICRED = 4
SECPKG_CONTEXT_EXPORT_RESET_NEW = 1
SECPKG_CONTEXT_EXPORT_DELETE_OLD = 2
SECQOP_WRAP_NO_ENCRYPT = (-2147483647)
SECURITY_ENTRYPOINT_ANSIW = "InitSecurityInterfaceW"
SECURITY_ENTRYPOINT_ANSIA = "InitSecurityInterfaceA"
SECURITY_ENTRYPOINT16 = "INITSECURITYINTERFACEA"
SECURITY_ENTRYPOINT_ANSI = SECURITY_ENTRYPOINT_ANSIW
SECURITY_ENTRYPOINT_ANSI = SECURITY_ENTRYPOINT_ANSIA
SECURITY_ENTRYPOINT = SECURITY_ENTRYPOINT16
SECURITY_ENTRYPOINT_ANSI = SECURITY_ENTRYPOINT16
SECURITY_SUPPORT_PROVIDER_INTERFACE_VERSION = 1
SECURITY_SUPPORT_PROVIDER_INTERFACE_VERSION_2 = 2
SASL_OPTION_SEND_SIZE = 1
SASL_OPTION_RECV_SIZE = 2
SASL_OPTION_AUTHZ_STRING = 3
SASL_OPTION_AUTHZ_PROCESSING = 4
SEC_WINNT_AUTH_IDENTITY_ANSI = 1
SEC_WINNT_AUTH_IDENTITY_UNICODE = 2
SEC_WINNT_AUTH_IDENTITY_VERSION = 512
SEC_WINNT_AUTH_IDENTITY_MARSHALLED = 4
SEC_WINNT_AUTH_IDENTITY_ONLY = 8
SECPKG_OPTIONS_TYPE_UNKNOWN = 0
SECPKG_OPTIONS_TYPE_LSA = 1
SECPKG_OPTIONS_TYPE_SSPI = 2
SECPKG_OPTIONS_PERMANENT = 1
SEC_E_INSUFFICIENT_MEMORY = -2146893056
SEC_E_INVALID_HANDLE = -2146893055
SEC_E_UNSUPPORTED_FUNCTION = -2146893054
SEC_E_TARGET_UNKNOWN = -2146893053
SEC_E_INTERNAL_ERROR = -2146893052
SEC_E_SECPKG_NOT_FOUND = -2146893051
SEC_E_NOT_OWNER = -2146893050
SEC_E_CANNOT_INSTALL = -2146893049
SEC_E_INVALID_TOKEN = -2146893048
SEC_E_CANNOT_PACK = -2146893047
SEC_E_QOP_NOT_SUPPORTED = -2146893046
SEC_E_NO_IMPERSONATION = -2146893045
SEC_E_LOGON_DENIED = -2146893044
SEC_E_UNKNOWN_CREDENTIALS = -2146893043
SEC_E_NO_CREDENTIALS = -2146893042
SEC_E_MESSAGE_ALTERED = -2146893041
SEC_E_OUT_OF_SEQUENCE = -2146893040
SEC_E_NO_AUTHENTICATING_AUTHORITY = -2146893039
SEC_I_CONTINUE_NEEDED = 590610
SEC_I_COMPLETE_NEEDED = 590611
SEC_I_COMPLETE_AND_CONTINUE = 590612
SEC_I_LOCAL_LOGON = 590613
SEC_E_BAD_PKGID = -2146893034
SEC_E_CONTEXT_EXPIRED = -2146893033
SEC_I_CONTEXT_EXPIRED = 590615
SEC_E_INCOMPLETE_MESSAGE = -2146893032
SEC_E_INCOMPLETE_CREDENTIALS = -2146893024
SEC_E_BUFFER_TOO_SMALL = -2146893023
SEC_I_INCOMPLETE_CREDENTIALS = 590624
SEC_I_RENEGOTIATE = 590625
SEC_E_WRONG_PRINCIPAL = -2146893022
SEC_I_NO_LSA_CONTEXT = 590627
SEC_E_TIME_SKEW = -2146893020
SEC_E_UNTRUSTED_ROOT = -2146893019
SEC_E_ILLEGAL_MESSAGE = -2146893018
SEC_E_CERT_UNKNOWN = -2146893017
SEC_E_CERT_EXPIRED = -2146893016
SEC_E_ENCRYPT_FAILURE = -2146893015
SEC_E_DECRYPT_FAILURE = -2146893008
SEC_E_ALGORITHM_MISMATCH = -2146893007
SEC_E_SECURITY_QOS_FAILED = -2146893006
SEC_E_UNFINISHED_CONTEXT_DELETED = -2146893005
SEC_E_NO_TGT_REPLY = -2146893004
SEC_E_NO_IP_ADDRESSES = -2146893003
SEC_E_WRONG_CREDENTIAL_HANDLE = -2146893002
SEC_E_CRYPTO_SYSTEM_INVALID = -2146893001
SEC_E_MAX_REFERRALS_EXCEEDED = -2146893000
SEC_E_MUST_BE_KDC = -2146892999
SEC_E_STRONG_CRYPTO_NOT_SUPPORTED = -2146892998
SEC_E_TOO_MANY_PRINCIPALS = -2146892997
SEC_E_NO_PA_DATA = -2146892996
SEC_E_PKINIT_NAME_MISMATCH = -2146892995
SEC_E_SMARTCARD_LOGON_REQUIRED = -2146892994
SEC_E_SHUTDOWN_IN_PROGRESS = -2146892993
SEC_E_KDC_INVALID_REQUEST = -2146892992
SEC_E_KDC_UNABLE_TO_REFER = -2146892991
SEC_E_KDC_UNKNOWN_ETYPE = -2146892990
SEC_E_UNSUPPORTED_PREAUTH = -2146892989
SEC_E_DELEGATION_REQUIRED = -2146892987
SEC_E_BAD_BINDINGS = -2146892986
SEC_E_MULTIPLE_ACCOUNTS = -2146892985
SEC_E_NO_KERB_KEY = -2146892984
ERROR_IPSEC_QM_POLICY_EXISTS = 13000L
ERROR_IPSEC_QM_POLICY_NOT_FOUND = 13001L
ERROR_IPSEC_QM_POLICY_IN_USE = 13002L
ERROR_IPSEC_MM_POLICY_EXISTS = 13003L
ERROR_IPSEC_MM_POLICY_NOT_FOUND = 13004L
ERROR_IPSEC_MM_POLICY_IN_USE = 13005L
ERROR_IPSEC_MM_FILTER_EXISTS = 13006L
ERROR_IPSEC_MM_FILTER_NOT_FOUND = 13007L
ERROR_IPSEC_TRANSPORT_FILTER_EXISTS = 13008L
ERROR_IPSEC_TRANSPORT_FILTER_NOT_FOUND = 13009L
ERROR_IPSEC_MM_AUTH_EXISTS = 13010L
ERROR_IPSEC_MM_AUTH_NOT_FOUND = 13011L
ERROR_IPSEC_MM_AUTH_IN_USE = 13012L
ERROR_IPSEC_DEFAULT_MM_POLICY_NOT_FOUND = 13013L
ERROR_IPSEC_DEFAULT_MM_AUTH_NOT_FOUND = 13014L
ERROR_IPSEC_DEFAULT_QM_POLICY_NOT_FOUND = 13015L
ERROR_IPSEC_TUNNEL_FILTER_EXISTS = 13016L
ERROR_IPSEC_TUNNEL_FILTER_NOT_FOUND = 13017L
ERROR_IPSEC_MM_FILTER_PENDING_DELETION = 13018L
ERROR_IPSEC_TRANSPORT_FILTER_PENDING_DELETION = 13019L
ERROR_IPSEC_TUNNEL_FILTER_PENDING_DELETION = 13020L
ERROR_IPSEC_MM_POLICY_PENDING_DELETION = 13021L
ERROR_IPSEC_MM_AUTH_PENDING_DELETION = 13022L
ERROR_IPSEC_QM_POLICY_PENDING_DELETION = 13023L
WARNING_IPSEC_MM_POLICY_PRUNED = 13024L
WARNING_IPSEC_QM_POLICY_PRUNED = 13025L
ERROR_IPSEC_IKE_NEG_STATUS_BEGIN = 13800L
ERROR_IPSEC_IKE_AUTH_FAIL = 13801L
ERROR_IPSEC_IKE_ATTRIB_FAIL = 13802L
ERROR_IPSEC_IKE_NEGOTIATION_PENDING = 13803L
ERROR_IPSEC_IKE_GENERAL_PROCESSING_ERROR = 13804L
ERROR_IPSEC_IKE_TIMED_OUT = 13805L
ERROR_IPSEC_IKE_NO_CERT = 13806L
ERROR_IPSEC_IKE_SA_DELETED = 13807L
ERROR_IPSEC_IKE_SA_REAPED = 13808L
ERROR_IPSEC_IKE_MM_ACQUIRE_DROP = 13809L
ERROR_IPSEC_IKE_QM_ACQUIRE_DROP = 13810L
ERROR_IPSEC_IKE_QUEUE_DROP_MM = 13811L
ERROR_IPSEC_IKE_QUEUE_DROP_NO_MM = 13812L
ERROR_IPSEC_IKE_DROP_NO_RESPONSE = 13813L
ERROR_IPSEC_IKE_MM_DELAY_DROP = 13814L
ERROR_IPSEC_IKE_QM_DELAY_DROP = 13815L
ERROR_IPSEC_IKE_ERROR = 13816L
ERROR_IPSEC_IKE_CRL_FAILED = 13817L
ERROR_IPSEC_IKE_INVALID_KEY_USAGE = 13818L
ERROR_IPSEC_IKE_INVALID_CERT_TYPE = 13819L
ERROR_IPSEC_IKE_NO_PRIVATE_KEY = 13820L
ERROR_IPSEC_IKE_DH_FAIL = 13822L
ERROR_IPSEC_IKE_INVALID_HEADER = 13824L
ERROR_IPSEC_IKE_NO_POLICY = 13825L
ERROR_IPSEC_IKE_INVALID_SIGNATURE = 13826L
ERROR_IPSEC_IKE_KERBEROS_ERROR = 13827L
ERROR_IPSEC_IKE_NO_PUBLIC_KEY = 13828L
ERROR_IPSEC_IKE_PROCESS_ERR = 13829L
ERROR_IPSEC_IKE_PROCESS_ERR_SA = 13830L
ERROR_IPSEC_IKE_PROCESS_ERR_PROP = 13831L
ERROR_IPSEC_IKE_PROCESS_ERR_TRANS = 13832L
ERROR_IPSEC_IKE_PROCESS_ERR_KE = 13833L
ERROR_IPSEC_IKE_PROCESS_ERR_ID = 13834L
ERROR_IPSEC_IKE_PROCESS_ERR_CERT = 13835L
ERROR_IPSEC_IKE_PROCESS_ERR_CERT_REQ = 13836L
ERROR_IPSEC_IKE_PROCESS_ERR_HASH = 13837L
ERROR_IPSEC_IKE_PROCESS_ERR_SIG = 13838L
ERROR_IPSEC_IKE_PROCESS_ERR_NONCE = 13839L
ERROR_IPSEC_IKE_PROCESS_ERR_NOTIFY = 13840L
ERROR_IPSEC_IKE_PROCESS_ERR_DELETE = 13841L
ERROR_IPSEC_IKE_PROCESS_ERR_VENDOR = 13842L
ERROR_IPSEC_IKE_INVALID_PAYLOAD = 13843L
ERROR_IPSEC_IKE_LOAD_SOFT_SA = 13844L
ERROR_IPSEC_IKE_SOFT_SA_TORN_DOWN = 13845L
ERROR_IPSEC_IKE_INVALID_COOKIE = 13846L
ERROR_IPSEC_IKE_NO_PEER_CERT = 13847L
ERROR_IPSEC_IKE_PEER_CRL_FAILED = 13848L
ERROR_IPSEC_IKE_POLICY_CHANGE = 13849L
ERROR_IPSEC_IKE_NO_MM_POLICY = 13850L
ERROR_IPSEC_IKE_NOTCBPRIV = 13851L
ERROR_IPSEC_IKE_SECLOADFAIL = 13852L
ERROR_IPSEC_IKE_FAILSSPINIT = 13853L
ERROR_IPSEC_IKE_FAILQUERYSSP = 13854L
ERROR_IPSEC_IKE_SRVACQFAIL = 13855L
ERROR_IPSEC_IKE_SRVQUERYCRED = 13856L
ERROR_IPSEC_IKE_GETSPIFAIL = 13857L
ERROR_IPSEC_IKE_INVALID_FILTER = 13858L
ERROR_IPSEC_IKE_OUT_OF_MEMORY = 13859L
ERROR_IPSEC_IKE_ADD_UPDATE_KEY_FAILED = 13860L
ERROR_IPSEC_IKE_INVALID_POLICY = 13861L
ERROR_IPSEC_IKE_UNKNOWN_DOI = 13862L
ERROR_IPSEC_IKE_INVALID_SITUATION = 13863L
ERROR_IPSEC_IKE_DH_FAILURE = 13864L
ERROR_IPSEC_IKE_INVALID_GROUP = 13865L
ERROR_IPSEC_IKE_ENCRYPT = 13866L
ERROR_IPSEC_IKE_DECRYPT = 13867L
ERROR_IPSEC_IKE_POLICY_MATCH = 13868L
ERROR_IPSEC_IKE_UNSUPPORTED_ID = 13869L
ERROR_IPSEC_IKE_INVALID_HASH = 13870L
ERROR_IPSEC_IKE_INVALID_HASH_ALG = 13871L
ERROR_IPSEC_IKE_INVALID_HASH_SIZE = 13872L
ERROR_IPSEC_IKE_INVALID_ENCRYPT_ALG = 13873L
ERROR_IPSEC_IKE_INVALID_AUTH_ALG = 13874L
ERROR_IPSEC_IKE_INVALID_SIG = 13875L
ERROR_IPSEC_IKE_LOAD_FAILED = 13876L
ERROR_IPSEC_IKE_RPC_DELETE = 13877L
ERROR_IPSEC_IKE_BENIGN_REINIT = 13878L
ERROR_IPSEC_IKE_INVALID_RESPONDER_LIFETIME_NOTIFY = 13879L
ERROR_IPSEC_IKE_INVALID_CERT_KEYLEN = 13881L
ERROR_IPSEC_IKE_MM_LIMIT = 13882L
ERROR_IPSEC_IKE_NEGOTIATION_DISABLED = 13883L
ERROR_IPSEC_IKE_NEG_STATUS_END = 13884L
CRYPT_E_MSG_ERROR = ((-2146889727))
CRYPT_E_UNKNOWN_ALGO = ((-2146889726))
CRYPT_E_OID_FORMAT = ((-2146889725))
CRYPT_E_INVALID_MSG_TYPE = ((-2146889724))
CRYPT_E_UNEXPECTED_ENCODING = ((-2146889723))
CRYPT_E_AUTH_ATTR_MISSING = ((-2146889722))
CRYPT_E_HASH_VALUE = ((-2146889721))
CRYPT_E_INVALID_INDEX = ((-2146889720))
CRYPT_E_ALREADY_DECRYPTED = ((-2146889719))
CRYPT_E_NOT_DECRYPTED = ((-2146889718))
CRYPT_E_RECIPIENT_NOT_FOUND = ((-2146889717))
CRYPT_E_CONTROL_TYPE = ((-2146889716))
CRYPT_E_ISSUER_SERIALNUMBER = ((-2146889715))
CRYPT_E_SIGNER_NOT_FOUND = ((-2146889714))
CRYPT_E_ATTRIBUTES_MISSING = ((-2146889713))
CRYPT_E_STREAM_MSG_NOT_READY = ((-2146889712))
CRYPT_E_STREAM_INSUFFICIENT_DATA = ((-2146889711))
CRYPT_I_NEW_PROTECTION_REQUIRED = (593938)
CRYPT_E_BAD_LEN = ((-2146885631))
CRYPT_E_BAD_ENCODE = ((-2146885630))
CRYPT_E_FILE_ERROR = ((-2146885629))
CRYPT_E_NOT_FOUND = ((-2146885628))
CRYPT_E_EXISTS = ((-2146885627))
CRYPT_E_NO_PROVIDER = ((-2146885626))
CRYPT_E_SELF_SIGNED = ((-2146885625))
CRYPT_E_DELETED_PREV = ((-2146885624))
CRYPT_E_NO_MATCH = ((-2146885623))
CRYPT_E_UNEXPECTED_MSG_TYPE = ((-2146885622))
CRYPT_E_NO_KEY_PROPERTY = ((-2146885621))
CRYPT_E_NO_DECRYPT_CERT = ((-2146885620))
CRYPT_E_BAD_MSG = ((-2146885619))
CRYPT_E_NO_SIGNER = ((-2146885618))
CRYPT_E_PENDING_CLOSE = ((-2146885617))
CRYPT_E_REVOKED = ((-2146885616))
CRYPT_E_NO_REVOCATION_DLL = ((-2146885615))
CRYPT_E_NO_REVOCATION_CHECK = ((-2146885614))
CRYPT_E_REVOCATION_OFFLINE = ((-2146885613))
CRYPT_E_NOT_IN_REVOCATION_DATABASE = ((-2146885612))
CRYPT_E_INVALID_NUMERIC_STRING = ((-2146885600))
CRYPT_E_INVALID_PRINTABLE_STRING = ((-2146885599))
CRYPT_E_INVALID_IA5_STRING = ((-2146885598))
CRYPT_E_INVALID_X500_STRING = ((-2146885597))
CRYPT_E_NOT_CHAR_STRING = ((-2146885596))
CRYPT_E_FILERESIZED = ((-2146885595))
CRYPT_E_SECURITY_SETTINGS = ((-2146885594))
CRYPT_E_NO_VERIFY_USAGE_DLL = ((-2146885593))
CRYPT_E_NO_VERIFY_USAGE_CHECK = ((-2146885592))
CRYPT_E_VERIFY_USAGE_OFFLINE = ((-2146885591))
CRYPT_E_NOT_IN_CTL = ((-2146885590))
CRYPT_E_NO_TRUSTED_SIGNER = ((-2146885589))
CRYPT_E_MISSING_PUBKEY_PARA = ((-2146885588))
CRYPT_E_OSS_ERROR = ((-2146881536))
## Kerberos message types for LsaCallAuthenticationPackage (from ntsecapi.h)
KerbDebugRequestMessage = 0
KerbQueryTicketCacheMessage = 1
KerbChangeMachinePasswordMessage = 2
KerbVerifyPacMessage = 3
KerbRetrieveTicketMessage = 4
KerbUpdateAddressesMessage = 5
KerbPurgeTicketCacheMessage = 6
KerbChangePasswordMessage = 7
KerbRetrieveEncodedTicketMessage = 8
KerbDecryptDataMessage = 9
KerbAddBindingCacheEntryMessage = 10
KerbSetPasswordMessage = 11
KerbSetPasswordExMessage = 12
KerbVerifyCredentialsMessage = 13
KerbQueryTicketCacheExMessage = 14
KerbPurgeTicketCacheExMessage = 15
KerbRefreshSmartcardCredentialsMessage = 16
KerbAddExtraCredentialsMessage = 17
KerbQuerySupplementalCredentialsMessage = 18
## messages used with msv1_0 from ntsecapi.h
MsV1_0Lm20ChallengeRequest = 0
MsV1_0Lm20GetChallengeResponse = 1
MsV1_0EnumerateUsers = 2
MsV1_0GetUserInfo = 3
MsV1_0ReLogonUsers = 4
MsV1_0ChangePassword = 5
MsV1_0ChangeCachedPassword = 6
MsV1_0GenericPassthrough = 7
MsV1_0CacheLogon = 8
MsV1_0SubAuth = 9
MsV1_0DeriveCredential = 10
MsV1_0CacheLookup = 11
MsV1_0SetProcessOption = 12
SEC_E_OK = 0
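# Note: the ISC_REQ_*/ISC_RET_* and ASC_REQ_*/ASC_RET_* values above are bit flags,
# so SSPI callers normally combine them with bitwise OR. A minimal illustrative
# sketch (the variable name is arbitrary and not part of any API):
#
#     context_flags = ISC_REQ_CONFIDENTIALITY | ISC_REQ_REPLAY_DETECT | ISC_REQ_CONNECTION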
|
terrascript/testing/d.py | mjuenema/python-terrascript | 507 | 11138229 | # terrascript/testing/d.py
# Automatically generated by tools/makecode.py ()
import warnings
warnings.warn(
"using the 'legacy layout' is deprecated", DeprecationWarning, stacklevel=2
)
import terrascript
class testing_assertions(terrascript.Data):
pass
class testing_tap(terrascript.Data):
pass
|
octoprint/tests/test_octoprint.py | divyamamgai/integrations-extras | 158 | 11138262 | import mock
import pytest
from datadog_checks.octoprint import OctoPrintCheck
@pytest.mark.skip
@pytest.mark.integration
@pytest.mark.usefixtures('dd_environment')
@mock.patch('datadog_checks.octoprint.OctoPrintCheck.get_rpi_core_temp')
def test_check(mock_rpi_temp, aggregator, mock_api_request, instance):
mock_rpi_temp.return_value = 49.0
check = OctoPrintCheck('octoprint', {}, [instance])
check.check(instance)
aggregator.assert_metric("octoprint.rpi_core_temp", 0.0, count=1)
aggregator.assert_metric("octoprint.printer_state", 1, count=1)
aggregator.assert_metric("octoprint.pct_completed", 0, count=1)
aggregator.assert_metric("octoprint.print_job_time", 1, count=1)
aggregator.assert_metric("octoprint.print_job_time_left", 9999999, count=1)
aggregator.assert_metric("octoprint.current_tool_temp", 50.0, count=1)
aggregator.assert_metric("octoprint.target_tool_temp", 190.0, count=1)
aggregator.assert_metric("octoprint.current_bed_temp", 68.0, count=1)
aggregator.assert_metric("octoprint.target_bed_temp", 70.0, count=1)
aggregator.assert_all_metrics_covered()
@pytest.mark.unit
@mock.patch('datadog_checks.octoprint.OctoPrintCheck.get_rpi_core_temp')
def test_empty_job(mock_rpi_temp, aggregator, mock_empty_api_request, instance):
mock_rpi_temp.return_value = 49.0
check = OctoPrintCheck('octoprint', {}, [instance])
check.check(instance)
aggregator.assert_metric("octoprint.rpi_core_temp", 49.0, count=1)
aggregator.assert_metric("octoprint.printer_state", 0, count=1)
aggregator.assert_metric("octoprint.current_tool_temp", 25.0, count=1)
aggregator.assert_metric("octoprint.target_tool_temp", 200.0, count=1)
aggregator.assert_metric("octoprint.current_bed_temp", 24.77, count=1)
aggregator.assert_metric("octoprint.target_bed_temp", 70.0, count=1)
aggregator.assert_all_metrics_covered()
@pytest.mark.unit
@mock.patch('datadog_checks.octoprint.OctoPrintCheck.get_rpi_core_temp')
def test_active_job(mock_rpi_temp, aggregator, mock_active_api_request, instance):
mock_rpi_temp.return_value = 49.0
check = OctoPrintCheck('octoprint', {}, [instance])
check.check(instance)
aggregator.assert_metric("octoprint.rpi_core_temp", 49.0, count=1)
aggregator.assert_metric("octoprint.printer_state", 2, count=1)
aggregator.assert_metric("octoprint.est_print_time", 146, count=1)
aggregator.assert_metric("octoprint.pct_completed", 0.22, count=1)
aggregator.assert_metric("octoprint.print_job_time", 4, count=1)
aggregator.assert_metric("octoprint.print_job_time_left", 15, count=1)
aggregator.assert_metric("octoprint.current_tool_temp", 25.0, count=1)
aggregator.assert_metric("octoprint.target_tool_temp", 200.0, count=1)
aggregator.assert_metric("octoprint.current_bed_temp", 24.77, count=1)
aggregator.assert_metric("octoprint.target_bed_temp", 70.0, count=1)
aggregator.assert_all_metrics_covered()
@pytest.mark.e2e
def test_e2e():
return True
|
examples/kddcup2021/MAG240M/r_unimp/dataset/sage_all_data.py | zbmain/PGL | 1,389 | 11138264 | <filename>examples/kddcup2021/MAG240M/r_unimp/dataset/sage_all_data.py
import os
import yaml
import pgl
import time
import copy
import numpy as np
import os.path as osp
from pgl.utils.logger import log
from pgl.graph import Graph
from pgl import graph_kernel
from pgl.sampling.custom import subgraph
from ogb.lsc import MAG240MDataset, MAG240MEvaluator
import time
from tqdm import tqdm
class MAG240M(object):
"""Iterator"""
def __init__(self, data_dir):
self.num_features = 768
self.num_classes = 153
self.data_dir = data_dir
def prepare_data(self):
dataset = MAG240MDataset(self.data_dir)
graph_file_list = []
paper_edge_path = f'{dataset.dir}/paper_to_paper_symmetric_pgl_split'
graph_file_list.append(paper_edge_path)
t = time.perf_counter()
if not osp.exists(paper_edge_path):
log.info('Converting adjacency matrix...')
edge_index = dataset.edge_index('paper', 'cites', 'paper')
edge_index = edge_index.T
edges_new = np.zeros((edge_index.shape[0], 2))
edges_new[:, 0] = edge_index[:, 1]
edges_new[:, 1] = edge_index[:, 0]
edge_index = np.vstack((edge_index, edges_new))
edge_types = np.full([edge_index.shape[0], ], 0, dtype='int32')
graph = Graph(edge_index, num_nodes=dataset.num_papers, edge_feat={'edge_type': edge_types})
graph.adj_dst_index
graph.dump(paper_edge_path)
log.info(f'Done! [{time.perf_counter() - t:.2f}s]')
author_edge_path = f'{dataset.dir}/paper_to_author_symmetric_pgl_split_src'
graph_file_list.append(author_edge_path)
t = time.perf_counter()
if not osp.exists(author_edge_path):
log.info('Converting author matrix...')
# author
log.info('adding author edges')
edge_index = dataset.edge_index('author', 'writes', 'paper')
edge_index = edge_index.T
row, col = edge_index[:, 0], edge_index[:, 1]
log.info(row[:10])
row += dataset.num_papers
edge_types = np.full(row.shape, 1, dtype='int32')
edge_index = np.stack([row, col], axis=1)
graph = Graph(edge_index, edge_feat={'edge_type': edge_types})
graph.adj_dst_index
graph.dump(author_edge_path)
log.info(f'Done! finish author_edge [{time.perf_counter() - t:.2f}s]')
author_edge_path = f'{dataset.dir}/paper_to_author_symmetric_pgl_split_dst'
graph_file_list.append(author_edge_path)
t = time.perf_counter()
if not osp.exists(author_edge_path):
log.info('Converting author matrix...')
# author
log.info('adding author edges')
edge_index = dataset.edge_index('author', 'writes', 'paper')
edge_index = edge_index.T
row, col = edge_index[:, 0], edge_index[:, 1]
log.info(row[:10])
row += dataset.num_papers
edge_types = np.full(row.shape, 2, dtype='int32')
edge_index = np.stack([col, row], axis=1)
graph = Graph(edge_index, edge_feat={'edge_type': edge_types})
graph.adj_dst_index
graph.dump(author_edge_path)
log.info(f'Done! finish author_edge [{time.perf_counter() - t:.2f}s]')
institution_edge_path = f'{dataset.dir}/institution_edge_symmetric_pgl_split_src'
graph_file_list.append(institution_edge_path)
t = time.perf_counter()
if not osp.exists(institution_edge_path):
log.info('Converting institution matrix...')
# institution
log.info('adding institution edges')
edge_index = dataset.edge_index('author', 'institution')
edge_index = edge_index.T
row, col = edge_index[:, 0], edge_index[:, 1]
log.info(row[:10])
row += dataset.num_papers
col += dataset.num_papers + dataset.num_authors
# edge_type
log.info('building edge type')
edge_types = np.full(row.shape, 3, dtype='int32')
edge_index = np.stack([row, col], axis=1)
graph = Graph(edge_index, edge_feat={'edge_type': edge_types})
graph.adj_dst_index
graph.dump(institution_edge_path)
log.info(f'Done! finish institution_edge [{time.perf_counter() - t:.2f}s]')
institution_edge_path = f'{dataset.dir}/institution_edge_symmetric_pgl_split_dst'
graph_file_list.append(institution_edge_path)
t = time.perf_counter()
if not osp.exists(institution_edge_path):
log.info('Converting institution matrix...')
# institution
log.info('adding institution edges')
edge_index = dataset.edge_index('author', 'institution')
edge_index = edge_index.T
row, col = edge_index[:, 0], edge_index[:, 1]
log.info(row[:10])
row += dataset.num_papers
col += dataset.num_papers + dataset.num_authors
# edge_type
log.info('building edge type')
edge_types = np.full(row.shape, 4, dtype='int32')
edge_index = np.stack([col, row], axis=1)
graph = Graph(edge_index, edge_feat={'edge_type': edge_types})
graph.adj_dst_index
graph.dump(institution_edge_path)
log.info(f'Done! finish institution_edge [{time.perf_counter() - t:.2f}s]')
path = f'{dataset.dir}/full_feat.npy'
author_feat_path = f'{dataset.dir}/author_feat.npy'
institution_feat_path = f'{dataset.dir}/institution_feat.npy'
t = time.perf_counter()
if not osp.exists(path): # Will take ~3 hours...
print('Generating full feature matrix...')
node_chunk_size = 100000
N = (dataset.num_papers + dataset.num_authors +
dataset.num_institutions)
paper_feat = dataset.paper_feat
author_feat = np.memmap(author_feat_path, dtype=np.float16,
shape=(dataset.num_authors, self.num_features),
mode='r')
institution_feat = np.memmap(institution_feat_path, dtype=np.float16,
shape=(dataset.num_institutions, self.num_features),
mode='r')
x = np.memmap(path, dtype=np.float16, mode='w+',
shape=(N, self.num_features))
print('Copying paper features...')
start_idx = 0
end_idx = dataset.num_papers
for i in tqdm(range(start_idx, end_idx, node_chunk_size)):
j = min(i + node_chunk_size, end_idx)
x[i: j] = paper_feat[i: j]
del paper_feat
print('Copying author feature...')
start_idx = dataset.num_papers
end_idx = dataset.num_papers + dataset.num_authors
for i in tqdm(range(start_idx, end_idx, node_chunk_size)):
j = min(i + node_chunk_size, end_idx)
x[i: j] = author_feat[i - start_idx: j - start_idx]
del author_feat
print('Copying institution feature...')
start_idx = dataset.num_papers + dataset.num_authors
end_idx = dataset.num_papers + dataset.num_authors + dataset.num_institutions
for i in tqdm(range(start_idx, end_idx, node_chunk_size)):
j = min(i + node_chunk_size, end_idx)
x[i: j] = institution_feat[i - start_idx: j - start_idx]
del institution_feat
x.flush()
del x
print(f'feature x Done! [{time.perf_counter() - t:.2f}s]')
path = f'{dataset.dir}/all_feat_year.npy'
author_year_path = f'{dataset.dir}/author_feat_year.npy'
institution_year_path = f'{dataset.dir}/institution_feat_year.npy'
t = time.perf_counter()
if not osp.exists(path): # Will take ~3 hours...
print('Generating full year matrix...')
node_chunk_size = 100000
N = (dataset.num_papers + dataset.num_authors +
dataset.num_institutions)
paper_year_feat = dataset.all_paper_year
author_year_feat = np.memmap(author_year_path, dtype=np.int32,
shape=(dataset.num_authors),
mode='r')
institution_year_feat = np.memmap(institution_year_path, dtype=np.int32,
shape=(dataset.num_institutions),
mode='r')
x = np.memmap(path, dtype=np.int32, mode='w+',
shape=(N))
print('Copying paper features...')
start_idx = 0
end_idx = dataset.num_papers
for i in tqdm(range(start_idx, end_idx, node_chunk_size)):
j = min(i + node_chunk_size, end_idx)
x[i: j] = paper_year_feat[i: j]
del paper_year_feat
print('Copying author feature...')
start_idx = dataset.num_papers
end_idx = dataset.num_papers + dataset.num_authors
for i in tqdm(range(start_idx, end_idx, node_chunk_size)):
j = min(i + node_chunk_size, end_idx)
x[i: j] = author_year_feat[i - start_idx: j - start_idx]
del author_year_feat
print('Copying institution feature...')
start_idx = dataset.num_papers + dataset.num_authors
end_idx = dataset.num_papers + dataset.num_authors + dataset.num_institutions
for i in tqdm(range(start_idx, end_idx, node_chunk_size)):
j = min(i + node_chunk_size, end_idx)
x[i: j] = institution_year_feat[i - start_idx: j - start_idx]
del institution_year_feat
x.flush()
del x
print(f'year feature Done! [{time.perf_counter() - t:.2f}s]')
if __name__ == "__main__":
root = 'dataset_path'
print(root)
dataset = MAG240M(root)
dataset.prepare_data()
|
v7.0/mania_analyze.py | jsstwright/osumapper | 296 | 11138267 | # -*- coding: utf-8 -*-
#
# JSON osu! map analysis (for osu!mania)
#
import numpy as np;
def get_map_timing_array(map_json, length=-1, divisor=4):
if length == -1:
length = map_json["obj"][-1]["time"] + 1000; # it has an extra time interval after the last note
if map_json["obj"][-1]["type"] & 8: # spinner end
length = map_json["obj"][-1]["spinnerEndTime"] + 1000;
uts_a = map_json["timing"]["uts"];
out = [];
for i, uts in enumerate(uts_a):
begin_time = uts["beginTime"];
mspb = uts["tickLength"];
if i < len(uts_a)-1:
end_time = uts_a[i+1]["beginTime"];
else:
end_time = length;
arr = np.floor(np.arange(begin_time, end_time, mspb / divisor));
out = out + list(map(lambda f: int(f), arr));
return out;
def get_tick_len(map_json, tick):
uts_a = map_json["timing"]["uts"];
if tick < uts_a[0]["beginTime"]:
return uts_a[0]["tickLength"];
_out = 600;
for uts in uts_a:
if tick >= uts["beginTime"]:
_out = uts["tickLength"];
else:
return _out;
return _out;
def get_slider_len(map_json, tick):
ts_a = map_json["timing"]["ts"];
if tick < ts_a[0]["beginTime"]:
return ts_a[0]["sliderLength"];
_out = 100;
for ts in ts_a:
if tick >= ts["beginTime"]:
_out = ts["sliderLength"];
else:
return _out;
return _out;
def get_slider_len_ts(ts_a, tick):
if tick < ts_a[0]["beginTime"]:
return ts_a[0]["sliderLength"];
_out = 100;
for ts in ts_a:
if tick >= ts["beginTime"]:
_out = ts["sliderLength"];
else:
return _out;
return _out;
def get_end_time(note):
if note["type"] & 8:
return note["spinnerEndTime"];
# elif note["type"] & 2:
# return note["sliderData"]["endTime"];
elif note["type"] & 128:
return note["holdEndTime"];
else:
return note["time"];
# edited from uts to ts
def get_all_ticks_and_lengths_from_ts(uts_array, ts_array, end_time, divisor=4):
# Returns array of all timestamps, ticklens and sliderlens.
endtimes = ([uts["beginTime"] for uts in uts_array] + [end_time])[1:];
timestamps = [np.arange(uts["beginTime"], endtimes[i], uts["tickLength"] / divisor) for i, uts in enumerate(uts_array)];
ticks_from_uts = [list(range(len(timestamp_group))) for timestamp_group in timestamps];
tick_len = [[uts["tickLength"]] * len(np.arange(uts["beginTime"], endtimes[i], uts["tickLength"] / divisor)) for i, uts in enumerate(uts_array)];
# slider_len = [[ts["sliderLength"]] * len(np.arange(ts["beginTime"], endtimes[i], ts["tickLength"] / divisor)) for i, ts in enumerate(ts_array)];
slider_len = [get_slider_len_ts(ts_array, timestamp) for timestamp in np.concatenate(timestamps)];
return np.concatenate(ticks_from_uts), np.round(np.concatenate(timestamps)).astype(int), np.concatenate(tick_len), np.array(slider_len);
def is_uts_begin(map_json, tick):
uts_a = map_json["timing"]["uts"];
begin_times = [uts["beginTime"] for uts in uts_a];
for t in begin_times:
if tick > t - 1 and tick < t + 5:
return True
return False
def get_metronome_count(map_json, tick):
uts_a = map_json["timing"]["uts"];
if tick < uts_a[0]["beginTime"]:
return uts_a[0]["whiteLines"];
for uts in reversed(uts_a):
if tick >= uts["beginTime"]:
return uts["whiteLines"];
def get_map_notes_and_patterns(map_json, **kwargs):
"""
Reads JSON map data and creates a list for every tick
Returns:
data = list of data array: [TICK, TIME, NOTE, NOTE_TYPE, SLIDING, SPINNING, MOMENTUM, Ex1, Ex2, Ex3]
patterns = numpy array shape (num_groups, main_metronome * divisor, 2 * key_count + 1)
[:, :, 0] pattern_avail_hold
[:, :, 1:1+key_count] pattern_note_begin
[:, :, 1+key_count:1+2*key_count] pattern_note_end
Ex1, Ex2, Ex3 = tickLength/500, BPM/120, sliderLength/150
"""
# keyword arguments
length = kwargs.get("length", -1);
divisor = kwargs.get("divisor", 4);
note_max_wait_time = kwargs.get("note_max_wait_time", 1000);
main_metronome = kwargs.get("main_metronome", 4);
# constant multipliers and subtractions
tlen_mp = 1/500;
tlen_s = 1;
bpm_mp = 1/120;
bpm_s = 1;
slen_mp = 1/150;
slen_s = 1;
# get the timing array of timestamps of each tick
tick_times = get_map_timing_array(map_json, length = length, divisor = divisor);
objs_all = map_json["obj"];
key_count = map_json["diff"]["CS"];
objs_each = [[] for i in range(key_count)];
for obj in objs_all:
x = obj["x"]
obj_key = np.floor(x * key_count / 512).astype(int)
objs_each[obj_key].append(obj)
def get_note_type_mania(obj):
if not obj:
return 0;
if obj["type"] & 128:
return 4;
return 1;
# object times each key
obj_times_each = []
for objs in objs_each:
obj_times = [obj["time"] for obj in objs]
obj_times_each.append(obj_times)
# object end times each key
obj_end_times_each = []
for objs in objs_each:
obj_end_times = [get_end_time(obj) for obj in objs]
obj_end_times_each.append(obj_end_times)
obj_ptr_each = [0] * key_count
obj_end_ptr_each = [0] * key_count
po = 0;
start_time = obj_times[0] - note_max_wait_time;
last_obj_time = start_time;
holding_each = [0] * key_count
data = [];
pattern_avail_hold = []
pattern_data = []
pattern_data_end = []
pattern_avail_hold_grouped = []
pattern_data_grouped = []
pattern_data_end_grouped = []
# tick count from start of uninherited timing section
uts_i = 0;
# tick is timestamp here
for i, tick in enumerate(tick_times):
if is_uts_begin(map_json, tick):
uts_i = 0;
else:
uts_i += 1;
# save group in a metronome when at the start of next metronome
metronome = get_metronome_count(map_json, tick)
if uts_i % (metronome * divisor) == 0:
if len(pattern_data) > 0 and np.sum(pattern_data) > 0 and np.sum(pattern_data_end) > 0:
pattern_avail_hold_grouped.append(pattern_avail_hold)
pattern_data_grouped.append(pattern_data)
pattern_data_end_grouped.append(pattern_data_end)
pattern_avail_hold = []
pattern_data = []
pattern_data_end = []
        # Attach extra vars at the end of each tick data point
tlen = get_tick_len(map_json, tick);
bpm = 60000 / tlen;
slen = get_slider_len(map_json, tick);
ex1 = tlen * tlen_mp - tlen_s;
ex2 = bpm * bpm_mp - bpm_s;
ex3 = slen * slen_mp - slen_s;
has_note = False
has_note_end = False
has_hold = False
# list of length (key_count) for pattern on current tick
tick_pattern = []
tick_pattern_end = []
for k in range(key_count):
objs = objs_each[k]
obj_times = obj_times_each[k]
obj_end_times = obj_end_times_each[k]
# locate pointers
while obj_times[obj_ptr_each[k]] < tick - 5 and obj_ptr_each[k] < len(obj_times) - 1:
obj_ptr_each[k] += 1;
while obj_end_times[obj_end_ptr_each[k]] < tick - 5 and obj_end_ptr_each[k] < len(obj_end_times) - 1:
obj_end_ptr_each[k] += 1;
obj_ptr = obj_ptr_each[k]
obj_end_ptr = obj_end_ptr_each[k]
if obj_times[obj_ptr] >= tick - 5 and obj_times[obj_ptr] <= tick + 5: # found note on key
has_note = True
note_type = get_note_type_mania(objs[obj_ptr])
if note_type == 4:
has_hold = True
tick_pattern.append(1)
else:
tick_pattern.append(0)
if obj_end_times[obj_end_ptr] >= tick - 5 and obj_end_times[obj_end_ptr] <= tick + 5: # found note end on key
has_note_end = True
tick_pattern_end.append(1)
else:
tick_pattern_end.append(0)
if has_note:
# TICK, TIME, NOTE, NOTE_TYPE, SLIDING, SPINNING, MOMENTUM, Ex1, Ex2, Ex3
# For mania, NOTE_TYPE = Hit(1) HoldStartOnly(2) HoldStart+Note(3) HoldEndOnly(4)
# SLIDING = SPINNING = MOMENTUM = 0
if has_note_end:
if has_hold:
data.append([i, tick, 1, 3, 0, 0, 0, ex1, ex2, ex3])
else:
data.append([i, tick, 1, 1, 0, 0, 0, ex1, ex2, ex3])
else:
data.append([i, tick, 1, 2, 0, 0, 0, ex1, ex2, ex3])
else:
if has_note_end:
data.append([i, tick, 0, 4, 0, 0, 0, ex1, ex2, ex3])
else:
data.append([i, tick, 0, 0, 0, 0, 0, ex1, ex2, ex3])
pattern_avail_hold.append(1 if has_hold else 0)
pattern_data.append(tick_pattern)
pattern_data_end.append(tick_pattern_end)
    # limit everything to 4 metronomes (main_metronome)
for i, pattern_avail_hold in enumerate(pattern_avail_hold_grouped):
pattern_data = pattern_data_grouped[i]
pattern_data_end = pattern_data_end_grouped[i]
if len(pattern_avail_hold) < main_metronome * divisor:
added_len = main_metronome * divisor - len(pattern_avail_hold)
pattern_avail_hold += [0] * added_len
pattern_avail_hold_grouped[i] = pattern_avail_hold
pattern_data += [[0] * key_count] * added_len
pattern_data_grouped[i] = pattern_data
pattern_data_end += [[0] * key_count] * added_len
pattern_data_end_grouped[i] = pattern_data_end
if len(pattern_avail_hold) > main_metronome * divisor:
pattern_avail_hold = pattern_avail_hold[:main_metronome * divisor]
pattern_avail_hold_grouped[i] = pattern_avail_hold
pattern_data = pattern_data[:main_metronome * divisor]
pattern_data_grouped[i] = pattern_data
pattern_data_end = pattern_data_end[:main_metronome * divisor]
pattern_data_end_grouped[i] = pattern_data_end
if len(pattern_avail_hold_grouped) > 0:
pattern_avail_hold_expanded = np.expand_dims(pattern_avail_hold_grouped, axis=2)
pattern_data = np.concatenate([pattern_avail_hold_expanded, pattern_data_grouped, pattern_data_end_grouped], axis=2)
else:
pattern_data = np.zeros((0, main_metronome * divisor, 1 + 2 * key_count))
return data, pattern_data; |
tests_tensorflow/test_sphere.py | aferrall/redner | 1,146 | 11138272 | <gh_stars>1000+
# Tensorflow by default allocates all GPU memory, leaving very little for rendering.
# We set the environment variable TF_FORCE_GPU_ALLOW_GROWTH to true to enforce on demand
# memory allocation to reduce page faults.
import os
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
import pyredner_tensorflow as pyredner
vertices, indices, uvs, normals = pyredner.generate_sphere(64, 128)
m = pyredner.Material(diffuse_reflectance = tf.constant((0.5, 0.5, 0.5)))
obj = pyredner.Object(vertices = vertices,
indices = indices,
uvs = uvs,
normals = normals,
material = m)
cam = pyredner.automatic_camera_placement([obj], resolution = (480, 640))
scene = pyredner.Scene(objects = [obj], camera = cam)
img = pyredner.render_g_buffer(scene, channels = [pyredner.channels.uv, pyredner.channels.shading_normal])
uv_img = tf.concat([img[:, :, :2], tf.zeros((480, 640, 1))], axis=2)
normal_img = img[:, :, 2:]
pyredner.imwrite(uv_img, 'results/test_sphere/uv.png')
pyredner.imwrite(normal_img, 'results/test_sphere/normal.png')
|
lib/cli_output.py | bingpo/Vxscan | 1,511 | 11138329 | import sys
import time
import pyfiglet
from lib.bcolors import Bcolors
from lib.settings import POC, THREADS, SCANDIR, PING, SOCKS5, CHECK_DB
def banner():
ascii_banner = pyfiglet.figlet_format("Vxscan")
print(Bcolors.RED + ascii_banner + Bcolors.ENDC)
def start_out(hosts):
sys.stdout.write(Bcolors.OKBLUE + "[*] https://github.com/al0ne/Vxscan\n" + Bcolors.ENDC)
sys.stdout.write(Bcolors.OKBLUE + "[*] Scanning POC: " + Bcolors.ENDC)
sys.stdout.write(Bcolors.OKBLUE + str(POC) + "\n" + Bcolors.ENDC)
sys.stdout.write(Bcolors.OKBLUE + "[*] Threads: " + Bcolors.ENDC)
sys.stdout.write(Bcolors.OKBLUE + str(THREADS) + "\n" + Bcolors.ENDC)
sys.stdout.write(Bcolors.OKBLUE + "[*] Target quantity: " + Bcolors.ENDC)
if type(hosts) == list:
sys.stdout.write(Bcolors.OKBLUE + str(len(hosts)) + "\n" + Bcolors.ENDC)
else:
sys.stdout.write(Bcolors.OKBLUE + '1' + "\n" + Bcolors.ENDC)
sys.stdout.write(Bcolors.OKBLUE + "[*] Scanning Dir: " + Bcolors.ENDC)
sys.stdout.write(Bcolors.OKBLUE + str(SCANDIR) + "\n" + Bcolors.ENDC)
sys.stdout.write(Bcolors.OKBLUE + "[*] Ping: " + Bcolors.ENDC)
sys.stdout.write(Bcolors.OKBLUE + str(PING) + "\n" + Bcolors.ENDC)
sys.stdout.write(Bcolors.OKBLUE + "[*] CHECK_DB: " + Bcolors.ENDC)
sys.stdout.write(Bcolors.OKBLUE + str(CHECK_DB) + "\n" + Bcolors.ENDC)
sys.stdout.write(Bcolors.OKBLUE + "[*] Socks5 Proxy: " + Bcolors.ENDC)
sys.stdout.write(Bcolors.OKBLUE + str(SOCKS5) + "\n\n" + Bcolors.ENDC)
def console(plugins, domain, text):
timestamp = time.strftime("%H:%M:%S", time.localtime())
timestamp = Bcolors.OKBLUE + '[' + timestamp + ']' + Bcolors.ENDC
plugins = Bcolors.RED + plugins + Bcolors.ENDC
text = Bcolors.OKGREEN + text + Bcolors.ENDC
sys.stdout.write(timestamp + ' - ' + plugins + ' - ' + domain + ' ' + text)
|
examples/contrib/magic_square_and_cards.py | klorel/or-tools | 279 | 11138346 | # Copyright 2010 <NAME> <EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Magic squares and cards problem in Google CP Solver.
<NAME> (July 1971)
'''
Allowing duplicates values, what is the largest constant sum for an order-3
magic square that can be formed with nine cards from the deck.
'''
This model was created by <NAME> (<EMAIL>)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
from __future__ import print_function
import sys
from ortools.constraint_solver import pywrapcp
def main(n=3):
# Create the solver.
  solver = pywrapcp.Solver("magic_square_and_cards")
#
# data
#
# n = 3
#
# declare variables
#
x = {}
for i in range(n):
for j in range(n):
x[(i, j)] = solver.IntVar(1, 13, "x(%i,%i)" % (i, j))
x_flat = [x[(i, j)] for i in range(n) for j in range(n)]
s = solver.IntVar(1, 13 * 4, "s")
counts = [solver.IntVar(0, 4, "counts(%i)" % i) for i in range(14)]
#
# constraints
#
solver.Add(solver.Distribute(x_flat, list(range(14)), counts))
# the standard magic square constraints (sans all_different)
[solver.Add(solver.Sum([x[(i, j)] for j in range(n)]) == s) for i in range(n)]
[solver.Add(solver.Sum([x[(i, j)] for i in range(n)]) == s) for j in range(n)]
solver.Add(solver.Sum([x[(i, i)] for i in range(n)]) == s) # diag 1
solver.Add(solver.Sum([x[(i, n - i - 1)] for i in range(n)]) == s) # diag 2
# redundant constraint
solver.Add(solver.Sum(counts) == n * n)
# objective
objective = solver.Maximize(s, 1)
#
# solution and search
#
solution = solver.Assignment()
solution.Add(x_flat)
solution.Add(s)
solution.Add(counts)
# db: DecisionBuilder
db = solver.Phase(x_flat, solver.CHOOSE_FIRST_UNBOUND,
solver.ASSIGN_MAX_VALUE)
solver.NewSearch(db, [objective])
num_solutions = 0
while solver.NextSolution():
print("s:", s.Value())
print("counts:", [counts[i].Value() for i in range(14)])
for i in range(n):
for j in range(n):
print(x[(i, j)].Value(), end=" ")
print()
print()
num_solutions += 1
solver.EndSearch()
print()
print("num_solutions:", num_solutions)
print("failures:", solver.Failures())
print("branches:", solver.Branches())
print("WallTime:", solver.WallTime())
n = 3
if __name__ == "__main__":
if len(sys.argv) > 1:
n = int(sys.argv[1])
main(n)
|
recipes/Python/347810_Load_datweb_browser_without_using_temp/recipe-347810.py | tdiprima/code | 2,023 | 11138351 | import BaseHTTPServer
import webbrowser
def LoadInDefaultBrowser(html):
"""Display html in the default web browser without creating a temp file.
Instantiates a trivial http server and calls webbrowser.open with a URL
to retrieve html from that server.
"""
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
bufferSize = 1024*1024
for i in xrange(0, len(html), bufferSize):
self.wfile.write(html[i:i+bufferSize])
server = BaseHTTPServer.HTTPServer(('127.0.0.1', 0), RequestHandler)
webbrowser.open('http://127.0.0.1:%s' % server.server_port)
server.handle_request()
if __name__ == '__main__':
LoadInDefaultBrowser('<b>Hello World</b>')
|
runtests.py | efeslab/llvmlite | 1,384 | 11138372 | <gh_stars>1000+
#!/usr/bin/env python
import sys
from llvmlite.tests import main
if __name__ == "__main__":
main()
|
mpcomp/client.py | sanjay051099/try2- | 199 | 11138389 | <reponame>sanjay051099/try2-<filename>mpcomp/client.py
#!/usr/bin/env python
#
# Copyright (C) 2008, 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
"""Provides a client to interact with Google Data API servers.
This module is used for version 2 of the Google Data APIs. The primary class
in this module is GDClient.
GDClient: handles auth and CRUD operations when communicating with servers.
GDataClient: deprecated client for version one services. Will be removed.
"""
__author__ = "<EMAIL> (<NAME>)"
from mpcomp import atom_client
from mpcomp import core
from mpcomp import http_core
from mpcomp import gdata_data
class Error(Exception):
pass
class RequestError(Error):
status = None
reason = None
body = None
headers = None
class RedirectError(RequestError):
pass
class CaptchaChallenge(RequestError):
captcha_url = None
captcha_token = None
class ClientLoginTokenMissing(Error):
pass
class MissingOAuthParameters(Error):
pass
class ClientLoginFailed(RequestError):
pass
class UnableToUpgradeToken(RequestError):
pass
class Unauthorized(Error):
pass
class BadAuthenticationServiceURL(RedirectError):
pass
class BadAuthentication(RequestError):
pass
class NotModified(RequestError):
pass
class NotImplemented(RequestError):
pass
def error_from_response(message, http_response, error_class, response_body=None):
"""Creates a new exception and sets the HTTP information in the error.
Args:
message: str human readable message to be displayed if the exception is
not caught.
http_response: The response from the server, contains error information.
error_class: The exception to be instantiated and populated with
information from the http_response
response_body: str (optional) specify if the response has already been read
from the http_response object.
"""
if response_body is None:
body = http_response.read()
else:
body = response_body
error = error_class("%s: %i, %s" % (message, http_response.status, body))
error.status = http_response.status
error.reason = http_response.reason
error.body = body
error.headers = http_core.get_headers(http_response)
return error
def get_xml_version(version):
"""Determines which XML schema to use based on the client API version.
Args:
version: string which is converted to an int. The version string is in
the form 'Major.Minor.x.y.z' and only the major version number
is considered. If None is provided assume version 1.
"""
if version is None:
return 1
return int(version.split(".")[0])
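# Worked examples of get_xml_version's behaviour (illustrative, not original comments):
#   get_xml_version(None)    -> 1
#   get_xml_version("1.0")   -> 1
#   get_xml_version("2.0.1") -> 2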
class GDClient(atom_client.AtomPubClient):
"""Communicates with Google Data servers to perform CRUD operations.
This class is currently experimental and may change in backwards
incompatible ways.
This class exists to simplify the following three areas involved in using
the Google Data APIs.
CRUD Operations:
The client provides a generic 'request' method for making HTTP requests.
There are a number of convenience methods which are built on top of
request, which include get_feed, get_entry, get_next, post, update, and
delete. These methods contact the Google Data servers.
Auth:
Reading user-specific private data requires authorization from the user as
do any changes to user data. An auth_token object can be passed into any
of the HTTP requests to set the Authorization header in the request.
    You may also want to set the auth_token member to an object which can
use modify_request to set the Authorization header in the HTTP request.
If you are authenticating using the email address and password, you can
use the client_login method to obtain an auth token and set the
auth_token member.
If you are using browser redirects, specifically AuthSub, you will want
to use gdata.gauth.AuthSubToken.from_url to obtain the token after the
    redirect, and you will probably want to upgrade this single-use token
to a multiple use (session) token using the upgrade_token method.
API Versions:
This client is multi-version capable and can be used with Google Data API
version 1 and version 2. The version should be specified by setting the
api_version member to a string, either '1' or '2'.
"""
# The gsessionid is used by Google Calendar to prevent redirects.
__gsessionid = None
api_version = None
# Name of the Google Data service when making a ClientLogin request.
auth_service = None
# URL prefixes which should be requested for AuthSub and OAuth.
auth_scopes = None
# Name of alternate auth service to use in certain cases
alt_auth_service = None
def request(
self,
method=None,
uri=None,
auth_token=None,
http_request=None,
converter=None,
desired_class=None,
redirects_remaining=4,
**kwargs
):
"""Make an HTTP request to the server.
See also documentation for atom_client.AtomPubClient.request.
If a 302 redirect is sent from the server to the client, this client
assumes that the redirect is in the form used by the Google Calendar API.
The same request URI and method will be used as in the original request,
but a gsessionid URL parameter will be added to the request URI with
the value provided in the server's 302 redirect response. If the 302
redirect is not in the format specified by the Google Calendar API, a
RedirectError will be raised containing the body of the server's
response.
The method calls the client's modify_request method to make any changes
required by the client before the request is made. For example, a
version 2 client could add a GData-Version: 2 header to the request in
its modify_request method.
Args:
method: str The HTTP verb for this request, usually 'GET', 'POST',
'PUT', or 'DELETE'
uri: http_core.Uri, str, or unicode The URL being requested.
auth_token: An object which sets the Authorization HTTP header in its
modify_request method. Recommended classes include
gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken
among others.
http_request: (optional) http_core.HttpRequest
converter: function which takes the body of the response as its only
argument and returns the desired object.
desired_class: class descended from core.XmlElement to which a
successful response should be converted. If there is no
converter function specified (converter=None) then the
desired_class will be used in calling the
core.parse function. If neither
the desired_class nor the converter is specified, an
            HTTP response object will be returned.
redirects_remaining: (optional) int, if this number is 0 and the
server sends a 302 redirect, the request method
will raise an exception. This parameter is used in
recursive request calls to avoid an infinite loop.
Any additional arguments are passed through to
atom_client.AtomPubClient.request.
Returns:
An HTTP response object (see http_core.HttpResponse for a
description of the object's interface) if no converter was
specified and no desired_class was specified. If a converter function
was provided, the results of calling the converter are returned. If no
converter was specified but a desired_class was provided, the response
body will be converted to the class using
core.parse.
"""
if isinstance(uri, str):
uri = http_core.Uri.parse_uri(uri)
# Add the gsession ID to the URL to prevent further redirects.
# TODO: If different sessions are using the same client, there will be a
# multitude of redirects and session ID shuffling.
# If the gsession ID is in the URL, adopt it as the standard location.
if uri is not None and uri.query is not None and "gsessionid" in uri.query:
self.__gsessionid = uri.query["gsessionid"]
# The gsession ID could also be in the HTTP request.
elif (
http_request is not None
and http_request.uri is not None
and http_request.uri.query is not None
and "gsessionid" in http_request.uri.query
):
self.__gsessionid = http_request.uri.query["gsessionid"]
# If the gsession ID is stored in the client, and was not present in the
# URI then add it to the URI.
elif self.__gsessionid is not None:
uri.query["gsessionid"] = self.__gsessionid
# The AtomPubClient should call this class' modify_request before
# performing the HTTP request.
# http_request = self.modify_request(http_request)
response = atom_client.AtomPubClient.request(
self,
method=method,
uri=uri,
auth_token=auth_token,
http_request=http_request,
**kwargs
)
# On success, convert the response body using the desired converter
# function if present.
        # print (response)
        if response is None:
            return None
        print(response.status)
if response.status == 200 or response.status == 201:
# print (converter)
# print (desired_class)
if converter is not None:
return converter(response)
elif desired_class is not None:
# print (self.api_version)
if self.api_version is not None:
return core.parse(
response.read(),
desired_class,
version=get_xml_version(self.api_version),
)
# No API version was specified, so allow parse to
# use the default version.
return core.parse(response.read(), desired_class)
return response
# TODO: move the redirect logic into the Google Calendar client once it
# exists since the redirects are only used in the calendar API.
elif response.status == 302:
if redirects_remaining > 0:
location = response.getheader("Location") or response.getheader(
"location"
)
if location is not None:
# Make a recursive call with the gsession ID in the URI to follow
# the redirect.
return self.request(
method=method,
uri=location,
auth_token=auth_token,
http_request=http_request,
converter=converter,
desired_class=desired_class,
redirects_remaining=redirects_remaining - 1,
**kwargs
)
else:
raise error_from_response(
"302 received without Location header", response, RedirectError
)
else:
raise error_from_response(
"Too many redirects from server", response, RedirectError
)
elif response.status == 401:
raise error_from_response(
"Unauthorized - Server responded with", response, Unauthorized
)
elif response.status == 304:
raise error_from_response(
"Entry Not Modified - Server responded with", response, NotModified
)
elif response.status == 501:
raise error_from_response(
"This API operation is not implemented. - Server responded with",
response,
NotImplemented,
)
# If the server's response was not a 200, 201, 302, 304, 401, or 501, raise
# an exception.
else:
raise error_from_response("Server responded with", response, RequestError)
def modify_request(self, http_request):
"""Adds or changes request before making the HTTP request.
This client will add the API version if it is specified.
Subclasses may override this method to add their own request
modifications before the request is made.
"""
http_request = atom_client.AtomPubClient.modify_request(self, http_request)
if self.api_version is not None:
http_request.headers["GData-Version"] = self.api_version
return http_request
ModifyRequest = modify_request
def get_feed(
self,
uri,
auth_token=None,
converter=None,
desired_class=gdata_data.GDFeed,
**kwargs
):
return self.request(
method="GET",
uri=uri,
auth_token=auth_token,
converter=converter,
desired_class=desired_class,
**kwargs
)
GetFeed = get_feed
def get_entry(
self,
uri,
auth_token=None,
converter=None,
desired_class=gdata_data.GDEntry,
etag=None,
**kwargs
):
http_request = http_core.HttpRequest()
# Conditional retrieval
if etag is not None:
http_request.headers["If-None-Match"] = etag
return self.request(
method="GET",
uri=uri,
auth_token=auth_token,
http_request=http_request,
converter=converter,
desired_class=desired_class,
**kwargs
)
GetEntry = get_entry
def get_next(
self, feed, auth_token=None, converter=None, desired_class=None, **kwargs
):
"""Fetches the next set of results from the feed.
When requesting a feed, the number of entries returned is capped at a
service specific default limit (often 25 entries). You can specify your
own entry-count cap using the max-results URL query parameter. If there
are more results than could fit under max-results, the feed will contain
a next link. This method performs a GET against this next results URL.
Returns:
A new feed object containing the next set of entries in this feed.
"""
if converter is None and desired_class is None:
desired_class = feed.__class__
return self.get_feed(
feed.find_next_link(),
auth_token=auth_token,
converter=converter,
desired_class=desired_class,
**kwargs
)
GetNext = get_next
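    # Illustrative pagination sketch (an assumption, not original code): walk every
    # page of a feed by following its "next" links; `process` is a placeholder.
    #
    #     feed = client.get_feed(uri, desired_class=gdata_data.GDFeed)
    #     while feed is not None:
    #         process(feed.entry)
    #         if feed.find_next_link() is None:
    #             break
    #         feed = client.get_next(feed)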
# TODO: add a refresh method to re-fetch the entry/feed from the server
# if it has been updated.
def post(
self, entry, uri, auth_token=None, converter=None, desired_class=None, **kwargs
):
if converter is None and desired_class is None:
desired_class = entry.__class__
http_request = http_core.HttpRequest()
http_request.add_body_part(
entry.to_string(get_xml_version(self.api_version)), "application/atom+xml"
)
return self.request(
method="POST",
uri=uri,
            auth_token=auth_token,
http_request=http_request,
converter=converter,
desired_class=desired_class,
**kwargs
)
Post = post
def update(self, entry, auth_token=None, force=False, uri=None, **kwargs):
"""Edits the entry on the server by sending the XML for this entry.
Performs a PUT and converts the response to a new entry object with a
matching class to the entry passed in.
Args:
entry:
auth_token:
force: boolean stating whether an update should be forced. Defaults to
False. Normally, if a change has been made since the passed in
entry was obtained, the server will not overwrite the entry since
the changes were based on an obsolete version of the entry.
Setting force to True will cause the update to silently
overwrite whatever version is present.
uri: The uri to put to. If provided, this uri is PUT to rather than the
inferred uri from the entry's edit link.
Returns:
A new Entry object of a matching type to the entry which was passed in.
"""
http_request = http_core.HttpRequest()
http_request.add_body_part(
entry.to_string(get_xml_version(self.api_version)), "application/atom+xml"
)
# Include the ETag in the request if present.
if force:
http_request.headers["If-Match"] = "*"
elif hasattr(entry, "etag") and entry.etag:
http_request.headers["If-Match"] = entry.etag
if uri is None:
uri = entry.find_edit_link()
return self.request(
method="PUT",
uri=uri,
auth_token=auth_token,
http_request=http_request,
desired_class=entry.__class__,
**kwargs
)
Update = update
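    # Illustrative sketch of conditional vs. forced updates (assumption, not part of
    # the original module; the exact conflict status code depends on the service):
    #
    #     try:
    #         entry = client.update(entry)              # honours the entry's ETag
    #     except RequestError as error:
    #         if error.status in (409, 412):            # edit conflict / precondition failed
    #             entry = client.update(entry, force=True)
    #         else:
    #             raise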
def delete(self, entry_or_uri, auth_token=None, force=False, **kwargs):
http_request = http_core.HttpRequest()
# Include the ETag in the request if present.
if force:
http_request.headers["If-Match"] = "*"
elif hasattr(entry_or_uri, "etag") and entry_or_uri.etag:
http_request.headers["If-Match"] = entry_or_uri.etag
# If the user passes in a URL, just delete directly, may not work as
# the service might require an ETag.
if isinstance(entry_or_uri, (str, http_core.Uri)):
return self.request(
method="DELETE",
uri=entry_or_uri,
http_request=http_request,
auth_token=auth_token,
**kwargs
)
return self.request(
method="DELETE",
uri=entry_or_uri.find_edit_link(),
http_request=http_request,
auth_token=auth_token,
**kwargs
)
Delete = delete
def batch(self, feed, uri=None, force=False, auth_token=None, **kwargs):
"""Sends a batch request to the server to execute operation entries.
Args:
feed: A batch feed containing batch entries, each is an operation.
uri: (optional) The uri to which the batch request feed should be POSTed.
If none is provided, then the feed's edit link will be used.
force: (optional) boolean set to True if you want the batch update to
clobber all data. If False, the version in the information in the
feed object will cause the server to check to see that no changes
intervened between when you fetched the data and when you sent the
changes.
auth_token: (optional) An object which sets the Authorization HTTP header
in its modify_request method. Recommended classes include
gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken
among others.
"""
http_request = http_core.HttpRequest()
http_request.add_body_part(
feed.to_string(get_xml_version(self.api_version)), "application/atom+xml"
)
if force:
http_request.headers["If-Match"] = "*"
elif hasattr(feed, "etag") and feed.etag:
http_request.headers["If-Match"] = feed.etag
if uri is None:
uri = feed.find_edit_link()
return self.request(
method="POST",
uri=uri,
auth_token=auth_token,
http_request=http_request,
desired_class=feed.__class__,
**kwargs
)
Batch = batch
# TODO: add a refresh method to request a conditional update to an entry
# or feed.
def _add_query_param(param_string, value, http_request):
if value:
http_request.uri.query[param_string] = value
class Query(object):
def __init__(
self,
text_query=None,
categories=None,
author=None,
alt=None,
updated_min=None,
updated_max=None,
pretty_print=False,
published_min=None,
published_max=None,
start_index=None,
max_results=None,
strict=False,
**custom_parameters
):
"""Constructs a Google Data Query to filter feed contents serverside.
Args:
text_query: Full text search str (optional)
categories: list of strings (optional). Each string is a required
category. To include an 'or' query, put a | in the string between
terms. For example, to find everything in the Fitz category and
the Laurie or Jane category (Fitz and (Laurie or Jane)) you would
set categories to ['Fitz', 'Laurie|Jane'].
author: str (optional) The service returns entries where the author
name and/or email address match your query string.
alt: str (optional) for the Alternative representation type you'd like
the feed in. If you don't specify an alt parameter, the service
returns an Atom feed. This is equivalent to alt='atom'.
alt='rss' returns an RSS 2.0 result feed.
alt='json' returns a JSON representation of the feed.
alt='json-in-script' Requests a response that wraps JSON in a script
tag.
alt='atom-in-script' Requests an Atom response that wraps an XML
string in a script tag.
alt='rss-in-script' Requests an RSS response that wraps an XML
string in a script tag.
updated_min: str (optional), RFC 3339 timestamp format, lower bounds.
For example: 2005-08-09T10:57:00-08:00
updated_max: str (optional) updated time must be earlier than timestamp.
pretty_print: boolean (optional) If True the server's XML response will
be indented to make it more human readable. Defaults to False.
published_min: str (optional), Similar to updated_min but for published
time.
published_max: str (optional), Similar to updated_max but for published
time.
start_index: int or str (optional) 1-based index of the first result to
be retrieved. Note that this isn't a general cursoring mechanism.
If you first send a query with ?start-index=1&max-results=10 and
then send another query with ?start-index=11&max-results=10, the
service cannot guarantee that the results are equivalent to
?start-index=1&max-results=20, because insertions and deletions
could have taken place in between the two queries.
max_results: int or str (optional) Maximum number of results to be
retrieved. Each service has a default max (usually 25) which can
vary from service to service. There is also a service-specific
limit to the max_results you can fetch in a request.
strict: boolean (optional) If True, the server will return an error if
the server does not recognize any of the parameters in the request
URL. Defaults to False.
custom_parameters: other query parameters that are not explicitly defined.
"""
self.text_query = text_query
self.categories = categories or []
self.author = author
self.alt = alt
self.updated_min = updated_min
self.updated_max = updated_max
self.pretty_print = pretty_print
self.published_min = published_min
self.published_max = published_max
self.start_index = start_index
self.max_results = max_results
self.strict = strict
self.custom_parameters = custom_parameters
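    # Illustrative sketch (not in the original source), mirroring the docstring
    # above: entries in the Fitz category AND either the Laurie or Jane
    # category, fetching the second page of 10 results. The 'orderby' custom
    # parameter is a hypothetical service-specific name used only as an example.
    #
    #   q = Query(categories=['Fitz', 'Laurie|Jane'], start_index=11,
    #             max_results=10)
    #   q.add_custom_parameter('orderby', 'modified')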
def add_custom_parameter(self, key, value):
self.custom_parameters[key] = value
AddCustomParameter = add_custom_parameter
def modify_request(self, http_request):
_add_query_param("q", self.text_query, http_request)
if self.categories:
http_request.uri.query["category"] = ",".join(self.categories)
_add_query_param("author", self.author, http_request)
_add_query_param("alt", self.alt, http_request)
_add_query_param("updated-min", self.updated_min, http_request)
_add_query_param("updated-max", self.updated_max, http_request)
if self.pretty_print:
http_request.uri.query["prettyprint"] = "true"
_add_query_param("published-min", self.published_min, http_request)
_add_query_param("published-max", self.published_max, http_request)
if self.start_index is not None:
http_request.uri.query["start-index"] = str(self.start_index)
if self.max_results is not None:
http_request.uri.query["max-results"] = str(self.max_results)
if self.strict:
http_request.uri.query["strict"] = "true"
http_request.uri.query.update(self.custom_parameters)
ModifyRequest = modify_request
class GDQuery(http_core.Uri):
def _get_text_query(self):
return self.query["q"]
def _set_text_query(self, value):
self.query["q"] = value
text_query = property(
_get_text_query,
_set_text_query,
doc="The q parameter for searching for an exact text match on content",
)
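# Minimal usage sketch appended for illustration (not part of the original
# module). It assumes the http_core module imported by this file exposes
# Uri(host=..., path=...) with a mutable `query` dict and HttpRequest(uri=...),
# which matches how modify_request uses those objects above.
if __name__ == "__main__":
    _request = http_core.HttpRequest(
        uri=http_core.Uri(host="www.example.com", path="/feeds/default")
    )
    _query = Query(
        text_query="dogs",
        categories=["Fitz", "Laurie|Jane"],
        updated_min="2005-08-09T10:57:00-08:00",
        max_results=10,
    )
    _query.modify_request(_request)
    # Expected contents (key order may vary): {'q': 'dogs',
    # 'category': 'Fitz,Laurie|Jane',
    # 'updated-min': '2005-08-09T10:57:00-08:00', 'max-results': '10'}
    print(_request.uri.query)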
|
geoq/core/admin.py | kaydoh/geoq | 471 | 11138393 |
# -*- coding: utf-8 -*-
# This technical data was produced for the U. S. Government under Contract No. W15P7T-13-C-F600, and
# is subject to the Rights in Technical Data-Noncommercial Items clause at DFARS 252.227-7013 (FEB 2012)
from reversion.admin import VersionAdmin
from django.contrib.gis import admin
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django import forms
from .models import Project, Job, AOI, Setting, Organization, AOITimer, Responder
from guardian.admin import GuardedModelAdmin
class ObjectAdmin(admin.OSMGeoAdmin, VersionAdmin):
list_display = ('name', 'created_at', 'updated_at')
@admin.register(AOI)
class AOIAdmin(ObjectAdmin):
filter_horizontal = ("reviewers",)
save_on_top = True
actions = ['rename_aois']
search_fields = ['name', 'id']
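    # The rename_aois action below follows Django's two-step admin-action
    # pattern: the first POST renders an intermediate page
    # (core/name_input.html) that asks for a new name, and re-submitting that
    # form with 'apply' renames every selected workcell via queryset.update().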
class NameInputForm(forms.Form):
_selected_action = forms.CharField(widget=forms.MultipleHiddenInput)
name_field = forms.CharField(max_length=200, required=True, label="Workcell Name")
def rename_aois(self, request, queryset):
form = None
if 'apply' in request.POST:
form = self.NameInputForm(request.POST)
if form.is_valid():
namestring = form.cleaned_data['name_field']
queryset.update(name=namestring)
                self.message_user(request, "Successfully renamed selected Workcells")
return HttpResponseRedirect(request.get_full_path())
if not form:
form = self.NameInputForm(initial={'_selected_action': request.POST.getlist('_selected_action')})
return render(request, 'core/name_input.html', {'name_form': form})
rename_aois.short_description = "Rename Workcells"
@admin.register(Job)
class JobAdmin(GuardedModelAdmin, ObjectAdmin):
filter_horizontal = ("analysts", "reviewers", "feature_types", "required_courses")
list_display = ('name', 'project', 'created_at', 'updated_at', 'map')
fields = ('name', 'map', 'workflow', 'analysts', 'reviewers', 'feature_types', \
'required_courses', 'project', 'tags', 'editor')
readonly_fields = ('created_at', 'updated_at')
save_on_top = True
save_as = True
@admin.register(Setting)
class SettingAdmin(admin.ModelAdmin):
list_display = ['name', 'value']
@admin.register(Project)
class ProjectAdmin(admin.ModelAdmin):
list_display = ('name', 'created_at', 'updated_at')
filter_horizontal = ('project_admins', 'contributors',)
@admin.register(AOITimer)
class AOITimerAdmin(admin.ModelAdmin):
list_display = ('user', 'aoi', 'status', 'started_at', 'completed_at',)
    fields = ('user', 'aoi', 'status', 'started_at', 'completed_at',)
admin.site.register(Organization)
@admin.register(Responder)
class ResponderAdmin(admin.ModelAdmin):
    list_display = ('name', 'contact_instructions', 'in_field', 'last_seen', 'longitude', 'latitude')
    fields = ('name', 'contact_instructions', 'in_field', 'last_seen', 'longitude', 'latitude')
|
tools/build/test/chain.py | Manu343726/boost-cmake | 2,831 | 11138395 |
#!/usr/bin/python
# Copyright 2003 <NAME>
# Copyright 2002, 2003 <NAME>
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# This tests that :
# 1) the 'make' correctly assigns types to produced targets
# 2) if 'make' creates targets of type CPP, they are correctly used.
import BoostBuild
t = BoostBuild.Tester(use_test_config=False)
# In order to correctly link this app, 'b.cpp', created by a 'make' rule, should
# be compiled.
t.write("jamroot.jam", "import gcc ;")
t.write("jamfile.jam", r'''
import os ;
if [ os.name ] = NT
{
actions create
{
echo int main() {} > $(<)
}
}
else
{
actions create
{
echo "int main() {}" > $(<)
}
}
IMPORT $(__name__) : create : : create ;
exe a : l dummy.cpp ;
# Needs to be a static lib for Windows - main() cannot appear in DLL.
static-lib l : a.cpp b.cpp ;
make b.cpp : : create ;
''')
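# The chain under test: the 'make' rule generates b.cpp (typed as CPP), b.cpp
# and a.cpp are compiled into the static library 'l', and 'l' is then linked
# into the executable 'a' together with dummy.cpp.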
t.write("a.cpp", "")
t.write("dummy.cpp", "// msvc needs at least one object file\n")
t.run_build_system()
t.expect_addition("bin/$toolset/debug/a.exe")
t.cleanup()
|